path: root/runtime/onert/backend/cpu/ops/EinsumLayer.cc
author    Chunseok Lee <chunseok.lee@samsung.com>  2020-12-14 14:43:04 +0900
committer Chunseok Lee <chunseok.lee@samsung.com>  2020-12-14 14:43:04 +0900
commit    12d88feea8573f8490629cf62fc342b152e57d65 (patch)
tree      3c734cc4d629834d2d523f4575ef84cd64684e57 /runtime/onert/backend/cpu/ops/EinsumLayer.cc
parent    d6b371e095d737922187a518b8faba1ef6f3a2b1 (diff)
download  nnfw-12d88feea8573f8490629cf62fc342b152e57d65.tar.gz
          nnfw-12d88feea8573f8490629cf62fc342b152e57d65.tar.bz2
          nnfw-12d88feea8573f8490629cf62fc342b152e57d65.zip
Imported Upstream version 1.11.0 (upstream/1.11.0)
Diffstat (limited to 'runtime/onert/backend/cpu/ops/EinsumLayer.cc')
-rw-r--r--  runtime/onert/backend/cpu/ops/EinsumLayer.cc | 84
1 file changed, 84 insertions, 0 deletions
diff --git a/runtime/onert/backend/cpu/ops/EinsumLayer.cc b/runtime/onert/backend/cpu/ops/EinsumLayer.cc
new file mode 100644
index 000000000..8c16740a3
--- /dev/null
+++ b/runtime/onert/backend/cpu/ops/EinsumLayer.cc
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "EinsumLayer.h"
+
+#include <cker/operation/Einsum.h>
+
+namespace onert
+{
+namespace backend
+{
+namespace cpu
+{
+namespace ops
+{
+
+EinsumLayer::EinsumLayer()
+ : _inputs(), _output(nullptr), _equation(), _einsum_kernel(new nnfw::cker::Einsum())
+{
+ // DO NOTHING
+}
+
+EinsumLayer::~EinsumLayer() = default;
+
+void EinsumLayer::einsumFloat32()
+{
+ uint32_t num_inputs = _inputs.size();
+ nnfw::cker::Einsum &kernel = *_einsum_kernel;
+
+ kernel.prepare(_equation);
+
+ std::vector<nnfw::cker::Shape> inputShapes;
+ std::vector<const float *> inputFloatPtrs;
+
+ for (uint32_t i = 0; i < num_inputs; i++)
+ {
+ inputShapes.emplace_back(getTensorShape(_inputs[i]));
+ inputFloatPtrs.emplace_back(reinterpret_cast<const float *>(_inputs[i]->buffer()));
+ }
+
+ kernel(_equation, inputShapes, inputFloatPtrs, getTensorShape(_output),
+ reinterpret_cast<float *>(_output->buffer()));
+}
+
+void EinsumLayer::run()
+{
+ if (_output->data_type() == OperandType::FLOAT32)
+ {
+ einsumFloat32();
+ }
+ else
+ {
+ throw std::runtime_error{"Einsum: unsupported data type"};
+ }
+}
+
+void EinsumLayer::configure(const std::vector<const IPortableTensor *> &inputs,
+ std::string equation, IPortableTensor *output)
+{
+ assert(inputs.size() > 0);
+ assert(output != nullptr);
+
+ _inputs = inputs;
+ _equation = equation;
+ _output = output;
+}
+
+} // namespace ops
+} // namespace cpu
+} // namespace backend
+} // namespace onert
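
Note on semantics: the layer above simply forwards its equation string and input buffers to nnfw::cker::Einsum (see kernel.prepare(_equation) and the kernel(...) call in einsumFloat32). The standalone sketch below is not that implementation and uses no onert or cker code; it only illustrates, for the single hand-picked equation "ij,jk->ik" (a plain matrix multiplication), the index-contraction that such an equation names. The function and variable names in it (einsum_ij_jk_ik, a, b, c) are illustrative only.

    // Minimal standalone illustration of einsum semantics for "ij,jk->ik":
    // C[i][k] = sum_j A[i][j] * B[j][k]. Not the nnfw::cker::Einsum kernel.
    #include <cstdio>
    #include <vector>

    std::vector<float> einsum_ij_jk_ik(const std::vector<float> &a, const std::vector<float> &b,
                                       int I, int J, int K)
    {
      std::vector<float> c(I * K, 0.0f); // output is I x K, accumulated from zero
      for (int i = 0; i < I; ++i)
        for (int k = 0; k < K; ++k)
          for (int j = 0; j < J; ++j) // 'j' appears in both inputs but not the output -> summed
            c[i * K + k] += a[i * J + j] * b[j * K + k];
      return c;
    }

    int main()
    {
      // A is 2x3, B is 3x2, so "ij,jk->ik" yields a 2x2 result.
      std::vector<float> a = {1, 2, 3, 4, 5, 6};
      std::vector<float> b = {1, 0, 0, 1, 1, 1};
      auto c = einsum_ij_jk_ik(a, b, 2, 3, 2);
      for (int i = 0; i < 2; ++i)
        std::printf("%g %g\n", c[i * 2 + 0], c[i * 2 + 1]); // prints 4 5 / 10 11
      return 0;
    }

The real layer stays generic: EinsumLayer::configure only records the tensors and the equation string, and einsumFloat32 hands the equation plus the flat float buffers and shapes to the cker kernel, which works out the contraction for whatever equation it was prepared with.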