summaryrefslogtreecommitdiff
path: root/runtime/onert/core/include
diff options
context:
space:
mode:
Diffstat (limited to 'runtime/onert/core/include')
-rw-r--r--runtime/onert/core/include/backend/BackendContext.h2
-rw-r--r--runtime/onert/core/include/backend/IExternalContext.h34
-rw-r--r--runtime/onert/core/include/backend/IPortableTensor.h3
-rw-r--r--runtime/onert/core/include/backend/ITensor.h11
-rw-r--r--runtime/onert/core/include/backend/ITensorBuilder.h4
-rw-r--r--runtime/onert/core/include/backend/ITensorRegistry.h68
-rw-r--r--runtime/onert/core/include/backend/cpu_common/StaticTensorManager.h4
-rw-r--r--runtime/onert/core/include/backend/cpu_common/Tensor.h50
-rw-r--r--runtime/onert/core/include/compiler/StaticShapeInference.h2
-rw-r--r--runtime/onert/core/include/exec/DynamicShapeInference.h3
-rw-r--r--runtime/onert/core/include/ir/Operand.h8
-rw-r--r--runtime/onert/core/include/ir/Operations.Include.h3
-rw-r--r--runtime/onert/core/include/ir/Operations.lst3
-rw-r--r--runtime/onert/core/include/ir/TypeInfo.h17
-rw-r--r--runtime/onert/core/include/ir/operation/BatchToSpaceND.h3
-rw-r--r--runtime/onert/core/include/ir/operation/LogSoftmax.h2
-rw-r--r--runtime/onert/core/include/ir/operation/Pad.h2
-rw-r--r--runtime/onert/core/include/ir/operation/Quantize.h49
-rw-r--r--runtime/onert/core/include/ir/operation/ResizeBilinear.h4
-rw-r--r--runtime/onert/core/include/ir/operation/SplitV.h59
-rw-r--r--runtime/onert/core/include/ir/operation/StatelessRandomUniform.h52
-rw-r--r--runtime/onert/core/include/util/ShapeInference.h3
22 files changed, 326 insertions, 60 deletions
diff --git a/runtime/onert/core/include/backend/BackendContext.h b/runtime/onert/core/include/backend/BackendContext.h
index c82e5b7a9..c263aef2b 100644
--- a/runtime/onert/core/include/backend/BackendContext.h
+++ b/runtime/onert/core/include/backend/BackendContext.h
@@ -56,6 +56,8 @@ public:
{
}
+ virtual ~BackendContext() = default;
+
void initialize(const std::vector<OperationInfo> &operation_list,
const std::vector<ir::OperandIndex> &operand_list);
void initConsts();
diff --git a/runtime/onert/core/include/backend/IExternalContext.h b/runtime/onert/core/include/backend/IExternalContext.h
new file mode 100644
index 000000000..88ffb502c
--- /dev/null
+++ b/runtime/onert/core/include/backend/IExternalContext.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ONERT_BACKEND_IEXTERNAL_CONTEXT_H__
+#define __ONERT_BACKEND_IEXTERNAL_CONTEXT_H__
+
+namespace onert
+{
+namespace backend
+{
+
+struct IExternalContext
+{
+ virtual ~IExternalContext() = default;
+ virtual void setMaxNumThreads(int) = 0;
+};
+
+} // namespace backend
+} // namespace onert
+
+#endif // __ONERT_BACKEND_IEXTERNAL_CONTEXT_H__
diff --git a/runtime/onert/core/include/backend/IPortableTensor.h b/runtime/onert/core/include/backend/IPortableTensor.h
index 2b2d00899..a05b39a33 100644
--- a/runtime/onert/core/include/backend/IPortableTensor.h
+++ b/runtime/onert/core/include/backend/IPortableTensor.h
@@ -37,6 +37,9 @@ class IPortableTensor : public ITensor
{
public:
virtual ~IPortableTensor() = default;
+ virtual bool is_sparse() const { return false; }
+ virtual const uint16_t *w1_segments() const { return nullptr; }
+ virtual const uint16_t *w1_indices() const { return nullptr; }
public:
bool has_padding() const final { return false; }
diff --git a/runtime/onert/core/include/backend/ITensor.h b/runtime/onert/core/include/backend/ITensor.h
index 217d9debc..12b1c5433 100644
--- a/runtime/onert/core/include/backend/ITensor.h
+++ b/runtime/onert/core/include/backend/ITensor.h
@@ -32,6 +32,8 @@ namespace onert
namespace backend
{
+struct IDynamicTensorManager;
+
class ITensor
{
public:
@@ -51,6 +53,15 @@ public:
virtual void access(const std::function<void(ITensor &tensor)> &fn) = 0;
/**
+ * @brief Return the dynamic tensor manager
+ *
+ * If dynamic tensors are not supported, it returns @c nullptr .
+ *
+ * @return IDynamicTensorManager* DynamicTensorManager
+ */
+ virtual IDynamicTensorManager *dynamic_tensor_manager() { return nullptr; }
+
+ /**
* @brief Return true if the tensor is constant
*/
virtual bool is_constant() const
diff --git a/runtime/onert/core/include/backend/ITensorBuilder.h b/runtime/onert/core/include/backend/ITensorBuilder.h
index a49525ba7..b760cda0e 100644
--- a/runtime/onert/core/include/backend/ITensorBuilder.h
+++ b/runtime/onert/core/include/backend/ITensorBuilder.h
@@ -112,12 +112,12 @@ public: // methods for static tensor allocation
virtual std::shared_ptr<ITensor> tensorAt(const ir::OperandIndex &ind) = 0;
/**
- * @brief Set the External Tensor object
+ * @brief Set the migrant tensor object
*
* @return true if succeeded
* @return false if failed or unsupported
*/
- virtual bool setExternalTensor(const ir::OperandIndex &, const std::shared_ptr<IPortableTensor> &)
+ virtual bool setMigrantTensor(const ir::OperandIndex &, const std::shared_ptr<IPortableTensor> &)
{
return false;
}
diff --git a/runtime/onert/core/include/backend/ITensorRegistry.h b/runtime/onert/core/include/backend/ITensorRegistry.h
index f5a95f49c..855513124 100644
--- a/runtime/onert/core/include/backend/ITensorRegistry.h
+++ b/runtime/onert/core/include/backend/ITensorRegistry.h
@@ -35,17 +35,22 @@ struct ITensorRegistry
virtual ~ITensorRegistry() = default;
/**
- * @brief Returns pointer of ITensor among managed and external tensors
+ * @brief Returns pointer of ITensor among native and migrant tensors
+ *
+ * Native Tensor is a tensor that is managed by this backend
+ * Migrant Tensor is a tensor that is imported from another backend
+ *
* @note Return tensor cannot be used longer than dynamic tensor manager
*/
virtual std::shared_ptr<ITensor> getITensor(const ir::OperandIndex &) = 0;
/**
- * @brief Returns pointer of ITensor among managed tensors
+ * @brief Returns pointer of ITensor among native tensors
*
- * Unlike @c getITensor , this function only searches from managed tensors
- * @note Return tensor cannot be used longer than dynamic tensor manager
+ * Unlike @c getITensor , this function only searches from native tensors
+ *
+ * @note Returned tensor cannot be used longer than dynamic tensor manager
*/
- virtual std::shared_ptr<ITensor> getManagedITensor(const ir::OperandIndex &) = 0;
+ virtual std::shared_ptr<ITensor> getNativeITensor(const ir::OperandIndex &) = 0;
};
} // namespace backend
@@ -73,68 +78,67 @@ public:
std::shared_ptr<ITensor> getITensor(const ir::OperandIndex &ind) override
{
static_assert(std::is_base_of<ITensor, T_Tensor>::value, "T_Tensor must derive from ITensor.");
- auto external_tensor = _external.find(ind);
- if (external_tensor != _external.end())
+ auto external_tensor = _migrant.find(ind);
+ if (external_tensor != _migrant.end())
return external_tensor->second;
- return getManagedTensor(ind);
+ return getNativeTensor(ind);
}
- std::shared_ptr<ITensor> getManagedITensor(const ir::OperandIndex &ind) override
+ std::shared_ptr<ITensor> getNativeITensor(const ir::OperandIndex &ind) override
{
- return getManagedTensor(ind);
+ return getNativeTensor(ind);
}
std::shared_ptr<IPortableTensor> getPortableTensor(const ir::OperandIndex &ind)
{
- auto external_tensor = _external.find(ind);
- if (external_tensor != _external.end())
+ auto external_tensor = _migrant.find(ind);
+ if (external_tensor != _migrant.end())
{
if (external_tensor->second)
return external_tensor->second;
}
- return getManagedTensor(ind);
+ return getNativeTensor(ind);
}
- std::shared_ptr<T_Tensor> getManagedTensor(const ir::OperandIndex &ind)
+ std::shared_ptr<T_Tensor> getNativeTensor(const ir::OperandIndex &ind)
{
- auto tensor = _managed.find(ind);
- if (tensor != _managed.end())
+ auto tensor = _native.find(ind);
+ if (tensor != _native.end())
return tensor->second;
return nullptr;
}
- bool setExternalTensor(const ir::OperandIndex &ind,
- const std::shared_ptr<IPortableTensor> &tensor)
+ bool setMigrantTensor(const ir::OperandIndex &ind, const std::shared_ptr<IPortableTensor> &tensor)
{
// TODO Uncomment this as two tensors for an index is not allowed.
// But now it is temporarily allowed as a workaround. External one hides Managed one.
- // auto itr = _managed.find(ind);
- // if (itr != _managed.end() && itr->second != nullptr && tensor != nullptr)
+ // auto itr = _native.find(ind);
+ // if (itr != _native.end() && itr->second != nullptr && tensor != nullptr)
// throw std::runtime_error{
- // "Tried to set an external tensor but an managed tensor already exists."};
- _external[ind] = tensor;
+ // "Tried to set a migrant tensor but a native tensor already exists."};
+ _migrant[ind] = tensor;
return true;
}
- void setManagedTensor(const ir::OperandIndex &ind, const std::shared_ptr<T_Tensor> &tensor)
+ void setNativeTensor(const ir::OperandIndex &ind, const std::shared_ptr<T_Tensor> &tensor)
{
- auto itr = _external.find(ind);
- if (itr != _external.end() && itr->second != nullptr && tensor != nullptr)
+ auto itr = _migrant.find(ind);
+ if (itr != _migrant.end() && itr->second != nullptr && tensor != nullptr)
throw std::runtime_error{
- "Tried to set a managed tensor but an external tensor already exists."};
- _managed[ind] = tensor;
+ "Tried to set a native tensor but an migrant tensor already exists."};
+ _native[ind] = tensor;
}
- const ir::OperandIndexMap<std::shared_ptr<T_Tensor>> &managed_tensors() { return _managed; }
+ const ir::OperandIndexMap<std::shared_ptr<T_Tensor>> &native_tensors() { return _native; }
- const ir::OperandIndexMap<std::shared_ptr<IPortableTensor>> &external_tensors()
+ const ir::OperandIndexMap<std::shared_ptr<IPortableTensor>> &migrant_tensors()
{
- return _external;
+ return _migrant;
}
private:
- ir::OperandIndexMap<std::shared_ptr<IPortableTensor>> _external;
- ir::OperandIndexMap<std::shared_ptr<T_Tensor>> _managed;
+ ir::OperandIndexMap<std::shared_ptr<IPortableTensor>> _migrant;
+ ir::OperandIndexMap<std::shared_ptr<T_Tensor>> _native;
};
} // namespace backend
diff --git a/runtime/onert/core/include/backend/cpu_common/StaticTensorManager.h b/runtime/onert/core/include/backend/cpu_common/StaticTensorManager.h
index 6ddacc7bc..a7e034a91 100644
--- a/runtime/onert/core/include/backend/cpu_common/StaticTensorManager.h
+++ b/runtime/onert/core/include/backend/cpu_common/StaticTensorManager.h
@@ -19,7 +19,7 @@
#include "MemoryManager.h"
-#include "backend/ITensorManager.h"
+#include "backend/IStaticTensorManager.h"
#include "ir/OperandIndexMap.h"
#include "ir/OperandInfo.h"
#include "TensorRegistry.h"
@@ -31,7 +31,7 @@ namespace backend
namespace cpu_common
{
-class StaticTensorManager : public backend::ITensorManager
+class StaticTensorManager : public backend::IStaticTensorManager
{
public:
StaticTensorManager(const std::shared_ptr<TensorRegistry> &reg);
diff --git a/runtime/onert/core/include/backend/cpu_common/Tensor.h b/runtime/onert/core/include/backend/cpu_common/Tensor.h
index a0db96dc3..974501ecb 100644
--- a/runtime/onert/core/include/backend/cpu_common/Tensor.h
+++ b/runtime/onert/core/include/backend/cpu_common/Tensor.h
@@ -35,27 +35,42 @@ public:
Tensor() = delete;
public:
- Tensor(const ir::OperandInfo &info, const ir::Layout layout)
- : _info(info), _layout(layout), _buffer(nullptr), _num_references(0), _allocator(nullptr)
+ Tensor(const ir::OperandInfo &info, const ir::Layout layout,
+ IDynamicTensorManager *dynamic_tensor_manager)
+ : _info(info), _layout(layout), _buffer(nullptr), _num_references(0),
+ _dynamic_tensor_manager(dynamic_tensor_manager), _allocator(nullptr)
{
// DO NOTHING
}
public:
// Only one of two method 'setBuffer' must be called once
+
+ /**
+ * @brief Set the Buffer object. This method is called for static and non-const tensor
+ */
void setBuffer(uint8_t *buffer)
{
- assert(_buffer == nullptr && _allocator == nullptr);
+ assert(_buffer == nullptr);
_buffer = buffer;
}
+
+ /**
+ * @brief Set the Buffer object. This method is called for dynamic or const tensor
+ */
void setBuffer(const std::shared_ptr<Allocator> &alloc)
{
- assert(_buffer == nullptr && _allocator == nullptr);
+ assert(_buffer == nullptr);
_allocator = alloc;
+ _buffer = alloc->base();
}
// This works just as setBuffer but it simply overwrite existing Allocator without nullptr check
- void overwriteBuffer(const std::shared_ptr<Allocator> &alloc) { _allocator = alloc; }
+ void overwriteBuffer(const std::shared_ptr<Allocator> &alloc)
+ {
+ _allocator = alloc;
+ _buffer = alloc->base();
+ }
/**
* @brief Mark this tensor does not have memory.
@@ -68,13 +83,7 @@ public:
}
public:
- uint8_t *buffer() const override
- {
- if (_allocator != nullptr)
- return _allocator->base();
- else
- return _buffer;
- }
+ uint8_t *buffer() const override { return _buffer; }
/**
* @brief Get dimension by index
*
@@ -96,12 +105,16 @@ public:
bool is_constant() const override { return _info.isConstant(); }
bool is_dynamic() const override { return _info.isDynamic(); }
void set_dynamic() override { _info.setDynamic(); }
+ IDynamicTensorManager *dynamic_tensor_manager() override { return _dynamic_tensor_manager; }
+ bool is_sparse() const override { return _info.typeInfo().sparse(); }
+ virtual const uint16_t *w1_segments() const override { return _info.typeInfo().w1_segments(); }
+ virtual const uint16_t *w1_indices() const override { return _info.typeInfo().w1_indices(); }
virtual void increase_ref()
{
assert(is_dynamic() ||
// when not dynamic
- (_buffer != nullptr || _allocator != nullptr));
+ (_buffer != nullptr));
++_num_references;
}
@@ -110,12 +123,12 @@ public:
assert(_buffer != nullptr || _allocator != nullptr);
assert(_num_references > 0);
--_num_references;
- // Only constant tensor has allocator pointer
+ // constant tensor and dynamic tensor have _allocator
if (_num_references == 0)
{
if (_buffer != nullptr)
_buffer = nullptr;
- else
+ if (_allocator != nullptr)
{
_allocator->release();
_allocator = nullptr;
@@ -130,8 +143,15 @@ protected:
ir::Layout _layout;
uint8_t *_buffer;
int32_t _num_references;
+ IDynamicTensorManager *_dynamic_tensor_manager;
private:
+ /**
+ * @brief Memory allocator for dynamic tensor and const tensor
+ * Since maintaining _allocator and also _buffer causes confusion,
+ * we will mainly use _buffer (not _allocator.base()) for memory pointer in this code.
+ * _allocator(shared_ptr) is used to guarantee that we have valid _buffer.
+ */
std::shared_ptr<Allocator> _allocator;
};
diff --git a/runtime/onert/core/include/compiler/StaticShapeInference.h b/runtime/onert/core/include/compiler/StaticShapeInference.h
index 379143baf..bff68c9fa 100644
--- a/runtime/onert/core/include/compiler/StaticShapeInference.h
+++ b/runtime/onert/core/include/compiler/StaticShapeInference.h
@@ -99,6 +99,7 @@ private:
void visit(const ir::operation::LogicalNot &op) override;
void visit(const ir::operation::LogicalOr &op) override;
void visit(const ir::operation::Logistic &op) override;
+ void visit(const ir::operation::L2Normalization &op) override;
void visit(const ir::operation::MatrixBandPart &op) override;
void visit(const ir::operation::Max &op) override;
void visit(const ir::operation::Min &op) override;
@@ -114,6 +115,7 @@ private:
void visit(const ir::operation::Reshape &op) override;
void visit(const ir::operation::Round &op) override;
void visit(const ir::operation::RSQRT &op) override;
+ void visit(const ir::operation::ResizeBilinear &op) override;
void visit(const ir::operation::Reverse &op) override;
void visit(const ir::operation::Select &op) override;
void visit(const ir::operation::Shape &op) override;
diff --git a/runtime/onert/core/include/exec/DynamicShapeInference.h b/runtime/onert/core/include/exec/DynamicShapeInference.h
index 113c34809..bca80db09 100644
--- a/runtime/onert/core/include/exec/DynamicShapeInference.h
+++ b/runtime/onert/core/include/exec/DynamicShapeInference.h
@@ -72,6 +72,7 @@ public:
void visit(const ir::operation::LogicalNot &op) override;
void visit(const ir::operation::LogicalOr &op) override;
void visit(const ir::operation::Logistic &op) override;
+ void visit(const ir::operation::L2Normalization &op) override;
void visit(const ir::operation::MatrixBandPart &op) override;
void visit(const ir::operation::Max &op) override;
void visit(const ir::operation::Min &op) override;
@@ -88,6 +89,7 @@ public:
void visit(const ir::operation::Reshape &op) override;
void visit(const ir::operation::Round &op) override;
void visit(const ir::operation::RSQRT &op) override;
+ void visit(const ir::operation::ResizeBilinear &op) override;
void visit(const ir::operation::Reverse &op) override;
void visit(const ir::operation::Select &op) override;
void visit(const ir::operation::Shape &op) override;
@@ -127,6 +129,7 @@ private:
/**
* @brief To allocate memory for output tensor if needed
*/
+ // TODO Remove this, as it is no longer used
backend::IDynamicTensorManager *_dynamic_tensor_manager;
/**
* @brief To get tensor object and access tensor-level info, e.g., ITensor::buffer()
diff --git a/runtime/onert/core/include/ir/Operand.h b/runtime/onert/core/include/ir/Operand.h
index 53371d606..1b3a43b02 100644
--- a/runtime/onert/core/include/ir/Operand.h
+++ b/runtime/onert/core/include/ir/Operand.h
@@ -49,11 +49,11 @@ public:
size_t operandSize(void) const;
const OperationIndexSet &getUses() const { return _uses; }
- const OperationIndexSet &getDef() const { return _def; }
+ OperationIndex getDef() const { return _def; }
void insertUse(const OperationIndex &idx);
void removeUse(const OperationIndex &idx);
- void insertDef(const OperationIndex &idx);
- void removeDef(const OperationIndex &idx);
+ void setDef(const OperationIndex &idx);
+ void unsetDef();
public:
void type(const DataType type) { _info.type(type); };
@@ -107,7 +107,7 @@ private:
std::shared_ptr<Data> _data;
OperationIndexSet _uses;
- OperationIndexSet _def; // size is 0 (constant) or 1 (from def operation)
+ OperationIndex _def;
};
} // namespace ir
diff --git a/runtime/onert/core/include/ir/Operations.Include.h b/runtime/onert/core/include/ir/Operations.Include.h
index 5fac54e26..30c4ff25a 100644
--- a/runtime/onert/core/include/ir/Operations.Include.h
+++ b/runtime/onert/core/include/ir/Operations.Include.h
@@ -79,6 +79,7 @@
#include "ir/operation/Pack.h"
#include "ir/operation/Select.h"
#include "ir/operation/Split.h"
+#include "ir/operation/SplitV.h"
#include "ir/operation/Unpack.h"
#include "ir/operation/Pad.h"
#include "ir/operation/Min.h"
@@ -103,3 +104,5 @@
#include "ir/operation/BatchMatMul.h"
#include "ir/operation/FusedBatchNorm.h"
#include "ir/operation/LogSoftmax.h"
+#include "ir/operation/Quantize.h"
+#include "ir/operation/StatelessRandomUniform.h"
diff --git a/runtime/onert/core/include/ir/Operations.lst b/runtime/onert/core/include/ir/Operations.lst
index 9d0642fba..75c6d8221 100644
--- a/runtime/onert/core/include/ir/Operations.lst
+++ b/runtime/onert/core/include/ir/Operations.lst
@@ -81,6 +81,7 @@ OP(DepthToSpace)
OP(Pack)
OP(Select)
OP(Split)
+OP(SplitV)
OP(Unpack)
OP(Pad)
OP(Custom)
@@ -106,3 +107,5 @@ OP(MatrixBandPart)
OP(BatchMatMul)
OP(FusedBatchNorm)
OP(LogSoftmax)
+OP(Quantize)
+OP(StatelessRandomUniform)
diff --git a/runtime/onert/core/include/ir/TypeInfo.h b/runtime/onert/core/include/ir/TypeInfo.h
index 07d82b6a7..3f7eab4c0 100644
--- a/runtime/onert/core/include/ir/TypeInfo.h
+++ b/runtime/onert/core/include/ir/TypeInfo.h
@@ -18,6 +18,7 @@
#define __ONERT_IR_TYPEINFO_H__
#include <cstdint>
+#include <vector>
#include "ir/DataType.h"
@@ -32,7 +33,7 @@ public:
TypeInfo() = delete;
explicit TypeInfo(DataType type, float scale = 0, int32_t offset = 0)
- : _type(type), _scale(scale), _offset(offset)
+ : _type(type), _scale(scale), _offset(offset), _sparse(false)
{
}
@@ -40,14 +41,28 @@ public:
DataType type() const { return _type; }
float scale() const { return _scale; }
int32_t offset() const { return _offset; }
+ bool sparse() const { return _sparse; }
+ const uint16_t *w1_segments() const { return _w1_segments.data(); }
+ const uint16_t *w1_indices() const { return _w1_indices.data(); }
public:
void type(const DataType type) { _type = type; }
+ void sparse2DMetadata(std::vector<uint16_t> &&w1_segments, std::vector<uint16_t> &&w1_indices)
+ {
+ _sparse = true;
+ _w1_segments = w1_segments;
+ _w1_indices = w1_indices;
+ }
private:
DataType _type;
+ // for quantization
float _scale;
int32_t _offset;
+ // for sparsity
+ bool _sparse;
+ std::vector<uint16_t> _w1_segments;
+ std::vector<uint16_t> _w1_indices;
};
bool operator==(const TypeInfo &lhs, const TypeInfo &rhs);
diff --git a/runtime/onert/core/include/ir/operation/BatchToSpaceND.h b/runtime/onert/core/include/ir/operation/BatchToSpaceND.h
index bb6be57d7..3e69b42c7 100644
--- a/runtime/onert/core/include/ir/operation/BatchToSpaceND.h
+++ b/runtime/onert/core/include/ir/operation/BatchToSpaceND.h
@@ -32,7 +32,8 @@ public:
enum Input
{
INPUT = 0,
- BLOCK_SIZE = 1
+ BLOCK_SIZE = 1,
+ CROPS_DATA = 2
};
public:
diff --git a/runtime/onert/core/include/ir/operation/LogSoftmax.h b/runtime/onert/core/include/ir/operation/LogSoftmax.h
index 26a92d7f8..391b4ba4a 100644
--- a/runtime/onert/core/include/ir/operation/LogSoftmax.h
+++ b/runtime/onert/core/include/ir/operation/LogSoftmax.h
@@ -48,7 +48,7 @@ public:
public:
void accept(OperationVisitor &v) const override;
- OpCode opcode() const final { return OpCode::Softmax; }
+ OpCode opcode() const final { return OpCode::LogSoftmax; }
public:
const Param &param() const { return _param; }
diff --git a/runtime/onert/core/include/ir/operation/Pad.h b/runtime/onert/core/include/ir/operation/Pad.h
index a48606196..00481cd50 100644
--- a/runtime/onert/core/include/ir/operation/Pad.h
+++ b/runtime/onert/core/include/ir/operation/Pad.h
@@ -33,7 +33,7 @@ public:
{
INPUT = 0,
PAD = 1,
- // VALUE = 2 Not allow padding value operand yet
+ VALUE = 2
};
public:
diff --git a/runtime/onert/core/include/ir/operation/Quantize.h b/runtime/onert/core/include/ir/operation/Quantize.h
new file mode 100644
index 000000000..2533ce432
--- /dev/null
+++ b/runtime/onert/core/include/ir/operation/Quantize.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ONERT_IR_OPERATION_QUANTIZE_H__
+#define __ONERT_IR_OPERATION_QUANTIZE_H__
+
+#include "ir/Operation.h"
+
+namespace onert
+{
+namespace ir
+{
+namespace operation
+{
+
+class Quantize : public Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0,
+ };
+
+public:
+ Quantize(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ OpCode opcode() const final { return OpCode::Quantize; }
+};
+
+} // namespace operation
+} // namespace ir
+} // namespace onert
+
+#endif // __ONERT_IR_OPERATION_QUANTIZE_H__
diff --git a/runtime/onert/core/include/ir/operation/ResizeBilinear.h b/runtime/onert/core/include/ir/operation/ResizeBilinear.h
index 2887ed845..29aa496d7 100644
--- a/runtime/onert/core/include/ir/operation/ResizeBilinear.h
+++ b/runtime/onert/core/include/ir/operation/ResizeBilinear.h
@@ -33,13 +33,15 @@ class ResizeBilinear : public Operation
public:
enum Input
{
- INPUT = 0
+ INPUT = 0,
};
struct Param
{
int32_t height_out;
int32_t width_out;
+ bool align_corners;
+ bool half_pixel_centers;
};
public:
diff --git a/runtime/onert/core/include/ir/operation/SplitV.h b/runtime/onert/core/include/ir/operation/SplitV.h
new file mode 100644
index 000000000..99a06ee7f
--- /dev/null
+++ b/runtime/onert/core/include/ir/operation/SplitV.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __ONERT_IR_OPERATION_SPLIT_V_H__
+#define __ONERT_IR_OPERATION_SPLIT_V_H__
+
+#include "ir/Operation.h"
+
+namespace onert
+{
+namespace ir
+{
+namespace operation
+{
+class SplitV : public Operation
+{
+public:
+ enum Input
+ {
+ INPUT = 0,
+ SIZE_SPLITS = 1,
+ SPLIT_DIM = 2
+ };
+
+ struct Param
+ {
+ int num_splits;
+ };
+
+public:
+ SplitV(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+ const Param &param);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ OpCode opcode() const final { return OpCode::SplitV; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+} // namespace operation
+} // namespace ir
+} // namespace onert
+#endif // __ONERT_IR_OPERATION_SPLIT_V_H__
diff --git a/runtime/onert/core/include/ir/operation/StatelessRandomUniform.h b/runtime/onert/core/include/ir/operation/StatelessRandomUniform.h
new file mode 100644
index 000000000..112a748fd
--- /dev/null
+++ b/runtime/onert/core/include/ir/operation/StatelessRandomUniform.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ONERT_IR_OPERATION_STATELESS_RANDOM_UNIFORM_H__
+#define __ONERT_IR_OPERATION_STATELESS_RANDOM_UNIFORM_H__
+
+#include <memory>
+
+#include "ir/Operation.h"
+
+namespace onert
+{
+namespace ir
+{
+namespace operation
+{
+
+class StatelessRandomUniform : public Operation
+{
+public:
+ enum Input
+ {
+ SHAPE = 0,
+ SEED = 1
+ };
+
+public:
+ StatelessRandomUniform(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs);
+
+public:
+ void accept(OperationVisitor &v) const override;
+ OpCode opcode() const final { return OpCode::StatelessRandomUniform; }
+};
+
+} // namespace operation
+} // namespace ir
+} // namespace onert
+
+#endif // __ONERT_IR_OPERATION_STATELESS_RANDOM_UNIFORM_H__
diff --git a/runtime/onert/core/include/util/ShapeInference.h b/runtime/onert/core/include/util/ShapeInference.h
index 0d4525144..a68c22b16 100644
--- a/runtime/onert/core/include/util/ShapeInference.h
+++ b/runtime/onert/core/include/util/ShapeInference.h
@@ -95,6 +95,9 @@ template <float *> ir::Shape inferRangeShape(float *start_val, float *limit_val,
template <typename T> ir::Shape inferRangeShape(T start_val, T limit_val, T delta_val);
+ir::Shape inferResizeBilinearShape(const ir::Shape &in_shape, const int32_t output_height,
+ const int32_t output_width);
+
ir::Shape inferSelectShape(const ir::Shape &input_cond_shape, const ir::Shape &input_true_shape,
const ir::Shape &input_false_shape);