Diffstat (limited to 'runtime/onert/backend/cpu/ops/SpaceToBatchNDLayer.cc')
-rw-r--r-- runtime/onert/backend/cpu/ops/SpaceToBatchNDLayer.cc | 28
1 file changed, 13 insertions(+), 15 deletions(-)
diff --git a/runtime/onert/backend/cpu/ops/SpaceToBatchNDLayer.cc b/runtime/onert/backend/cpu/ops/SpaceToBatchNDLayer.cc
index 896e262ba..8dd0a01a5 100644
--- a/runtime/onert/backend/cpu/ops/SpaceToBatchNDLayer.cc
+++ b/runtime/onert/backend/cpu/ops/SpaceToBatchNDLayer.cc
@@ -29,7 +29,7 @@ namespace cpu
namespace ops
{
SpaceToBatchNDLayer::SpaceToBatchNDLayer()
- : _input(nullptr), _block_shape(nullptr), _padding(nullptr), _output(nullptr)
+ : _input(nullptr), _block_shape(nullptr), _padding(nullptr), _output(nullptr)
{
// DO NOTHING
}
@@ -38,7 +38,7 @@ SpaceToBatchNDLayer::SpaceToBatchNDLayer()
void SpaceToBatchNDLayer::checkDimension()
{
const int kSpatialDimensionNum = 2;
- if (_block_shape->dimension(0) != kSpatialDimensionNum)
+ if (_block_shape->getShape().dim(0) != kSpatialDimensionNum)
{
throw std::runtime_error("SpaceToBatchND : block_shape(block_size) tensor's rank is wrong\n");
}
@@ -47,18 +47,17 @@ void SpaceToBatchNDLayer::checkDimension()
// shape height and width.
for (int dim = 0; dim < kSpatialDimensionNum; ++dim)
{
- int final_dim_size =
- (_input->dimension(dim + 1) + reinterpret_cast<int32_t *>(_padding->buffer())[dim * 2] +
- reinterpret_cast<int32_t *>(_padding->buffer())[dim * 2 + 1]);
+ int final_dim_size = (_input->getShape().dim(dim + 1) + getBuffer<int32_t>(_padding)[dim * 2] +
+ getBuffer<int32_t>(_padding)[dim * 2 + 1]);
- if (final_dim_size % reinterpret_cast<int32_t *>(_block_shape->buffer())[dim] != 0)
+ if (final_dim_size % getBuffer<int32_t>(_block_shape)[dim] != 0)
{
throw std::runtime_error(
- "SpaceToBatchND : padded input's dimension is not a multiple of block size\n");
+ "SpaceToBatchND : padded input's dimension is not a multiple of block size\n");
}
- if ((int32_t)_output->dimension(dim + 1) !=
- final_dim_size / reinterpret_cast<int32_t *>(_block_shape->buffer())[dim])
+ if ((int32_t)_output->getShape().dim(dim + 1) !=
+ final_dim_size / getBuffer<int32_t>(_block_shape)[dim])
{
throw std::runtime_error("SpaceToBatchND : wrong output dimension\n");
}
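
As a concrete illustration of the checks above (values are illustrative, not taken from this diff): for an NHWC input of shape {1, 4, 6, 3} with block_shape {2, 3} and zero padding, final_dim_size is 4 for height and 6 for width, both divisible by their block sizes, and the expected output shape is {6, 2, 2, 3} (batch 1 * 2 * 3, height 4 / 2, width 6 / 3, channels unchanged).
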
@@ -66,7 +65,7 @@ void SpaceToBatchNDLayer::checkDimension()
}
template <> uint32_t SpaceToBatchNDLayer::getPad<float>() { return 0; }
-template <> uint32_t SpaceToBatchNDLayer::getPad<uint8_t>() { return _output->data_offset(); }
+template <> uint32_t SpaceToBatchNDLayer::getPad<uint8_t>() { return _output->data_zero_point(); }
template <typename T> void SpaceToBatchNDLayer::spaceToBatchND()
{
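
On the quantized uint8_t path, getPad now returns the output tensor's quantization zero point; that value is written into the padded region (it is passed as params.output_offset below), so padded elements dequantize to 0.0. A minimal sketch of the standard affine dequantization this relies on (helper name and values are illustrative, not part of this diff):

// real = scale * (quantized - zero_point); writing zero_point into the
// padded area therefore dequantizes to 0.0f.
inline float dequantize(uint8_t q, float scale, int32_t zero_point)
{
  return scale * (static_cast<int32_t>(q) - zero_point);
}
// e.g. with scale = 0.5f and zero_point = 128: dequantize(128, 0.5f, 128) == 0.0f
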
@@ -75,11 +74,10 @@ template <typename T> void SpaceToBatchNDLayer::spaceToBatchND()
nnfw::cker::SpaceToBatchParams params;
params.output_offset = getPad<T>();
- nnfw::cker::SpaceToBatchND(
- params, getTensorShape(_input), reinterpret_cast<const T *>(_input->buffer()),
- getTensorShape(_block_shape), reinterpret_cast<const int32_t *>(_block_shape->buffer()),
- getTensorShape(_padding), reinterpret_cast<const int32_t *>(_padding->buffer()),
- getTensorShape(_output), reinterpret_cast<T *>(_output->buffer()));
+ nnfw::cker::SpaceToBatchND(params, getShape(_input), getBuffer<T>(_input), getShape(_block_shape),
+ getBuffer<int32_t>(_block_shape), getShape(_padding),
+ getBuffer<int32_t>(_padding), getShape(_output),
+ getBuffer<T>(_output));
}
void SpaceToBatchNDLayer::configure(const IPortableTensor *input,
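
Throughout this diff the raw reinterpret_cast on buffer() is replaced by getBuffer<T>() and getTensorShape() by getShape(); these are presumably small typed helpers shared by the cpu ops. A minimal sketch of what such an accessor could look like, assuming a buffer() accessor as used in the removed lines and a total_size() byte-count accessor (the latter is an assumption, not shown in this diff):

#include <cassert>

template <typename T> const T *getBuffer(const IPortableTensor *tensor)
{
  // Reinterpret the tensor's raw byte buffer as elements of T.
  assert(tensor->total_size() % sizeof(T) == 0);
  return reinterpret_cast<const T *>(tensor->buffer());
}
// A non-const overload returning T * would cover writable outputs such as _output.
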