path: root/runtime/onert/backend/cpu/ops/ElementwiseActivationLayer.cc
author    Chunseok Lee <chunseok.lee@samsung.com>  2021-04-20 18:01:41 +0900
committer Chunseok Lee <chunseok.lee@samsung.com>  2021-04-20 18:01:41 +0900
commit    589bb1db6db6784efe21b3fbbfbfdb79aaa5f14e (patch)
tree      47a2b23ce4220e3a4150c8b12ed941555272fb0c /runtime/onert/backend/cpu/ops/ElementwiseActivationLayer.cc
parent    62529acabbafce7730601ed01d5709d7bc0d378a (diff)
download  nnfw-589bb1db6db6784efe21b3fbbfbfdb79aaa5f14e.tar.gz
nnfw-589bb1db6db6784efe21b3fbbfbfdb79aaa5f14e.tar.bz2
nnfw-589bb1db6db6784efe21b3fbbfbfdb79aaa5f14e.zip
Diffstat (limited to 'runtime/onert/backend/cpu/ops/ElementwiseActivationLayer.cc')
-rw-r--r--  runtime/onert/backend/cpu/ops/ElementwiseActivationLayer.cc  43
1 file changed, 19 insertions(+), 24 deletions(-)
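
The changes below follow two mechanical patterns: calls of the form reinterpret_cast<T *>(tensor->buffer()) paired with getTensorShape() are replaced by typed getBuffer<T>() and getShape() accessors, and the quantization accessor data_offset() is renamed to data_zero_point(). A minimal sketch of what such typed buffer accessors plausibly look like follows; IPortableTensor is reduced to a small stand-in here and the helper bodies are assumptions for illustration, not the actual onert implementation.

#include <cstdint>
#include <vector>

// Illustrative stand-in for onert's IPortableTensor; only the raw byte
// buffer needed by the accessors below is modeled.
struct IPortableTensor
{
  std::vector<uint8_t> data;
  uint8_t *buffer() { return data.data(); }
  const uint8_t *buffer() const { return data.data(); }
};

// Assumed shape of the getBuffer<T>() helpers the diff switches to: they
// centralize the reinterpret_cast that the old code repeated at every
// kernel call site.
template <typename T> const T *getBuffer(const IPortableTensor *tensor)
{
  return reinterpret_cast<const T *>(tensor->buffer());
}

template <typename T> T *getBuffer(IPortableTensor *tensor)
{
  return reinterpret_cast<T *>(tensor->buffer());
}

With accessors like these, a kernel invocation reads as nnfw::cker::ReLU(getShape(input), getBuffer<float>(input), getShape(output), getBuffer<float>(output)), which is the form the new lines in the hunks below take.
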
diff --git a/runtime/onert/backend/cpu/ops/ElementwiseActivationLayer.cc b/runtime/onert/backend/cpu/ops/ElementwiseActivationLayer.cc
index 3e1da5ec0..27b2cdf68 100644
--- a/runtime/onert/backend/cpu/ops/ElementwiseActivationLayer.cc
+++ b/runtime/onert/backend/cpu/ops/ElementwiseActivationLayer.cc
@@ -35,7 +35,7 @@ namespace ops
{
ElementwiseActivationLayer::ElementwiseActivationLayer()
- : _input(nullptr), _output(nullptr), _kernel()
+ : _input(nullptr), _output(nullptr), _kernel()
{
// DO NOTHING
}
@@ -43,9 +43,9 @@ ElementwiseActivationLayer::ElementwiseActivationLayer()
void ElementwiseActivationLayer::PopulateLookupTable(const ElementwiseActivationType op_type)
{
const auto input_scale = static_cast<double>(_input->data_scale());
- const auto input_zero_point = static_cast<int32_t>(_input->data_offset());
+ const auto input_zero_point = static_cast<int32_t>(_input->data_zero_point());
const auto output_scale = static_cast<double>(_output->data_scale());
- const auto output_zero_point = static_cast<int32_t>(_output->data_offset());
+ const auto output_zero_point = static_cast<int32_t>(_output->data_zero_point());
const float inverse_scale = 1 / output_scale;
int32_t maxval = std::numeric_limits<uint8_t>::max();
int32_t minval = std::numeric_limits<uint8_t>::min();
@@ -74,9 +74,9 @@ void ElementwiseActivationLayer::PopulateLookupTable(const ElementwiseActivation
void ElementwiseActivationLayer::EvalUsingLookupTable(const IPortableTensor *input,
IPortableTensor *output)
{
- const int size = MatchingFlatSize(getTensorShape(input), getTensorShape(output));
- const uint8_t *input_data = reinterpret_cast<const uint8_t *>(input->buffer());
- uint8_t *output_data = reinterpret_cast<uint8_t *>(output->buffer());
+ const int size = MatchingFlatSize(getShape(input), getShape(output));
+ const uint8_t *input_data = getBuffer<uint8_t>(input);
+ uint8_t *output_data = getBuffer<uint8_t>(output);
for (int i = 0; i < size; ++i)
{
@@ -97,8 +97,8 @@ void ElementwiseActivationLayer::configure(const IPortableTensor *input, IPortab
if (input->data_type() == OperandType::FLOAT32)
{
_kernel = [](const IPortableTensor *input, IPortableTensor *output) {
- nnfw::cker::ELU(getTensorShape(input), reinterpret_cast<const float *>(input->buffer()),
- getTensorShape(output), reinterpret_cast<float *>(output->buffer()));
+ nnfw::cker::ELU(getShape(input), getBuffer<float>(input), getShape(output),
+ getBuffer<float>(output));
};
}
else
@@ -116,9 +116,8 @@ void ElementwiseActivationLayer::configure(const IPortableTensor *input, IPortab
else if (_input->data_type() == OperandType::FLOAT32)
{
_kernel = [](const IPortableTensor *input, IPortableTensor *output) {
- nnfw::cker::Logistic(getTensorShape(input),
- reinterpret_cast<const float *>(input->buffer()),
- getTensorShape(output), reinterpret_cast<float *>(output->buffer()));
+ nnfw::cker::Logistic(getShape(input), getBuffer<float>(input), getShape(output),
+ getBuffer<float>(output));
};
}
else
@@ -132,23 +131,20 @@ void ElementwiseActivationLayer::configure(const IPortableTensor *input, IPortab
if (alpha == std::numeric_limits<float>::infinity() && beta == 0.f)
{
_kernel = [](const IPortableTensor *input, IPortableTensor *output) {
- nnfw::cker::ReLU(getTensorShape(input),
- reinterpret_cast<const float *>(input->buffer()),
- getTensorShape(output), reinterpret_cast<float *>(output->buffer()));
+ nnfw::cker::ReLU(getShape(input), getBuffer<float>(input), getShape(output),
+ getBuffer<float>(output));
};
}
else if (alpha == 6.f && beta == 0.f)
{
_kernel = [](const IPortableTensor *input, IPortableTensor *output) {
- nnfw::cker::ReLU6(getTensorShape(input),
- reinterpret_cast<const float *>(input->buffer()),
- reinterpret_cast<float *>(output->buffer()));
+ nnfw::cker::ReLU6(getShape(input), getBuffer<float>(input), getBuffer<float>(output));
};
}
else
{
throw std::runtime_error(
- "ElementwiseActivationLayer : This layer suppports only ReLU(0-inf) and ReLU6(0-6)");
+ "ElementwiseActivationLayer : This layer suppports only ReLU(0-inf) and ReLU6(0-6)");
}
}
else
@@ -166,8 +162,8 @@ void ElementwiseActivationLayer::configure(const IPortableTensor *input, IPortab
else if (_input->data_type() == OperandType::FLOAT32)
{
_kernel = [](const IPortableTensor *input, IPortableTensor *output) {
- nnfw::cker::Tanh(getTensorShape(input), reinterpret_cast<const float *>(input->buffer()),
- getTensorShape(output), reinterpret_cast<float *>(output->buffer()));
+ nnfw::cker::Tanh(getShape(input), getBuffer<float>(input), getShape(output),
+ getBuffer<float>(output));
};
}
else
@@ -179,10 +175,9 @@ void ElementwiseActivationLayer::configure(const IPortableTensor *input, IPortab
if (_input->data_type() == OperandType::FLOAT32)
{
_kernel = [alpha](const IPortableTensor *input, IPortableTensor *output) {
- nnfw::cker::LeakyReLU(nnfw::cker::LeakyReluParams{alpha}, getTensorShape(input),
- reinterpret_cast<const float *>(input->buffer()),
- getTensorShape(output),
- reinterpret_cast<float *>(output->buffer()));
+ nnfw::cker::LeakyReLU(nnfw::cker::LeakyReluParams{alpha}, getShape(input),
+ getBuffer<float>(input), getShape(output),
+ getBuffer<float>(output));
};
}
else
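
// Hedged sketch of the quantized path that PopulateLookupTable and
// EvalUsingLookupTable in this file implement for uint8 tensors: a
// 256-entry table is built once from the input/output scale and zero
// point (the values behind the renamed data_zero_point() accessor), and
// evaluation then maps each input byte through the table. The helper
// below illustrates that pattern under simplified names; it is not
// onert's actual code.
#include <algorithm>
#include <array>
#include <cmath>
#include <cstdint>
#include <limits>

std::array<uint8_t, 256> populateLookupTable(double input_scale, int32_t input_zero_point,
                                             double output_scale, int32_t output_zero_point,
                                             float (*activation)(float))
{
  std::array<uint8_t, 256> table;
  const double inverse_scale = 1.0 / output_scale;
  const int32_t maxval = std::numeric_limits<uint8_t>::max();
  const int32_t minval = std::numeric_limits<uint8_t>::min();
  for (int32_t val = minval; val <= maxval; ++val)
  {
    // Dequantize the input byte, apply the activation, requantize the result.
    const float dequantized = static_cast<float>(input_scale * (val - input_zero_point));
    const float transformed = activation(dequantized);
    const int32_t rescaled =
      static_cast<int32_t>(std::round(transformed * inverse_scale)) + output_zero_point;
    table[val] = static_cast<uint8_t>(std::min(std::max(rescaled, minval), maxval));
  }
  return table;
}

// Usage sketch: build the table once in configure(), then evaluation is a
// per-element lookup, e.g. output_data[i] = table[input_data[i]].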