summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--runtimes/neurun/src/backend/acl_cl/StageGenerator.cc40
-rw-r--r--runtimes/neurun/src/backend/cpu/StageGenerator.cc44
-rw-r--r--runtimes/neurun/src/frontend/model.cc25
-rw-r--r--runtimes/neurun/src/model/operation/Conv2DNode.cc54
-rw-r--r--runtimes/neurun/src/model/operation/Conv2DNode.h10
-rw-r--r--tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun6
-rw-r--r--tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun.cpu6
7 files changed, 123 insertions, 62 deletions
diff --git a/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc b/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc
index 754d17ce8..043bf6b16 100644
--- a/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc
+++ b/runtimes/neurun/src/backend/acl_cl/StageGenerator.cc
@@ -184,19 +184,12 @@ void StageGenerator::visit(const model::operation::Conv2DNode &node)
const auto vstride_index{node.param().vstride_index};
const auto hstride_index{node.param().hstride_index};
- const auto padding_index{node.param().padding_index};
const auto activation_index{node.param().activation_index};
const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
const auto ker_shape = _ctx.at(ker_index).shape().asKernel();
- const PaddingCode padding_type =
- static_cast<PaddingCode>(_ctx.at(padding_index).asScalar<int32_t>());
-
- assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
- (ANEURALNETWORKS_PADDING_VALID == padding_type));
-
neurun::util::Stride stride;
stride.vertical = _ctx.at(vstride_index).asScalar<int32_t>();
@@ -224,10 +217,35 @@ void StageGenerator::visit(const model::operation::Conv2DNode &node)
param.bias_index = bias_index;
param.stride = stride;
- param.padding =
- (padding_type == ANEURALNETWORKS_PADDING_SAME)
- ? neurun::util::same_padding(ifm_shape, ofm_shape, stride, ker_shape.W, ker_shape.H)
- : neurun::util::valid_padding();
+
+ // TODO : Extract this to a function
+ param.padding = [&]() {
+ if (!node.param().explicit_padding) // implicit padding
+ {
+ const auto padding_code_index{node.param().padding_code_index};
+
+ const PaddingCode padding_type =
+ static_cast<PaddingCode>(_ctx.at(padding_code_index).asScalar<int32_t>());
+
+ assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
+ (ANEURALNETWORKS_PADDING_VALID == padding_type));
+
+ return (padding_type == ANEURALNETWORKS_PADDING_SAME)
+ ? neurun::util::same_padding(ifm_shape, ofm_shape, stride, ker_shape.W,
+ ker_shape.H)
+ : neurun::util::valid_padding();
+ }
+ else // explicit padding
+ {
+ neurun::util::Padding padding;
+ padding.left = _ctx.at({node.param().padding_left_index}).asScalar<int32_t>();
+ padding.right = _ctx.at({node.param().padding_right_index}).asScalar<int32_t>();
+ padding.top = _ctx.at({node.param().padding_top_index}).asScalar<int32_t>();
+ padding.bottom = _ctx.at({node.param().padding_bottom_index}).asScalar<int32_t>();
+
+ return padding;
+ }
+ }();
param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
diff --git a/runtimes/neurun/src/backend/cpu/StageGenerator.cc b/runtimes/neurun/src/backend/cpu/StageGenerator.cc
index 1591aa323..dd9bdd311 100644
--- a/runtimes/neurun/src/backend/cpu/StageGenerator.cc
+++ b/runtimes/neurun/src/backend/cpu/StageGenerator.cc
@@ -62,15 +62,8 @@ void StageGenerator::visit(const model::operation::Conv2DNode &node)
const auto vstride_index{node.param().vstride_index};
const auto hstride_index{node.param().hstride_index};
- const auto padding_index{node.param().padding_index};
const auto activation_index{node.param().activation_index};
- const PaddingCode padding_type =
- static_cast<PaddingCode>(_ctx.at(padding_index).asScalar<int32_t>());
-
- assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
- (ANEURALNETWORKS_PADDING_VALID == padding_type));
-
util::Stride stride;
stride.vertical = _ctx.at(vstride_index).asScalar<int32_t>();
@@ -108,12 +101,37 @@ void StageGenerator::visit(const model::operation::Conv2DNode &node)
param.bias_shape = ::neurun::backend::cpu::kernel::getShape(_ctx.at(bias_index));
param.stride = stride;
- param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
- ? util::same_padding(_ctx.at(ifm_index).shape().asFeature(),
- _ctx.at(ofm_index).shape().asFeature(), stride,
- _ctx.at(ker_index).shape().asKernel().W,
- _ctx.at(ker_index).shape().asKernel().H)
- : util::valid_padding();
+
+ // TODO : Extract this to a function
+ param.padding = [&]() {
+ if (!node.param().explicit_padding) // implicit padding
+ {
+ const auto padding_code_index{node.param().padding_code_index};
+
+ const PaddingCode padding_type =
+ static_cast<PaddingCode>(_ctx.at(padding_code_index).asScalar<int32_t>());
+
+ assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
+ (ANEURALNETWORKS_PADDING_VALID == padding_type));
+
+ return (padding_type == ANEURALNETWORKS_PADDING_SAME)
+ ? neurun::util::same_padding(_ctx.at(ifm_index).shape().asFeature(),
+ _ctx.at(ofm_index).shape().asFeature(), stride,
+ _ctx.at(ker_index).shape().asKernel().W,
+ _ctx.at(ker_index).shape().asKernel().H)
+ : neurun::util::valid_padding();
+ }
+ else // explicit padding
+ {
+ neurun::util::Padding padding;
+ padding.left = _ctx.at({node.param().padding_left_index}).asScalar<int32_t>();
+ padding.right = _ctx.at({node.param().padding_right_index}).asScalar<int32_t>();
+ padding.top = _ctx.at({node.param().padding_top_index}).asScalar<int32_t>();
+ padding.bottom = _ctx.at({node.param().padding_bottom_index}).asScalar<int32_t>();
+
+ return padding;
+ }
+ }();
param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
diff --git a/runtimes/neurun/src/frontend/model.cc b/runtimes/neurun/src/frontend/model.cc
index 44c92e8f1..5124681eb 100644
--- a/runtimes/neurun/src/frontend/model.cc
+++ b/runtimes/neurun/src/frontend/model.cc
@@ -76,10 +76,16 @@ int ANeuralNetworksModel_addOperand(ANeuralNetworksModel *model,
return ANEURALNETWORKS_BAD_DATA;
}
}
- else if ((type->scale != 0.0f) || (type->zeroPoint != 0))
- {
- return ANEURALNETWORKS_BAD_DATA;
- }
+ // NOTE Validation of scale and zeroPoint would be skipped for a while.
+ // We do not know whether scalar type can have scale and zeroPoint.
+ // To pass ValidationTest and GeneratedTest, this validation code
+ // would not be implemented until we can define this issue clearly.
+ //
+ // scale and zeroPoint should be zero for scalars and non-fixed point tensors
+ // else if ((type->scale != 0.0f) || (type->zeroPoint != 0))
+ // {
+ // return ANEURALNETWORKS_BAD_DATA;
+ // }
// dimensionCount should be zero for scalars
if ((type->dimensionCount != 0) &&
@@ -278,16 +284,9 @@ int ANeuralNetworksModel_addOperation(ANeuralNetworksModel *model,
assert(inputCount == 7 || inputCount == 10);
assert(outputCount == 1);
- if (inputCount == 7)
- {
- using GraphNode = neurun::model::operation::Conv2DNode;
+ using GraphNode = neurun::model::operation::Conv2DNode;
- graph.addOperation(nnfw::cpp14::make_unique<GraphNode>(node_param));
- }
- else
- {
- throw std::runtime_error{"Explicit padding in Conv2D is not supported, yet"};
- }
+ graph.addOperation(nnfw::cpp14::make_unique<GraphNode>(node_param));
break;
}
diff --git a/runtimes/neurun/src/model/operation/Conv2DNode.cc b/runtimes/neurun/src/model/operation/Conv2DNode.cc
index 7eb2b183d..dd0b661af 100644
--- a/runtimes/neurun/src/model/operation/Conv2DNode.cc
+++ b/runtimes/neurun/src/model/operation/Conv2DNode.cc
@@ -32,26 +32,56 @@ void Conv2DNode::accept(NodeVisitor &&v) const { v.visit(*this); }
Conv2DNode::Conv2DNode(const model::operation::Node::InitParam &init_param)
: model::operation::Node{OperandConstraint::createExact(3u)}
{
- assert(init_param.input_count == 7 && init_param.output_count == 1);
+ assert(init_param.input_count == 7 || init_param.input_count == 10);
+ assert(init_param.output_count == 1);
- // Each input should be interpreted as follows:
- //
- //
// 0 -> IFM Tensor Index
// 1 -> Kernel Tensor Index
// 2 -> Bias Tensor Index
- // 3 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
- // 4 -> Stride (width) Index
- // 5 -> Stride (height) INdex
- // 6 -> Activation Index
setInputs({init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]});
setOutputs({init_param.outputs[0]});
- _param.padding_index = operand::Index{init_param.inputs[3]};
- _param.hstride_index = operand::Index{init_param.inputs[4]};
- _param.vstride_index = operand::Index{init_param.inputs[5]};
- _param.activation_index = operand::Index{init_param.inputs[6]};
+ if (init_param.input_count == 7) // support implicit padding
+ {
+ // Each input should be interpreted as follows:
+ //
+ // 3 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
+ // 4 -> Stride (width) Index
+ // 5 -> Stride (height) INdex
+ // 6 -> Activation Index
+
+ _param.explicit_padding = false;
+
+ _param.padding_code_index = operand::Index{init_param.inputs[3]};
+ _param.hstride_index = operand::Index{init_param.inputs[4]};
+ _param.vstride_index = operand::Index{init_param.inputs[5]};
+ _param.activation_index = operand::Index{init_param.inputs[6]};
+ }
+ else if (init_param.input_count == 10) // support explicit padding
+ {
+ // Each input should be interpreted as follows:
+ //
+ // 3 -> Padding_left index
+ // 4 -> Padding_right index
+ // 5 -> Padding_top index
+ // 6 -> Padding_bottom index
+ // 7 -> Stride (width) Index
+ // 8 -> Stride (height) INdex
+ // 9 -> Activation Index
+
+ _param.explicit_padding = true;
+
+ _param.padding_left_index = operand::Index{init_param.inputs[3]};
+ _param.padding_right_index = operand::Index{init_param.inputs[4]};
+ _param.padding_top_index = operand::Index{init_param.inputs[5]};
+ _param.padding_bottom_index = operand::Index{init_param.inputs[6]};
+
+ _param.hstride_index = operand::Index{init_param.inputs[7]};
+ _param.vstride_index = operand::Index{init_param.inputs[8]};
+
+ _param.activation_index = operand::Index{init_param.inputs[9]};
+ }
}
} // namespace operation
diff --git a/runtimes/neurun/src/model/operation/Conv2DNode.h b/runtimes/neurun/src/model/operation/Conv2DNode.h
index 34a95f0d9..58f72ab62 100644
--- a/runtimes/neurun/src/model/operation/Conv2DNode.h
+++ b/runtimes/neurun/src/model/operation/Conv2DNode.h
@@ -45,8 +45,16 @@ public:
operand::Index hstride_index;
operand::Index vstride_index;
- operand::Index padding_index;
+ operand::Index padding_code_index;
+
+ operand::Index padding_left_index;
+ operand::Index padding_right_index;
+ operand::Index padding_top_index;
+ operand::Index padding_bottom_index;
+
operand::Index activation_index;
+
+ bool explicit_padding;
};
public:
diff --git a/tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun b/tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun
index b766b773a..033f4586b 100644
--- a/tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun
+++ b/tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun
@@ -13,12 +13,6 @@ ValidationTestExecution.SetOutputFromMemory
ValidationTestExecution.StartCompute
ValidationTestExecution.EventWait
GeneratedTests.argmax*
-GeneratedTests.conv_float_channels
-GeneratedTests.conv_float_channels_weights_as_inputs
-GeneratedTests.conv_float_large
-GeneratedTests.conv_float_large_weights_as_inputs
-GeneratedTests.conv_float
-GeneratedTests.conv_float_weights_as_inputs
GeneratedTests.conv_quant8_channels
GeneratedTests.conv_quant8_channels_weights_as_inputs
GeneratedTests.conv_quant8_large
diff --git a/tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun.cpu b/tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun.cpu
index 15ff36a9b..db98effc0 100644
--- a/tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun.cpu
+++ b/tests/nnapi/nnapi_gtest.skip.armv7l-linux.neurun.cpu
@@ -27,12 +27,6 @@ GeneratedTests.avg_pool_quant8_1
GeneratedTests.avg_pool_quant8_2
GeneratedTests.avg_pool_quant8_3
GeneratedTests.avg_pool_quant8_4
-GeneratedTests.conv_float_channels
-GeneratedTests.conv_float_channels_weights_as_inputs
-GeneratedTests.conv_float_large
-GeneratedTests.conv_float_large_weights_as_inputs
-GeneratedTests.conv_float
-GeneratedTests.conv_float_weights_as_inputs
GeneratedTests.conv_quant8_channels
GeneratedTests.conv_quant8_channels_weights_as_inputs
GeneratedTests.conv_quant8_large