Diffstat (limited to 'runtime/contrib/pure_arm_compute/src/compilation.cc')
-rw-r--r--  runtime/contrib/pure_arm_compute/src/compilation.cc | 159
1 file changed, 77 insertions(+), 82 deletions(-)
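The commit is a mechanical migration: every call to the project-local nnfw::cpp14::make_unique<T>() is replaced with the standard std::make_unique<T>() (available since C++14), and the <cpp14/memory.h> include is swapped for <memory>. A minimal sketch of the before/after pattern, compiled as C++14, using a hypothetical FakeLayer stand-in rather than the real arm_compute classes:

    #include <memory>
    #include <utility>

    // Hypothetical stand-in for an arm_compute layer such as
    // ::arm_compute::CLActivationLayer; the real configure() overloads
    // take tensor arguments, omitted here for brevity.
    struct FakeLayer
    {
      void configure() {}
    };

    int main()
    {
      // Before this commit: auto fn = nnfw::cpp14::make_unique<FakeLayer>();
      // After: the C++14 standard library provides the same helper.
      auto fn = std::make_unique<FakeLayer>();
      fn->configure();

      // Ownership transfer into the execution builder is unchanged:
      std::unique_ptr<FakeLayer> owned = std::move(fn);
      return 0;
    }

Behavior is identical in both forms; std::unique_ptr ownership and the std::move transfers are untouched, so each hunk below changes only which make_unique is named.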
diff --git a/runtime/contrib/pure_arm_compute/src/compilation.cc b/runtime/contrib/pure_arm_compute/src/compilation.cc
index 8cc86ebae..b97fab547 100644
--- a/runtime/contrib/pure_arm_compute/src/compilation.cc
+++ b/runtime/contrib/pure_arm_compute/src/compilation.cc
@@ -57,7 +57,7 @@
#include "misc/feature/IndexIterator.h"
#include "misc/tensor/IndexIterator.h"
-#include <cpp14/memory.h>
+#include <memory>
#include "compilation.h"
#include "model.h"
@@ -380,7 +380,7 @@ void ActivationBuilder::appendReLU(::arm_compute::ITensor *ifm_alloc)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLActivationLayer>();
+ auto fn = std::make_unique<::arm_compute::CLActivationLayer>();
fn->configure(CAST_CL(ifm_alloc), nullptr, act_info);
@@ -388,7 +388,7 @@ void ActivationBuilder::appendReLU(::arm_compute::ITensor *ifm_alloc)
}
else
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::NEActivationLayer>();
+ auto fn = std::make_unique<::arm_compute::NEActivationLayer>();
fn->configure(ifm_alloc, nullptr, act_info);
@@ -403,7 +403,7 @@ void ActivationBuilder::appendReLU1(::arm_compute::ITensor *ifm_alloc)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLActivationLayer>();
+ auto fn = std::make_unique<::arm_compute::CLActivationLayer>();
fn->configure(CAST_CL(ifm_alloc), nullptr, act_info);
@@ -411,7 +411,7 @@ void ActivationBuilder::appendReLU1(::arm_compute::ITensor *ifm_alloc)
}
else
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::NEActivationLayer>();
+ auto fn = std::make_unique<::arm_compute::NEActivationLayer>();
fn->configure(ifm_alloc, nullptr, act_info);
@@ -426,7 +426,7 @@ void ActivationBuilder::appendReLU6(::arm_compute::ITensor *ifm_alloc)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLActivationLayer>();
+ auto fn = std::make_unique<::arm_compute::CLActivationLayer>();
fn->configure(CAST_CL(ifm_alloc), nullptr, act_info);
@@ -434,7 +434,7 @@ void ActivationBuilder::appendReLU6(::arm_compute::ITensor *ifm_alloc)
}
else
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::NEActivationLayer>();
+ auto fn = std::make_unique<::arm_compute::NEActivationLayer>();
fn->configure(ifm_alloc, nullptr, act_info);
@@ -613,7 +613,7 @@ void Planner::visit(const ::internal::tflite::op::Add::Node &node)
{
if (::internal::arm_compute::isGpuMode())
{
- auto l = nnfw::cpp14::make_unique<::arm_compute::CLArithmeticAddition>();
+ auto l = std::make_unique<::arm_compute::CLArithmeticAddition>();
// TODO Decide ConvertPolicy (WARP? SATURATE?) according to NN API specification
l->configure(CAST_CL(lhs_alloc), CAST_CL(rhs_alloc), CAST_CL(ofm_alloc),
@@ -623,7 +623,7 @@ void Planner::visit(const ::internal::tflite::op::Add::Node &node)
}
else // NEON
{
- auto l = nnfw::cpp14::make_unique<::arm_compute::NEArithmeticAddition>();
+ auto l = std::make_unique<::arm_compute::NEArithmeticAddition>();
// TODO Decide ConvertPolicy (WARP? SATURATE?) according to NN API specification
l->configure(lhs_alloc, rhs_alloc, ofm_alloc, ::arm_compute::ConvertPolicy::SATURATE);
@@ -693,7 +693,7 @@ void Planner::visit(const ::internal::tflite::op::Sub::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLArithmeticSubtraction>();
+ auto fn = std::make_unique<::arm_compute::CLArithmeticSubtraction>();
// TODO Decide ConvertPolicy (WARP? SATURATE?) according to NN API specification
fn->configure(CAST_CL(lhs_alloc), CAST_CL(rhs_alloc), CAST_CL(ofm_alloc),
@@ -703,7 +703,7 @@ void Planner::visit(const ::internal::tflite::op::Sub::Node &node)
}
else // NEON
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::NEArithmeticSubtraction>();
+ auto fn = std::make_unique<::arm_compute::NEArithmeticSubtraction>();
// TODO Decide ConvertPolicy (WARP? SATURATE?) according to NN API specification
fn->configure(lhs_alloc, rhs_alloc, ofm_alloc, ::arm_compute::ConvertPolicy::SATURATE);
@@ -767,14 +767,13 @@ void Planner::visit(const ::internal::tflite::op::Mul::Node &node)
param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
auto stage = [param](const IAllocationContext &ctx, IExecutionBuilder &builder) {
-
auto output_alloc = ctx.at(::internal::tflite::operand::Index{param.ofm_index});
auto lhs_input_alloc = ctx.at(::internal::tflite::operand::Index{param.lhs_index});
auto rhs_input_alloc = ctx.at(::internal::tflite::operand::Index{param.rhs_index});
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLPixelWiseMultiplication>();
+ auto fn = std::make_unique<::arm_compute::CLPixelWiseMultiplication>();
fn->configure(CAST_CL(lhs_input_alloc), CAST_CL(rhs_input_alloc), CAST_CL(output_alloc),
1.0, // scale
@@ -785,7 +784,7 @@ void Planner::visit(const ::internal::tflite::op::Mul::Node &node)
}
else // NEON
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::NEPixelWiseMultiplication>();
+ auto fn = std::make_unique<::arm_compute::NEPixelWiseMultiplication>();
fn->configure(lhs_input_alloc, rhs_input_alloc, output_alloc,
1.0, // scale
@@ -856,7 +855,7 @@ void Planner::visit(const ::internal::tflite::op::Div::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLArithmeticDivision>();
+ auto fn = std::make_unique<::arm_compute::CLArithmeticDivision>();
fn->configure(CAST_CL(lhs_alloc), CAST_CL(rhs_alloc), CAST_CL(ofm_alloc));
@@ -1321,7 +1320,7 @@ void Planner::visit(const ::internal::tflite::op::DepthwiseConv2D::Implicit::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLDepthwiseConvolutionLayer>();
+ auto fn = std::make_unique<::arm_compute::CLDepthwiseConvolutionLayer>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ker_alloc), CAST_CL(bias_alloc), CAST_CL(ofm_alloc),
conv_info, param.multipler);
@@ -1330,7 +1329,7 @@ void Planner::visit(const ::internal::tflite::op::DepthwiseConv2D::Implicit::Node &node)
}
else
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::NEDepthwiseConvolutionLayer>();
+ auto fn = std::make_unique<::arm_compute::NEDepthwiseConvolutionLayer>();
fn->configure(ifm_alloc, ker_alloc, bias_alloc, ofm_alloc, conv_info, param.multipler);
@@ -1464,7 +1463,7 @@ void Planner::visit(const ::internal::tflite::op::DepthwiseConv2D::Explicit::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLDepthwiseConvolutionLayer>();
+ auto fn = std::make_unique<::arm_compute::CLDepthwiseConvolutionLayer>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ker_alloc), CAST_CL(bias_alloc), CAST_CL(ofm_alloc),
conv_info, param.multipler);
@@ -1473,7 +1472,7 @@ void Planner::visit(const ::internal::tflite::op::DepthwiseConv2D::Explicit::Node &node)
}
else
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::NEDepthwiseConvolutionLayer>();
+ auto fn = std::make_unique<::arm_compute::NEDepthwiseConvolutionLayer>();
fn->configure(ifm_alloc, ker_alloc, bias_alloc, ofm_alloc, conv_info, param.multipler);
@@ -1527,7 +1526,7 @@ void Planner::visit(const ::internal::tflite::op::Dequantize::Node &node)
{
if (::internal::arm_compute::isGpuMode())
{
- auto l = nnfw::cpp14::make_unique<::arm_compute::CLCast>();
+ auto l = std::make_unique<::arm_compute::CLCast>();
l->configure(CAST_CL(input_alloc), CAST_CL(output_alloc));
fn = std::move(l);
@@ -2157,7 +2156,7 @@ void Planner::visit(const ::internal::tflite::op::FullyConnected::Node &node)
auto weight_alloc = ctx.at(::internal::tflite::operand::Index{param.weight_index});
auto bias_alloc = ctx.at(::internal::tflite::operand::Index{param.bias_index});
- auto fn = nnfw::cpp14::make_unique<arm_compute::CLFullyConnectedReshapingLayer>();
+ auto fn = std::make_unique<arm_compute::CLFullyConnectedReshapingLayer>();
fn->configure(CAST_CL(input_alloc), CAST_CL(weight_alloc), CAST_CL(bias_alloc),
CAST_CL(output_alloc), needs_reshape, asTensorShape(reshape));
@@ -2209,7 +2208,7 @@ void Planner::visit(const ::internal::tflite::op::ResizeBilinear::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLScale>();
+ auto fn = std::make_unique<::arm_compute::CLScale>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc),
::arm_compute::InterpolationPolicy::BILINEAR,
@@ -2262,7 +2261,7 @@ void Planner::visit(const ::internal::tflite::op::Reshape::Node &node)
if (::internal::arm_compute::isGpuMode())
{
// GenericReshape first apply NCHW->NHWC permutation, and apply reshape
- auto fn = nnfw::cpp14::make_unique<GenericReshapeLayer>();
+ auto fn = std::make_unique<GenericReshapeLayer>();
fn->configure(CAST_CL(input_alloc), CAST_CL(output_alloc));
@@ -2270,7 +2269,7 @@ void Planner::visit(const ::internal::tflite::op::Reshape::Node &node)
}
else
{
- auto fn = nnfw::cpp14::make_unique<GenericReshapeLayer>();
+ auto fn = std::make_unique<GenericReshapeLayer>();
fn->configure(input_alloc, output_alloc);
@@ -2316,7 +2315,7 @@ void Planner::visit(const ::internal::tflite::op::Squeeze::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLReshapeLayer>();
+ auto fn = std::make_unique<::arm_compute::CLReshapeLayer>();
fn->configure(CAST_CL(input_alloc), CAST_CL(output_alloc));
@@ -2324,7 +2323,7 @@ void Planner::visit(const ::internal::tflite::op::Squeeze::Node &node)
}
else
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::NEReshapeLayer>();
+ auto fn = std::make_unique<::arm_compute::NEReshapeLayer>();
fn->configure(input_alloc, output_alloc);
@@ -2375,7 +2374,7 @@ void Planner::visit(const ::internal::tflite::op::Softmax::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLSoftmaxLayer>();
+ auto fn = std::make_unique<::arm_compute::CLSoftmaxLayer>();
fn->configure(CAST_CL(input_alloc), CAST_CL(output_alloc), param.scale);
@@ -2383,7 +2382,7 @@ void Planner::visit(const ::internal::tflite::op::Softmax::Node &node)
}
else
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::NESoftmaxLayer>();
+ auto fn = std::make_unique<::arm_compute::NESoftmaxLayer>();
fn->configure(input_alloc, output_alloc, param.scale);
@@ -2518,7 +2517,7 @@ void Planner::visit(const ::internal::tflite::op::StridedSlice::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLStridedSlice>();
+ auto fn = std::make_unique<::arm_compute::CLStridedSlice>();
fn->configure(CAST_CL(inputData_alloc), CAST_CL(outputData_alloc), starts, ends, strides,
param.beginMask, param.endMask, param.shrinkAxisMask);
@@ -2645,7 +2644,7 @@ void Planner::visit(const ::internal::tflite::op::ReduceMin::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLReduceOperation>();
+ auto fn = std::make_unique<::arm_compute::CLReduceOperation>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), param.axis,
::arm_compute::ReduceOperation::MIN);
@@ -2772,7 +2771,7 @@ void Planner::visit(const ::internal::tflite::op::ReduceMax::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLReduceOperation>();
+ auto fn = std::make_unique<::arm_compute::CLReduceOperation>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), param.axis,
::arm_compute::ReduceOperation::MAX);
@@ -2823,7 +2822,7 @@ void Planner::visit(const ::internal::tflite::op::Cast::Node &node)
{
if (::internal::arm_compute::isGpuMode())
{
- auto l = nnfw::cpp14::make_unique<::arm_compute::CLCast>();
+ auto l = std::make_unique<::arm_compute::CLCast>();
l->configure(CAST_CL(input_alloc), CAST_CL(output_alloc));
fn = std::move(l);
@@ -2892,7 +2891,7 @@ void Planner::visit(const ::internal::tflite::op::TopKV2::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLTopKV2>();
+ auto fn = std::make_unique<::arm_compute::CLTopKV2>();
fn->configure(CAST_CL(input_alloc), param.k, CAST_CL(values_alloc), CAST_CL(indices_alloc));
@@ -2969,7 +2968,7 @@ void Planner::visit(const ::internal::tflite::op::Gather::Node &node)
{
std::unique_ptr<::arm_compute::IFunction> fn;
- auto l = nnfw::cpp14::make_unique<GenericGather>();
+ auto l = std::make_unique<GenericGather>();
l->configure(CAST_CL(ifm_alloc), CAST_CL(indices_alloc), CAST_CL(ofm_alloc), param.axis);
fn = std::move(l);
builder.append("Gather", std::move(fn));
@@ -3032,7 +3031,7 @@ void Planner::visit(const ::internal::tflite::op::PReLU::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLPReLU>();
+ auto fn = std::make_unique<::arm_compute::CLPReLU>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(alpha_alloc), CAST_CL(ofm_alloc));
builder.append("PReLU", std::move(fn));
}
@@ -3082,7 +3081,7 @@ void Planner::visit(const ::internal::tflite::op::ReLU::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLActivationLayer>();
+ auto fn = std::make_unique<::arm_compute::CLActivationLayer>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), act_info);
@@ -3090,7 +3089,7 @@ void Planner::visit(const ::internal::tflite::op::ReLU::Node &node)
}
else
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::NEActivationLayer>();
+ auto fn = std::make_unique<::arm_compute::NEActivationLayer>();
fn->configure(ifm_alloc, ofm_alloc, act_info);
@@ -3136,7 +3135,7 @@ void Planner::visit(const ::internal::tflite::op::ReLU1::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLActivationLayer>();
+ auto fn = std::make_unique<::arm_compute::CLActivationLayer>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), act_info);
@@ -3144,7 +3143,7 @@ void Planner::visit(const ::internal::tflite::op::ReLU1::Node &node)
}
else
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::NEActivationLayer>();
+ auto fn = std::make_unique<::arm_compute::NEActivationLayer>();
fn->configure(ifm_alloc, ofm_alloc, act_info);
@@ -3190,7 +3189,7 @@ void Planner::visit(const ::internal::tflite::op::ReLU6::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLActivationLayer>();
+ auto fn = std::make_unique<::arm_compute::CLActivationLayer>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), act_info);
@@ -3198,7 +3197,7 @@ void Planner::visit(const ::internal::tflite::op::ReLU6::Node &node)
}
else
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::NEActivationLayer>();
+ auto fn = std::make_unique<::arm_compute::NEActivationLayer>();
fn->configure(ifm_alloc, ofm_alloc, act_info);
@@ -3244,7 +3243,7 @@ void Planner::visit(const ::internal::tflite::op::Tanh::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLActivationLayer>();
+ auto fn = std::make_unique<::arm_compute::CLActivationLayer>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), act_info);
@@ -3252,7 +3251,7 @@ void Planner::visit(const ::internal::tflite::op::Tanh::Node &node)
}
else
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::NEActivationLayer>();
+ auto fn = std::make_unique<::arm_compute::NEActivationLayer>();
fn->configure(ifm_alloc, ofm_alloc, act_info);
@@ -3298,7 +3297,7 @@ void Planner::visit(const ::internal::tflite::op::Logistic::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLActivationLayer>();
+ auto fn = std::make_unique<::arm_compute::CLActivationLayer>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), act_info);
@@ -3306,7 +3305,7 @@ void Planner::visit(const ::internal::tflite::op::Logistic::Node &node)
}
else
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::NEActivationLayer>();
+ auto fn = std::make_unique<::arm_compute::NEActivationLayer>();
fn->configure(ifm_alloc, ofm_alloc, act_info);
@@ -3442,7 +3441,7 @@ void Planner::visit(const ::internal::tflite::op::Mean::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLReduceMean>();
+ auto fn = std::make_unique<::arm_compute::CLReduceMean>();
fn->configure(CAST_CL(ifm_alloc), reduction_axis, param.keep_dims, CAST_CL(ofm_alloc));
@@ -3630,13 +3629,12 @@ void Planner::visit(const ::internal::tflite::op::Transpose::Node &node)
param.rank = _ctx.at(ifm_index).shape().rank();
auto stage = [param](const IAllocationContext &ctx, IExecutionBuilder &builder) {
-
auto ofm_alloc = ctx.at(::internal::tflite::operand::Index{param.ofm_index});
const auto ifm_alloc = ctx.at(::internal::tflite::operand::Index{param.ifm_index});
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLPermute>();
+ auto fn = std::make_unique<::arm_compute::CLPermute>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc),
getARMComputePermutationVector(param.rank, param.pv));
@@ -3647,7 +3645,6 @@ void Planner::visit(const ::internal::tflite::op::Transpose::Node &node)
{
throw std::runtime_error("Not supported, yet");
}
-
};
_builder.addStage(stage);
@@ -3685,7 +3682,7 @@ void Planner::visit(const ::internal::tflite::op::Floor::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLFloor>();
+ auto fn = std::make_unique<::arm_compute::CLFloor>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc));
@@ -3693,7 +3690,7 @@ void Planner::visit(const ::internal::tflite::op::Floor::Node &node)
}
else
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::NEFloor>();
+ auto fn = std::make_unique<::arm_compute::NEFloor>();
fn->configure(ifm_alloc, ofm_alloc);
@@ -3766,7 +3763,7 @@ void Planner::visit(const ::internal::tflite::op::ArgMax::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLArgOperation>();
+ auto fn = std::make_unique<::arm_compute::CLArgOperation>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), param.axis,
::arm_compute::ArgOperation::MAX);
@@ -3819,7 +3816,7 @@ void Planner::visit(const ::internal::tflite::op::SQRT::Node &node)
{
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLActivationLayer>();
+ auto fn = std::make_unique<::arm_compute::CLActivationLayer>();
fn->configure(CAST_CL(input_alloc), CAST_CL(output_alloc), act_info);
@@ -3827,7 +3824,7 @@ void Planner::visit(const ::internal::tflite::op::SQRT::Node &node)
}
else
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::NEActivationLayer>();
+ auto fn = std::make_unique<::arm_compute::NEActivationLayer>();
fn->configure(input_alloc, output_alloc, act_info);
@@ -3873,7 +3870,7 @@ void Planner::visit(const ::internal::tflite::op::RSQRT::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLRsqrtLayer>();
+ auto fn = std::make_unique<::arm_compute::CLRsqrtLayer>();
fn->configure(CAST_CL(input_alloc), CAST_CL(output_alloc));
@@ -3936,7 +3933,7 @@ void Planner::visit(const ::internal::tflite::op::Equal::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLComparison>();
+ auto fn = std::make_unique<::arm_compute::CLComparison>();
fn->configure(CAST_CL(input1_alloc), CAST_CL(input2_alloc), CAST_CL(output_alloc),
::arm_compute::ComparisonOperation::Equal);
@@ -4046,7 +4043,7 @@ void Planner::visit(const ::internal::tflite::op::TransposeConv::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLTransposeConvLayer>();
+ auto fn = std::make_unique<::arm_compute::CLTransposeConvLayer>();
auto symmetric_tconv_info = asPadStrideInfo(param.padding, param.stride);
@@ -4111,7 +4108,7 @@ void Planner::visit(const ::internal::tflite::op::SquaredDifference::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLElementwiseSquaredDiff>();
+ auto fn = std::make_unique<::arm_compute::CLElementwiseSquaredDiff>();
fn->configure(CAST_CL(lhs_alloc), CAST_CL(rhs_alloc), CAST_CL(ofm_alloc));
builder.append("SquaredDifference", std::move(fn));
@@ -4121,7 +4118,6 @@ void Planner::visit(const ::internal::tflite::op::SquaredDifference::Node &node)
// TODO Enable NEON Support
throw std::runtime_error("Not supported, yet");
}
-
};
_builder.addStage(stage);
@@ -4244,7 +4240,7 @@ void Planner::visit(const ::internal::tflite::op::Pad::Node &node)
{
if (::internal::arm_compute::isGpuMode()) // GPU
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLPadLayer>();
+ auto fn = std::make_unique<::arm_compute::CLPadLayer>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), param.padding_list,
param.pixel_value);
@@ -4314,7 +4310,7 @@ void Planner::visit(const ::internal::tflite::op::SpaceToDepth::Node &node)
{
if (::internal::arm_compute::isGpuMode()) // GPU
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLSpaceToDepth>();
+ auto fn = std::make_unique<::arm_compute::CLSpaceToDepth>();
fn->configure(CAST_CL(input_alloc), CAST_CL(output_alloc), param.block_size);
@@ -4448,7 +4444,7 @@ void Planner::visit(const ::internal::tflite::op::SpaceToBatchND::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLSpaceToBatchND>();
+ auto fn = std::make_unique<::arm_compute::CLSpaceToBatchND>();
fn->configure(CAST_CL(input_alloc), CAST_CL(block_size_alloc), CAST_CL(padding_size_alloc),
CAST_CL(output_alloc));
@@ -4543,7 +4539,7 @@ void Planner::visit(const ::internal::tflite::op::BatchToSpaceNd::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLBatchToSpaceLayer>();
+ auto fn = std::make_unique<::arm_compute::CLBatchToSpaceLayer>();
fn->configure(CAST_CL(input_alloc), CAST_CL(block_size_alloc), CAST_CL(output_alloc));
builder.append("BatchToSpaceND", std::move(fn));
@@ -4608,7 +4604,7 @@ void Planner::visit(const ::internal::tflite::op::L2Normalization::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLNormalizationLayer>();
+ auto fn = std::make_unique<::arm_compute::CLNormalizationLayer>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), norm_info);
@@ -4616,7 +4612,7 @@ void Planner::visit(const ::internal::tflite::op::L2Normalization::Node &node)
}
else
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::NENormalizationLayer>();
+ auto fn = std::make_unique<::arm_compute::NENormalizationLayer>();
fn->configure(ifm_alloc, ofm_alloc, norm_info);
@@ -4889,7 +4885,7 @@ void Planner::visit(const ::internal::tflite::op::EmbeddingLookup::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLEmbeddingLookup>();
+ auto fn = std::make_unique<::arm_compute::CLEmbeddingLookup>();
fn->configure(CAST_CL(values_alloc), CAST_CL(output_alloc), CAST_CL(lookups_alloc));
@@ -4985,7 +4981,7 @@ void Planner::visit(const ::internal::tflite::op::HashtableLookup::Node &node)
if (::internal::arm_compute::isGpuMode()) // GPU
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLHashtableLookup>();
+ auto fn = std::make_unique<::arm_compute::CLHashtableLookup>();
fn->configure(CAST_CL(lookups_alloc), CAST_CL(keys_alloc), CAST_CL(values_alloc),
CAST_CL(output_alloc), CAST_CL(hits_alloc));
@@ -5049,7 +5045,7 @@ void Planner::visit(const ::internal::tflite::op::LocalResponseNormalization::Node &node)
param.beta, param.bias, false);
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLNormalizationLayer>();
+ auto fn = std::make_unique<::arm_compute::CLNormalizationLayer>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), norm_info);
@@ -5057,7 +5053,7 @@ void Planner::visit(const ::internal::tflite::op::LocalResponseNormalization::Node &node)
}
else
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::NENormalizationLayer>();
+ auto fn = std::make_unique<::arm_compute::NENormalizationLayer>();
fn->configure(ifm_alloc, ofm_alloc, norm_info);
@@ -5121,7 +5117,7 @@ void Planner::visit(const ::internal::tflite::op::DepthToSpace::Node &node)
{
if (::internal::arm_compute::isGpuMode()) // GPU
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLDepthToSpace>();
+ auto fn = std::make_unique<::arm_compute::CLDepthToSpace>();
fn->configure(CAST_CL(input_alloc), CAST_CL(output_alloc), param.block_size);
@@ -5194,7 +5190,7 @@ void Planner::visit(const ::internal::tflite::op::Unpack::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLUnstack>();
+ auto fn = std::make_unique<::arm_compute::CLUnstack>();
std::vector<::arm_compute::ICLTensor *> outputs;
for (const auto &index : param.ofm_indexes)
{
@@ -5282,7 +5278,7 @@ void Planner::visit(const ::internal::tflite::op::Pack::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLStackLayer>();
+ auto fn = std::make_unique<::arm_compute::CLStackLayer>();
std::vector<::arm_compute::ICLTensor *> inputs;
for (const auto &index : param.ifm_indexes)
{
@@ -5340,7 +5336,7 @@ void Planner::visit(const ::internal::tflite::op::Neg::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLNeg>();
+ auto fn = std::make_unique<::arm_compute::CLNeg>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc));
builder.append("Neg", std::move(fn));
@@ -5350,7 +5346,6 @@ void Planner::visit(const ::internal::tflite::op::Neg::Node &node)
// TODO Enable NEON Support
throw std::runtime_error("Not supported, yet");
}
-
};
_builder.addStage(stage);
}
@@ -5387,7 +5382,7 @@ void Planner::visit(const ::internal::tflite::op::Exp::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLExpLayer>();
+ auto fn = std::make_unique<::arm_compute::CLExpLayer>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc));
@@ -5512,7 +5507,7 @@ void Planner::visit(const ::internal::tflite::op::ReduceSum::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLReduceOperation>();
+ auto fn = std::make_unique<::arm_compute::CLReduceOperation>();
fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), param.axis,
::arm_compute::ReduceOperation::SUM);
@@ -5563,7 +5558,7 @@ void Planner::visit(const ::internal::tflite::op::Abs::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLActivationLayer>();
+ auto fn = std::make_unique<::arm_compute::CLActivationLayer>();
fn->configure(CAST_CL(input_alloc), CAST_CL(output_alloc), act_info);
@@ -5571,7 +5566,7 @@ void Planner::visit(const ::internal::tflite::op::Abs::Node &node)
}
else
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::NEActivationLayer>();
+ auto fn = std::make_unique<::arm_compute::NEActivationLayer>();
fn->configure(input_alloc, output_alloc, act_info);
@@ -5632,7 +5627,7 @@ void Planner::visit(const ::internal::tflite::op::NotEqual::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLComparison>();
+ auto fn = std::make_unique<::arm_compute::CLComparison>();
fn->configure(CAST_CL(input1_alloc), CAST_CL(input2_alloc), CAST_CL(output_alloc),
::arm_compute::ComparisonOperation::NotEqual);
@@ -5701,7 +5696,7 @@ void Planner::visit(const ::internal::tflite::op::LogicalAnd::Node &node)
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLBinaryLogicalOp>();
+ auto fn = std::make_unique<::arm_compute::CLBinaryLogicalOp>();
fn->configure(CAST_CL(input1_alloc), CAST_CL(input2_alloc), CAST_CL(output_alloc),
::arm_compute::BinaryLogicalOperation::AND);
@@ -5750,7 +5745,7 @@ void Planner::visit(const ::internal::tflite::op::LogicalNot::Node &node)
auto input_alloc = ctx.at(::internal::tflite::operand::Index{param.input_index});
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLBitwiseNot>();
+ auto fn = std::make_unique<::arm_compute::CLBitwiseNot>();
fn->configure(CAST_CL(input_alloc), CAST_CL(output_alloc));
@@ -5818,7 +5813,7 @@ void Planner::visit(const ::internal::tflite::op::LogicalOr::Node &node)
auto input2_alloc = ctx.at(::internal::tflite::operand::Index{param.input2_index});
if (::internal::arm_compute::isGpuMode())
{
- auto fn = nnfw::cpp14::make_unique<::arm_compute::CLBinaryLogicalOp>();
+ auto fn = std::make_unique<::arm_compute::CLBinaryLogicalOp>();
fn->configure(CAST_CL(input1_alloc), CAST_CL(input2_alloc), CAST_CL(output_alloc),
::arm_compute::BinaryLogicalOperation::OR);