Diffstat (limited to 'runtime/contrib/benchmark_acl')
-rw-r--r--  runtime/contrib/benchmark_acl/.FORMATDENY                        0
-rw-r--r--  runtime/contrib/benchmark_acl/CMakeLists.txt                     24
-rw-r--r--  runtime/contrib/benchmark_acl/src/Benchmark.cpp                  74
-rw-r--r--  runtime/contrib/benchmark_acl/src/Benchmark.h                    82
-rw-r--r--  runtime/contrib/benchmark_acl/src/benchmark_googlenet.cpp      242
-rw-r--r--  runtime/contrib/benchmark_acl/src/benchmark_inception_v3.cpp   891
-rw-r--r--  runtime/contrib/benchmark_acl/src/benchmark_mobilenet.cpp      265
7 files changed, 1578 insertions, 0 deletions
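The patch adds a small shared timing harness (src/Benchmark.cpp and src/Benchmark.h, built as arm_compute_benchmark) plus three model executables that link against it. As orientation before the file-by-file diff, the sketch below shows how a hypothetical driver would compose those pieces; the trivial pooling graph and the "Minimal" stream name are placeholders of my own, while the helpers (set_target_hint, get_accessor, InputAccessor, OutputAccessor, run_benchmark) are the ones declared in src/Benchmark.h:

// Hypothetical minimal driver -- not part of this patch.
// Typical invocation (target 1 = OpenCL): COUNT=10 ./benchmark_minimal 1
#include "arm_compute/graph.h"
#include "Benchmark.h"

#include <cstdlib>

using namespace arm_compute::graph::frontend;

int main(int argc, char **argv)
{
  // Same convention as the three benchmarks: 0 = NEON (default), 1 = OpenCL if available.
  const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
  Target target_hint = set_target_hint(target);

  Stream graph{ 0, "Minimal" };
  graph << target_hint
        << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
                      get_accessor<InputAccessor>())
        << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 1, 1)))
        << OutputLayer(get_accessor<OutputAccessor>());

  GraphConfig config;
  graph.finalize(target_hint, config);

  // run_benchmark() does 3 hardcoded warm-up iterations, then COUNT timed iterations
  // (COUNT defaults to 1 in Count::Count()), printing per-iteration latency and the mean.
  run_benchmark(graph);
  return 0;
}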
diff --git a/runtime/contrib/benchmark_acl/.FORMATDENY b/runtime/contrib/benchmark_acl/.FORMATDENY
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/runtime/contrib/benchmark_acl/.FORMATDENY
diff --git a/runtime/contrib/benchmark_acl/CMakeLists.txt b/runtime/contrib/benchmark_acl/CMakeLists.txt
new file mode 100644
index 000000000..528db4142
--- /dev/null
+++ b/runtime/contrib/benchmark_acl/CMakeLists.txt
@@ -0,0 +1,24 @@
+if(NOT BUILD_BENCHMARK_ACL)
+  return()
+endif(NOT BUILD_BENCHMARK_ACL)
+
+nnas_find_package(ARMCompute REQUIRED)
+
+add_library(arm_compute_benchmark SHARED "src/Benchmark.cpp")
+target_include_directories(arm_compute_benchmark PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
+target_link_libraries(arm_compute_benchmark arm_compute_graph)
+install(TARGETS arm_compute_benchmark DESTINATION lib)
+
+# GoogLeNet benchmark
+add_executable(benchmark_googlenet "src/benchmark_googlenet.cpp")
+target_link_libraries(benchmark_googlenet arm_compute_benchmark)
+
+# Inception V3 benchmark
+add_executable(benchmark_inception_v3 "src/benchmark_inception_v3.cpp")
+target_link_libraries(benchmark_inception_v3 arm_compute_benchmark)
+
+# MobileNet benchmark
+add_executable(benchmark_mobilenet "src/benchmark_mobilenet.cpp")
+target_link_libraries(benchmark_mobilenet arm_compute_benchmark)
+
+install(TARGETS benchmark_googlenet benchmark_inception_v3 benchmark_mobilenet DESTINATION bin)
diff --git a/runtime/contrib/benchmark_acl/src/Benchmark.cpp b/runtime/contrib/benchmark_acl/src/Benchmark.cpp
new file mode 100644
index 000000000..4a761ec76
--- /dev/null
+++ b/runtime/contrib/benchmark_acl/src/Benchmark.cpp
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Benchmark.h"
+
+#include <cstdlib>
+
+Count::Count() : _value(1)
+{
+  auto env = std::getenv("COUNT");
+
+  if (env)
+  {
+    _value = std::strtol(env, NULL, 0);
+  }
+}
+
+uint32_t Count::value(void) const { return _value; }
+
+#include <boost/accumulators/accumulators.hpp>
+#include <boost/accumulators/statistics/stats.hpp>
+#include <boost/accumulators/statistics/mean.hpp>
+
+#include <iostream>
+#include <chrono>
+
+using namespace boost::accumulators;
+
+void run_benchmark(arm_compute::graph::frontend::Stream &graph)
+{
+  // NOTE Here the number of warming-up iterations is hardcoded
+  // TODO Decide the number of warming-up iterations appropriately
+  for (uint32_t n = 0; n < 3; ++n)
+  {
+    auto beg = std::chrono::steady_clock::now();
+    graph.run();
+    auto end = std::chrono::steady_clock::now();
+    auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(end - beg);
+
+    std::cout << "Warming-up " << n << ": " << elapsed.count() << "ms" << std::endl;
+  }
+
+  accumulator_set<double, stats<tag::mean>> acc;
+
+  const Count count;
+
+  for (uint32_t n = 0; n < count.value(); ++n)
+  {
+    auto beg = std::chrono::steady_clock::now();
+    graph.run();
+    auto end = std::chrono::steady_clock::now();
+    auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(end - beg);
+
+    std::cout << "Iteration " << n << ": " << elapsed.count() << "ms" << std::endl;
+
+    acc(elapsed.count());
+  }
+
+  std::cout << "--------" << std::endl;
+  std::cout << "Mean: " << mean(acc) << "ms" << std::endl;
+}
diff --git a/runtime/contrib/benchmark_acl/src/Benchmark.h b/runtime/contrib/benchmark_acl/src/Benchmark.h
new file mode 100644
index 000000000..200f40952
--- /dev/null
+++ b/runtime/contrib/benchmark_acl/src/Benchmark.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ACL_BENCHMARK_H__
+#define __ACL_BENCHMARK_H__
+
+#include "arm_compute/graph/ITensorAccessor.h"
+#include "arm_compute/graph.h"
+#include "arm_compute/core/CL/OpenCL.h"
+
+struct InputAccessor final : public arm_compute::graph::ITensorAccessor
+{
+  InputAccessor() = default;
+  /** Allows instances to be move constructed */
+  InputAccessor(InputAccessor &&) = default;
+
+  // Inherited methods overridden:
+  bool access_tensor(arm_compute::ITensor &tensor) override
+  {
+    return true;
+  }
+};
+
+struct OutputAccessor final : public arm_compute::graph::ITensorAccessor
+{
+  OutputAccessor() = default;
+  /** Allows instances to be move constructed */
+  OutputAccessor(OutputAccessor &&) = default;
+
+  // Inherited methods overridden:
+  bool access_tensor(arm_compute::ITensor &tensor) override
+  {
+    return false;
+  }
+};
+
+template <typename T> std::unique_ptr<arm_compute::graph::ITensorAccessor> get_accessor()
+{
+  return std::unique_ptr<T>(new T());
+}
+
+class Count
+{
+public:
+  Count();
+
+public:
+  uint32_t value(void) const;
+
+private:
+  uint32_t _value;
+};
+
+inline arm_compute::graph::Target set_target_hint(int target)
+{
+  if(target == 1 && arm_compute::opencl_is_available())
+  {
+    // If type of target is OpenCL, check if OpenCL is available and initialize the scheduler
+    return arm_compute::graph::Target::CL;
+  }
+  else
+  {
+    return arm_compute::graph::Target::NEON;
+  }
+}
+
+void run_benchmark(arm_compute::graph::frontend::Stream &graph);
+
+#endif
diff --git a/runtime/contrib/benchmark_acl/src/benchmark_googlenet.cpp b/runtime/contrib/benchmark_acl/src/benchmark_googlenet.cpp
new file mode 100644
index 000000000..8b0fbfdac
--- /dev/null
+++ b/runtime/contrib/benchmark_acl/src/benchmark_googlenet.cpp
@@ -0,0 +1,242 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph.h"
+
+#include "Benchmark.h"
+
+#include <cstdlib>
+#include <tuple>
+
+using namespace arm_compute::graph::frontend;
+
+inline std::unique_ptr<arm_compute::graph::ITensorAccessor> get_input_accessor(void)
+{
+  return get_accessor<InputAccessor>();
+}
+
+inline std::unique_ptr<arm_compute::graph::ITensorAccessor> get_random_accessor(float lower, float upper)
+{
+  return get_accessor<InputAccessor>();
+}
+
+inline std::unique_ptr<arm_compute::graph::ITensorAccessor> get_weights_accessor(const std::string &path, const std::string &data_file, DataLayout file_layout = DataLayout::NCHW)
+{
+  return get_accessor<InputAccessor>();
+}
+
+inline std::unique_ptr<arm_compute::graph::ITensorAccessor> get_output_accessor(void)
+{
+  return get_accessor<OutputAccessor>();
+}
+
+/** Example demonstrating how to implement GoogLeNet's network using the Compute Library's graph API
+ *
+ * @param[in] argc Number of arguments
+ * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ */
+class GraphGooglenetExample
+{
+public:
+  void do_setup(int argc, char **argv)
+  {
+    std::string data_path; /* Path to the trainable data */
+    std::string image;     /* Image data */
+    std::string label;     /* Label data */
+
+    const std::array<float, 3> mean_rgb{ { 122.68f, 116.67f, 104.01f } };
+    // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
+    const int target = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
+    Target target_hint = set_target_hint(target);
+    FastMathHint fast_math_hint = FastMathHint::Disabled;
+
+    // Parse arguments
+    if(argc < 2)
+    {
+      // Print help
+      std::cout << "Usage: " << argv[0] << " [target] [path_to_data] [image] [labels] [fast_math_hint]\n\n";
+      std::cout << "No data folder provided: using random values\n\n";
+    }
+    else if(argc == 2)
+    {
+      std::cout << "Usage: " << argv[0] << " " << argv[1] << " [path_to_data] [image] [labels] [fast_math_hint]\n\n";
+      std::cout << "No data folder provided: using random values\n\n";
+    }
+    else if(argc == 3)
+    {
+      data_path = argv[2];
+      std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " [image] [labels] [fast_math_hint]\n\n";
+      std::cout << "No image provided: using random values\n\n";
+    }
+    else if(argc == 4)
+    {
+      data_path = argv[2];
+      image     = argv[3];
+      std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [labels] [fast_math_hint]\n\n";
+      std::cout << "No text file with labels provided: skipping output accessor\n\n";
+    }
+    else if(argc == 5)
+    {
+      data_path = argv[2];
+      image     = argv[3];
+      label     = argv[4];
+      std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " [fast_math_hint]\n\n";
+      std::cout << "No fast math info provided: disabling fast math\n\n";
+    }
+    else
+    {
+      data_path      = argv[2];
+      image          = argv[3];
+      label          = argv[4];
+      fast_math_hint = (std::strtol(argv[5], nullptr, 10) == 0) ?
+                         FastMathHint::Disabled : FastMathHint::Enabled;
+    }
+
+    graph << target_hint
+          << fast_math_hint
+          << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
+                        get_input_accessor())
+          << ConvolutionLayer(
+              7U, 7U, 64U,
+              get_weights_accessor(data_path, "/cnn_data/googlenet_model/conv1/conv1_7x7_s2_w.npy"),
+              get_weights_accessor(data_path, "/cnn_data/googlenet_model/conv1/conv1_7x7_s2_b.npy"),
+              PadStrideInfo(2, 2, 3, 3))
+          << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+          << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL)))
+          << NormalizationLayer(NormalizationLayerInfo(NormType::CROSS_MAP, 5, 0.0001f, 0.75f))
+          << ConvolutionLayer(
+              1U, 1U, 64U,
+              get_weights_accessor(data_path, "/cnn_data/googlenet_model/conv2/conv2_3x3_reduce_w.npy"),
+              get_weights_accessor(data_path, "/cnn_data/googlenet_model/conv2/conv2_3x3_reduce_b.npy"),
+              PadStrideInfo(1, 1, 0, 0))
+          << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+          << ConvolutionLayer(
+              3U, 3U, 192U,
+              get_weights_accessor(data_path, "/cnn_data/googlenet_model/conv2/conv2_3x3_w.npy"),
+              get_weights_accessor(data_path, "/cnn_data/googlenet_model/conv2/conv2_3x3_b.npy"),
+              PadStrideInfo(1, 1, 1, 1))
+          << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+          << NormalizationLayer(NormalizationLayerInfo(NormType::CROSS_MAP, 5, 0.0001f, 0.75f))
+          << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL)));
+    graph << get_inception_node(data_path, "inception_3a", 64, std::make_tuple(96U, 128U), std::make_tuple(16U, 32U), 32U);
+    graph << get_inception_node(data_path, "inception_3b", 128, std::make_tuple(128U, 192U), std::make_tuple(32U, 96U), 64U);
+    graph << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL)));
+    graph << get_inception_node(data_path, "inception_4a", 192, std::make_tuple(96U, 208U), std::make_tuple(16U, 48U), 64U);
+    graph << get_inception_node(data_path, "inception_4b", 160, std::make_tuple(112U, 224U), std::make_tuple(24U, 64U), 64U);
+    graph << get_inception_node(data_path, "inception_4c", 128, std::make_tuple(128U, 256U), std::make_tuple(24U, 64U), 64U);
+    graph << get_inception_node(data_path, "inception_4d", 112, std::make_tuple(144U, 288U), std::make_tuple(32U, 64U), 64U);
+    graph << get_inception_node(data_path, "inception_4e", 256, std::make_tuple(160U, 320U), std::make_tuple(32U, 128U), 128U);
+    graph << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL)));
+    graph << get_inception_node(data_path, "inception_5a", 256, std::make_tuple(160U, 320U), std::make_tuple(32U, 128U), 128U);
+    graph << get_inception_node(data_path, "inception_5b", 384, std::make_tuple(192U, 384U), std::make_tuple(48U, 128U), 128U);
+    graph << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, 7, PadStrideInfo(1, 1, 0, 0, DimensionRoundingType::CEIL)))
+          << FullyConnectedLayer(
+              1000U,
+              get_weights_accessor(data_path, "/cnn_data/googlenet_model/loss3/loss3_classifier_w.npy"),
+              get_weights_accessor(data_path, "/cnn_data/googlenet_model/loss3/loss3_classifier_b.npy"))
+          << SoftmaxLayer()
+          << OutputLayer(get_output_accessor());
+
+    // Finalize graph
+    GraphConfig config;
+    config.use_tuner = (target == 2);
+    graph.finalize(target_hint, config);
+  }
+  void do_run()
+  {
+    run_benchmark(graph);
+  }
+
+private:
+  Stream graph{ 0, "GoogleNet" };
+
+  ConcatLayer get_inception_node(const std::string &data_path, std::string &&param_path,
+                                 unsigned int a_filt,
+                                 std::tuple<unsigned int, unsigned int> b_filters,
+                                 std::tuple<unsigned int, unsigned int> c_filters,
+                                 unsigned int d_filt)
+  {
+    std::string total_path = "/cnn_data/googlenet_model/" + param_path + "/" + param_path + "_";
+    SubStream i_a(graph);
+    i_a << ConvolutionLayer(
+            1U, 1U, a_filt,
+            get_weights_accessor(data_path, total_path + "1x1_w.npy"),
+            get_weights_accessor(data_path, total_path + "1x1_b.npy"),
+            PadStrideInfo(1, 1, 0, 0))
+        << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+
+    SubStream i_b(graph);
+    i_b << ConvolutionLayer(
+            1U, 1U, std::get<0>(b_filters),
+            get_weights_accessor(data_path, total_path + "3x3_reduce_w.npy"),
+            get_weights_accessor(data_path, total_path + "3x3_reduce_b.npy"),
+            PadStrideInfo(1, 1, 0, 0))
+        << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+        << ConvolutionLayer(
+            3U, 3U, std::get<1>(b_filters),
+            get_weights_accessor(data_path, total_path + "3x3_w.npy"),
+            get_weights_accessor(data_path, total_path + "3x3_b.npy"),
+            PadStrideInfo(1, 1, 1, 1))
+        << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+
+    SubStream i_c(graph);
+    i_c << ConvolutionLayer(
+            1U, 1U, std::get<0>(c_filters),
+            get_weights_accessor(data_path, total_path + "5x5_reduce_w.npy"),
+            get_weights_accessor(data_path, total_path + "5x5_reduce_b.npy"),
+            PadStrideInfo(1, 1, 0, 0))
+        << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+        << ConvolutionLayer(
+            5U, 5U, std::get<1>(c_filters),
+            get_weights_accessor(data_path, total_path + "5x5_w.npy"),
+            get_weights_accessor(data_path, total_path + "5x5_b.npy"),
+            PadStrideInfo(1, 1, 2, 2))
+        << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+
+    SubStream i_d(graph);
+    i_d << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(1, 1, 1, 1, DimensionRoundingType::CEIL)))
+        << ConvolutionLayer(
+            1U, 1U, d_filt,
+            get_weights_accessor(data_path, total_path + "pool_proj_w.npy"),
+            get_weights_accessor(data_path, total_path + "pool_proj_b.npy"),
+            PadStrideInfo(1, 1, 0, 0))
+        << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+
+    return ConcatLayer(std::move(i_a), std::move(i_b), std::move(i_c), std::move(i_d));
+  }
+};
+
+/** Main program for GoogLeNet
+ *
+ * @param[in] argc Number of arguments
+ * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ */
+int main(int argc, char **argv)
+{
+  GraphGooglenetExample example;
+
+  example.do_setup(argc, argv);
+  example.do_run();
+
+  return 0;
+}
diff --git a/runtime/contrib/benchmark_acl/src/benchmark_inception_v3.cpp b/runtime/contrib/benchmark_acl/src/benchmark_inception_v3.cpp
new file mode 100644
index 000000000..382851f50
--- /dev/null
+++ b/runtime/contrib/benchmark_acl/src/benchmark_inception_v3.cpp
@@ -0,0 +1,891 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright (c) 2017-2018 ARM Limited.
+ * + * SPDX-License-Identifier: MIT + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to + * deal in the Software without restriction, including without limitation the + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + * sell copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "arm_compute/graph.h" + +#include "Benchmark.h" + +#include <cstdlib> +#include <tuple> + +using namespace arm_compute::graph::frontend; + +inline std::unique_ptr<arm_compute::graph::ITensorAccessor> get_input_accessor(void) +{ + return get_accessor<InputAccessor>(); +} + +inline std::unique_ptr<arm_compute::graph::ITensorAccessor> get_random_accessor(float lower, float upper) +{ + return get_accessor<InputAccessor>(); +} + +inline std::unique_ptr<arm_compute::graph::ITensorAccessor> get_weights_accessor(const std::string &path, const std::string &data_file, DataLayout file_layout = DataLayout::NCHW) +{ + return get_accessor<InputAccessor>(); +} + +inline std::unique_ptr<arm_compute::graph::ITensorAccessor> get_output_accessor(void) +{ + return get_accessor<OutputAccessor>(); +} + +/** Example demonstrating how to implement InceptionV3's network using the Compute Library's graph API + * + * @param[in] argc Number of arguments + * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels ) + */ +class InceptionV3Example +{ +public: + void do_setup(int argc, char **argv) + { + std::string data_path; /* Path to the trainable data */ + std::string image; /* Image data */ + std::string label; /* Label data */ + + // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON + const int target = argc > 1 ? 
std::strtol(argv[1], nullptr, 10) : 0; + Target target_hint = set_target_hint(target); + FastMathHint fast_math_hint = FastMathHint::Disabled; + + // Parse arguments + if(argc < 2) + { + // Print help + std::cout << "Usage: " << argv[0] << " [target] [path_to_data] [image] [labels] [fast_math_hint]\n\n"; + std::cout << "No data folder provided: using random values\n\n"; + } + else if(argc == 2) + { + std::cout << "Usage: " << argv[0] << " " << argv[1] << " [path_to_data] [image] [labels] [fast_math_hint]\n\n"; + std::cout << "No data folder provided: using random values\n\n"; + } + else if(argc == 3) + { + data_path = argv[2]; + std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " [image] [labels] [fast_math_hint]\n\n"; + std::cout << "No image provided: using random values\n\n"; + } + else if(argc == 4) + { + data_path = argv[2]; + image = argv[3]; + std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [labels] [fast_math_hint]\n\n"; + std::cout << "No text file with labels provided: skipping output accessor\n\n"; + } + else if(argc == 5) + { + data_path = argv[2]; + image = argv[3]; + label = argv[4]; + std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " [fast_math_hint]\n\n"; + std::cout << "No fast math info provided: disabling fast math\n\n"; + } + else + { + data_path = argv[2]; + image = argv[3]; + label = argv[4]; + fast_math_hint = (std::strtol(argv[5], nullptr, 1) == 0) ? FastMathHint::Disabled : FastMathHint::Enabled; + } + + graph << target_hint + << fast_math_hint + << InputLayer(TensorDescriptor(TensorShape(299U, 299U, 3U, 1U), DataType::F32), + get_input_accessor()) + << ConvolutionLayer(3U, 3U, 32U, + get_weights_accessor(data_path, "/cnn_data/inceptionv3_model/Conv2d_1a_3x3_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(2, 2, 0, 0)) + .set_name("Conv2d_1a_3x3/convolution") + << BatchNormalizationLayer(get_weights_accessor(data_path, + "/cnn_data/inceptionv3_model/Conv2d_1a_3x3_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, + "/cnn_data/inceptionv3_model/Conv2d_1a_3x3_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), get_weights_accessor(data_path, + "/cnn_data/inceptionv3_model/Conv2d_1a_3x3_BatchNorm_beta.npy"), + 0.001f) + .set_name("Conv2d_1a_3x3/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_1a_3x3/Relu") + << ConvolutionLayer(3U, 3U, 32U, + get_weights_accessor(data_path, "/cnn_data/inceptionv3_model/Conv2d_2a_3x3_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0)) + .set_name("Conv2d_2a_3x3/convolution") + << BatchNormalizationLayer(get_weights_accessor(data_path, + "/cnn_data/inceptionv3_model/Conv2d_2a_3x3_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, + "/cnn_data/inceptionv3_model/Conv2d_2a_3x3_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), get_weights_accessor(data_path, + "/cnn_data/inceptionv3_model/Conv2d_2a_3x3_BatchNorm_beta.npy"), + 0.001f) + .set_name("Conv2d_2a_3x3/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_2a_3x3/Relu") + + << ConvolutionLayer(3U, 3U, 64U, + get_weights_accessor(data_path, "/cnn_data/inceptionv3_model/Conv2d_2b_3x3_weights.npy"), + 
std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 1, 1)) + .set_name("Conv2d_2b_3x3/convolution") + << BatchNormalizationLayer(get_weights_accessor(data_path, + "/cnn_data/inceptionv3_model/Conv2d_2b_3x3_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, + "/cnn_data/inceptionv3_model/Conv2d_2b_3x3_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), get_weights_accessor(data_path, + "/cnn_data/inceptionv3_model/Conv2d_2b_3x3_BatchNorm_beta.npy"), + 0.001f) + .set_name("Conv2d_2b_3x3/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_2b_3x3/Relu") + + << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL))).set_name("MaxPool_3a_3x3/MaxPool") + + << ConvolutionLayer(1U, 1U, 80U, + get_weights_accessor(data_path, "/cnn_data/inceptionv3_model/Conv2d_3b_1x1_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0)) + .set_name("Conv2d_3b_1x1/convolution") + << BatchNormalizationLayer(get_weights_accessor(data_path, + "/cnn_data/inceptionv3_model/Conv2d_3b_1x1_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, + "/cnn_data/inceptionv3_model/Conv2d_3b_1x1_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), get_weights_accessor(data_path, + "/cnn_data/inceptionv3_model/Conv2d_3b_1x1_BatchNorm_beta.npy"), + 0.001f) + .set_name("Conv2d_3b_1x1/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_3b_1x1/Relu") + + << ConvolutionLayer(3U, 3U, 192U, + get_weights_accessor(data_path, "/cnn_data/inceptionv3_model/Conv2d_4a_3x3_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), PadStrideInfo(1, 1, 0, 0)) + .set_name("Conv2d_4a_3x3/convolution") + << BatchNormalizationLayer(get_weights_accessor(data_path, + "/cnn_data/inceptionv3_model/Conv2d_4a_3x3_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, + "/cnn_data/inceptionv3_model/Conv2d_4a_3x3_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), get_weights_accessor(data_path, + "/cnn_data/inceptionv3_model/Conv2d_4a_3x3_BatchNorm_beta.npy"), + 0.001f) + .set_name("Conv2d_4a_3x3/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name("Conv2d_4a_3x3/Relu") + + << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL))).set_name("MaxPool_5a_3x3/MaxPool"); + + graph << get_inception_node_A(data_path, "Mixed_5b", 64U, std::make_tuple(48U, 64U), std::make_tuple(64U, 96U, 96U), + 32U) + .set_name("Mixed_5b/concat"); + graph << get_inception_node_A(data_path, "Mixed_5c", 64U, std::make_tuple(48U, 64U), std::make_tuple(64U, 96U, 96U), + 64U, true) + .set_name("Mixed_5c/concat"); + graph << get_inception_node_A(data_path, "Mixed_5d", 64U, std::make_tuple(48U, 64U), std::make_tuple(64U, 96U, 96U), + 64U) + .set_name("Mixed_5d/concat"); + + graph << get_inception_node_B(data_path, "Mixed_6a", 384U, std::make_tuple(64U, 96U, 96U)).set_name("Mixed_6a/concat"); + + graph << get_inception_node_C(data_path, "Mixed_6b", 192U, std::make_tuple(128U, 128U, 192U), + std::make_tuple(128U, 128U, 128U, 128U, 192U), 192U) + .set_name("Mixed_6b/concat"); + graph << get_inception_node_C(data_path, "Mixed_6c", 192U, std::make_tuple(160U, 160U, 192U), + std::make_tuple(160U, 160U, 160U, 
160U, 192U), 192U) + .set_name("Mixed_6c/concat"); + graph << get_inception_node_C(data_path, "Mixed_6d", 192U, std::make_tuple(160U, 160U, 192U), + std::make_tuple(160U, 160U, 160U, 160U, 192U), 192U) + .set_name("Mixed_6d/concat"); + graph << get_inception_node_C(data_path, "Mixed_6e", 192U, std::make_tuple(192U, 192U, 192U), + std::make_tuple(192U, 192U, 192U, 192U, 192U), 192U) + .set_name("Mixed_6e/concat"); + + graph << get_inception_node_D(data_path, "Mixed_7a", std::make_tuple(192U, 320U), + std::make_tuple(192U, 192U, 192U, 192U)) + .set_name("Mixed_7a/concat"); + + graph << get_inception_node_E(data_path, "Mixed_7b", 320U, std::make_tuple(384U, 384U, 384U), + std::make_tuple(448U, 384U, 384U, 384U), 192U) + .set_name("Mixed_7b/concat"); + graph << get_inception_node_E(data_path, "Mixed_7c", 320U, std::make_tuple(384U, 384U, 384U), + std::make_tuple(448U, 384U, 384U, 384U), 192U, true) + .set_name("Mixed_7c/concat"); + + graph << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, 8, PadStrideInfo(1, 1, 0, 0, DimensionRoundingType::CEIL))).set_name("Logits/AvgPool_1a_8x8/AvgPool") + << ConvolutionLayer(1U, 1U, 1001U, get_weights_accessor(data_path, + "/cnn_data/inceptionv3_model/Logits_Conv2d_1c_1x1_weights.npy"), + get_weights_accessor(data_path, + "/cnn_data/inceptionv3_model/Logits_Conv2d_1c_1x1_biases.npy"), + PadStrideInfo(1, 1, 0, 0)) + .set_name("Logits/Conv2d_1c_1x1/convolution") + << ReshapeLayer(TensorShape(1001U)).set_name("Predictions/Reshape") + << SoftmaxLayer().set_name("Predictions/Softmax") + << OutputLayer(get_output_accessor()); + + // Finalize graph + GraphConfig config; + config.use_tuner = (target == 2); + graph.finalize(target_hint, config); + } + + void do_run() + { + run_benchmark(graph); + } + +private: + Stream graph{ 0, "InceptionV3" }; + +private: + ConcatLayer get_inception_node_A(const std::string &data_path, std::string &¶m_path, + unsigned int a_filt, + std::tuple<unsigned int, unsigned int> b_filters, + std::tuple<unsigned int, unsigned int, unsigned int> c_filters, + unsigned int d_filt, + bool is_name_different = false) + { + std::string total_path = "/cnn_data/inceptionv3_model/" + param_path + "_"; + + // This is due to a naming issue in the tf model + std::string conv_id0 = "_0a_"; + std::string conv_id1 = "2d_0b_"; + if(is_name_different) + { + conv_id0 = "_0b_"; + conv_id1 = "_1_0c_"; + } + + SubStream i_a(graph); + i_a << ConvolutionLayer( + 1U, 1U, a_filt, + get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(1, 1, 0, 0)) + .set_name(param_path + "/Branch_0/Conv2d_0a_1x1/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_beta.npy"), + 0.001f) + .set_name(param_path + "/Branch_0/Conv2d_0a_1x1/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_0/Conv2d_0a_1x1/Relu"); + + SubStream i_b(graph); + i_b << ConvolutionLayer( + 1U, 1U, std::get<0>(b_filters), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d" + conv_id0 + "1x1_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(1, 1, 0, 
0)) + .set_name(param_path + "/Branch_1/Conv2d" + conv_id0 + "1x1/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d" + conv_id0 + "1x1_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d" + conv_id0 + "1x1_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d" + conv_id0 + "1x1_BatchNorm_beta.npy"), + 0.001f) + .set_name(param_path + "/Branch_1/Conv2d" + conv_id0 + "1x1/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_1/Conv2d" + conv_id0 + "1x1/Relu") + << ConvolutionLayer( + 5U, 5U, std::get<1>(b_filters), + get_weights_accessor(data_path, total_path + "Branch_1_Conv" + conv_id1 + "5x5_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(1, 1, 2, 2)) + .set_name(param_path + "/Branch_1/Conv2d" + conv_id1 + "5x5/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, total_path + "Branch_1_Conv" + conv_id1 + "5x5_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + "Branch_1_Conv" + conv_id1 + "5x5_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + get_weights_accessor(data_path, total_path + "Branch_1_Conv" + conv_id1 + "5x5_BatchNorm_beta.npy"), + 0.001f) + .set_name(param_path + "/Branch_1/Conv2d" + conv_id1 + "5x5/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_1/Conv2d" + conv_id1 + "5x5/Relu"); + + SubStream i_c(graph); + i_c << ConvolutionLayer( + 1U, 1U, std::get<0>(c_filters), + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(1, 1, 0, 0)) + .set_name(param_path + "/Branch_2/Conv2d_0a_1x1/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_BatchNorm_beta.npy"), + 0.001f) + .set_name(param_path + "/Branch_2/Conv2d_0a_1x1/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_2/Conv2d_0a_1x1/Relu") + << ConvolutionLayer( + 3U, 3U, std::get<1>(c_filters), + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_3x3_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(1, 1, 1, 1)) + .set_name(param_path + "/Branch_2/Conv2d_0b_3x3/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_3x3_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_3x3_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_3x3_BatchNorm_beta.npy"), + 0.001f) + .set_name(param_path + "/Branch_2/Conv2d_0b_3x3/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_2/Conv2d_0b_3x3/Relu") + << ConvolutionLayer( + 3U, 3U, std::get<2>(c_filters), + 
get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_3x3_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(1, 1, 1, 1)) + .set_name(param_path + "/Branch_2/Conv2d_0c_3x3/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_3x3_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_3x3_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_3x3_BatchNorm_beta.npy"), + 0.001f) + .set_name(param_path + "/Branch_2/Conv2d_0c_3x3/BatchNorm/batcnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_2/Conv2d_0c_3x3/Relu"); + + SubStream i_d(graph); + i_d << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 1, 1, DimensionRoundingType::CEIL), true)).set_name(param_path + "/Branch_3/AvgPool_0a_3x3/AvgPool") + << ConvolutionLayer( + 1U, 1U, d_filt, + get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(1, 1, 0, 0)) + .set_name(param_path + "/Branch_3/Conv2d_0b_1x1/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_BatchNorm_beta.npy"), + 0.001f) + .set_name(param_path + "/Branch_3/Conv2d_0b_1x1/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_3/Conv2d_0b_1x1/Relu"); + + return ConcatLayer(std::move(i_a), std::move(i_b), std::move(i_c), std::move(i_d)); + } + + ConcatLayer get_inception_node_B(const std::string &data_path, std::string &¶m_path, + unsigned int a_filt, + std::tuple<unsigned int, unsigned int, unsigned int> b_filters) + { + std::string total_path = "/cnn_data/inceptionv3_model/" + param_path + "_"; + SubStream i_a(graph); + i_a << ConvolutionLayer( + 3U, 3U, a_filt, + get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_1a_1x1_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(2, 2, 0, 0)) + .set_name(param_path + "/Branch_0/Conv2d_1a_1x1/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_1a_1x1_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_1a_1x1_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_1a_1x1_BatchNorm_beta.npy"), + 0.001f) + .set_name(param_path + "/Branch_0/Conv2d_1a_1x1/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_0/Conv2d_1a_1x1/Relu"); + + SubStream i_b(graph); + i_b << ConvolutionLayer( + 1U, 1U, std::get<0>(b_filters), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(1, 1, 0, 0)) + .set_name(param_path + "/Branch_1/Conv2d_0a_1x1/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, 
total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_beta.npy"), + 0.001f) + .set_name(param_path + "/Branch_1/Conv2d_0a_1x1/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_1/Conv2d_0a_1x1/Relu") + << ConvolutionLayer( + 3U, 3U, std::get<1>(b_filters), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_3x3_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(1, 1, 1, 1)) + .set_name(param_path + "/Branch_1/Conv2d_0b_3x3/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_3x3_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_3x3_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_3x3_BatchNorm_beta.npy"), + 0.001f) + .set_name(param_path + "/Branch_1/Conv2d_0b_3x3/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_1/Conv2d_0b_3x3/Relu") + << ConvolutionLayer( + 3U, 3U, std::get<2>(b_filters), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_1a_1x1_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(2, 2, 0, 0)) + .set_name(param_path + "/Branch_1/Conv2d_1a_1x1/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_1a_1x1_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_1a_1x1_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_1a_1x1_BatchNorm_beta.npy"), + 0.001f) + .set_name(param_path + "/Branch_1/Conv2d_1a_1x1/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_1/Conv2d_1a_1x1/Relu"); + + SubStream i_c(graph); + i_c << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL))).set_name(param_path + "/Branch_2/MaxPool_1a_3x3/MaxPool"); + + return ConcatLayer(std::move(i_a), std::move(i_b), std::move(i_c)); + } + + ConcatLayer get_inception_node_C(const std::string &data_path, std::string &¶m_path, + unsigned int a_filt, + std::tuple<unsigned int, unsigned int, unsigned int> b_filters, + std::tuple<unsigned int, unsigned int, unsigned int, unsigned int, unsigned int> c_filters, + unsigned int d_filt) + { + std::string total_path = "/cnn_data/inceptionv3_model/" + param_path + "_"; + SubStream i_a(graph); + i_a << ConvolutionLayer( + 1U, 1U, a_filt, + get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(1, 1, 0, 0)) + .set_name(param_path + "/Branch_0/Conv2d_0a_1x1/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + 
get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_beta.npy"), + 0.001f) + .set_name(param_path + "/Branch_0/Conv2d_0a_1x1/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_0/Conv2d_0a_1x1/Relu"); + + SubStream i_b(graph); + i_b << ConvolutionLayer( + 1U, 1U, std::get<0>(b_filters), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(1, 1, 0, 0)) + .set_name(param_path + "/Branch_1/Conv2d_0a_1x1/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_beta.npy"), + 0.001f) + .set_name(param_path + "/Branch_1/Conv2d_0a_1x1/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_1/Conv2d_0a_1x1/Relu") + << ConvolutionLayer( + 7U, 1U, std::get<1>(b_filters), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x7_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(1, 1, 3, 0)) + .set_name(param_path + "/Branch_1/Conv2d_0b_1x7/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x7_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x7_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x7_BatchNorm_beta.npy"), + 0.001f) + .set_name(param_path + "/Branch_1/Conv2d_0b_1x7/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_1/Conv2d_0b_1x7/Relu") + << ConvolutionLayer( + 1U, 7U, std::get<2>(b_filters), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_7x1_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(1, 1, 0, 3)) + .set_name(param_path + "/Branch_1/Conv2d_0c_7x1/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_7x1_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_7x1_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_7x1_BatchNorm_beta.npy"), + 0.001f) + .set_name(param_path + "/Branch_1/Conv2d_0c_7x1/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_0/Conv2d_0c_7x1/Relu"); + + SubStream i_c(graph); + i_c << ConvolutionLayer( + 1U, 1U, std::get<0>(c_filters), + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(1, 1, 0, 0)) + .set_name(param_path + "/Branch_2/Conv2d_0a_1x1/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + 
"Branch_2_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_BatchNorm_beta.npy"), + 0.001f) + .set_name(param_path + "/Branch_2/Conv2d_0a_1x1/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_2/Conv2d_0a_1x1/Relu") + << ConvolutionLayer( + 1U, 7U, std::get<1>(c_filters), + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_7x1_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(1, 1, 0, 3)) + .set_name(param_path + "/Branch_2/Conv2d_0b_7x1/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_7x1_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_7x1_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_7x1_BatchNorm_beta.npy"), + 0.001f) + .set_name(param_path + "/Branch_2/Conv2d_0b_7x1/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_2/Conv2d_0b_7x1/Relu") + << ConvolutionLayer( + 7U, 1U, std::get<2>(c_filters), + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x7_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(1, 1, 3, 0)) + .set_name(param_path + "/Branch_2/Conv2d_0c_1x7/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x7_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x7_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x7_BatchNorm_beta.npy"), + 0.001f) + .set_name(param_path + "/Branch_2/Conv2d_0c_1x7/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_2/Conv2d_0c_1x7/Relu") + << ConvolutionLayer( + 1U, 7U, std::get<3>(c_filters), + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_7x1_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(1, 1, 0, 3)) + .set_name(param_path + "/Branch_2/Conv2d_0d_7x1/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_7x1_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_7x1_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_7x1_BatchNorm_beta.npy"), + 0.001f) + .set_name(param_path + "/Branch_2/Conv2d_0d_7x1/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_2/Conv2d_0d_7x1/Relu") + << ConvolutionLayer( + 7U, 1U, std::get<4>(c_filters), + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0e_1x7_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(1, 1, 3, 0)) + .set_name(param_path + "/Branch_2/Conv2d_0e_1x7/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0e_1x7_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + 
"Branch_2_Conv2d_0e_1x7_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0e_1x7_BatchNorm_beta.npy"), + 0.001f) + .set_name(param_path + "/Branch_2/Conv2d_0e_1x7/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_2/Conv2d_0e_1x7/Relu"); + + SubStream i_d(graph); + i_d << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 1, 1, DimensionRoundingType::CEIL), true)).set_name(param_path + "/Branch_3/AvgPool_0a_3x3/AvgPool") + << ConvolutionLayer( + 1U, 1U, d_filt, + get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(1, 1, 0, 0)) + .set_name(param_path + "/Branch_3/Conv2d_0b_1x1/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_BatchNorm_beta.npy"), + 0.001f) + .set_name(param_path + "/Branch_3/Conv2d_0b_1x1/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_3/Conv2d_0b_1x1/Relu"); + + return ConcatLayer(std::move(i_a), std::move(i_b), std::move(i_c), std::move(i_d)); + } + + ConcatLayer get_inception_node_D(const std::string &data_path, std::string &¶m_path, + std::tuple<unsigned int, unsigned int> a_filters, + std::tuple<unsigned int, unsigned int, unsigned int, unsigned int> b_filters) + { + std::string total_path = "/cnn_data/inceptionv3_model/" + param_path + "_"; + SubStream i_a(graph); + i_a << ConvolutionLayer( + 1U, 1U, std::get<0>(a_filters), + get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(1, 1, 0, 0)) + .set_name(param_path + "/Branch_0/Conv2d_0a_1x1/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_beta.npy"), + 0.001f) + .set_name(param_path + "/Branch_0/Conv2d_0a_1x1/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_0/Conv2d_0a_1x1/Relu") + << ConvolutionLayer( + 3U, 3U, std::get<1>(a_filters), + get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_1a_3x3_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(2, 2, 0, 0)) + .set_name(param_path + "/Branch_0/Conv2d_1a_3x3/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_1a_3x3_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_1a_3x3_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_1a_3x3_BatchNorm_beta.npy"), + 0.001f) + .set_name(param_path + "/Branch_0/Conv2d_1a_3x3/BatchNorm/batchnorm") + << 
ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_0/Conv2d_1a_3x3/Relu"); + + SubStream i_b(graph); + i_b << ConvolutionLayer( + 1U, 1U, std::get<0>(b_filters), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(1, 1, 0, 0)) + .set_name(param_path + "/Branch_1/Conv2d_0a_1x1/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_beta.npy"), + 0.001f) + .set_name(param_path + "/Branch_1/Conv2d_0a_1x1/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_1/Conv2d_0a_1x1/Relu") + << ConvolutionLayer( + 7U, 1U, std::get<1>(b_filters), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x7_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(1, 1, 3, 0)) + .set_name(param_path + "/Branch_1/Conv2d_0b_1x7/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x7_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x7_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x7_BatchNorm_beta.npy"), + 0.001f) + .set_name(param_path + "/Branch_1/Conv2d_0b_1x7/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_1/Conv2d_0b_1x7/Relu") + << ConvolutionLayer( + 1U, 7U, std::get<2>(b_filters), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_7x1_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(1, 1, 0, 3)) + .set_name(param_path + "/Branch_1/Conv2d_0c_7x1/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_7x1_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_7x1_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_7x1_BatchNorm_beta.npy"), + 0.001f) + .set_name(param_path + "/Branch_1/Conv2d_0c_7x1/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_1/Conv2d_0c_7x1/Relu") + << ConvolutionLayer( + 3U, 3U, std::get<3>(b_filters), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_1a_3x3_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(2, 2, 0, 0)) + .set_name(param_path + "/Branch_1/Conv2d_1a_3x3/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_1a_3x3_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_1a_3x3_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_1a_3x3_BatchNorm_beta.npy"), + 0.001f) + .set_name(param_path + 
"/Branch_1/Conv2d_1a_3x3/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_1/Conv2d_1a_3x3/Relu"); + + SubStream i_c(graph); + i_c << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL))).set_name(param_path + "/Branch_2/MaxPool_1a_3x3/MaxPool"); + + return ConcatLayer(std::move(i_a), std::move(i_b), std::move(i_c)); + } + + ConcatLayer get_inception_node_E(const std::string &data_path, std::string &¶m_path, + unsigned int a_filt, + std::tuple<unsigned int, unsigned int, unsigned int> b_filters, + std::tuple<unsigned int, unsigned int, unsigned int, unsigned int> c_filters, + unsigned int d_filt, + bool is_name_different = false) + { + // This is due to a naming issue in the tf model + std::string conv_id = "_0b_"; + if(is_name_different) + { + conv_id = "_0c_"; + } + + std::string total_path = "/cnn_data/inceptionv3_model/" + param_path + "_"; + SubStream i_a(graph); + i_a << ConvolutionLayer( + 1U, 1U, a_filt, + get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(1, 1, 0, 0)) + .set_name(param_path + "/Branch_0/Conv2d_0a_1x1/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_beta.npy"), + 0.001f) + .set_name(param_path + "/Branch_0/Conv2d_0a_1x1/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_0/Conv2d_0a_1x1/Relu"); + + SubStream i_b(graph); + i_b << ConvolutionLayer( + 1U, 1U, std::get<0>(b_filters), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(1, 1, 0, 0)) + .set_name(param_path + "/Branch_1/Conv2d_0a_1x1/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_beta.npy"), + 0.001f) + .set_name(param_path + "/Branch_1/Conv2d_0a_1x1/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_1/Conv2d_0a_1x1/Relu"); + + SubStream i_b1(static_cast<IStream &>(i_b)); + i_b1 << ConvolutionLayer( + 3U, 1U, std::get<1>(b_filters), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x3_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(1, 1, 1, 0)) + .set_name(param_path + "/Branch_1/Conv2d_0b_1x3/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x3_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x3_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x3_BatchNorm_beta.npy"), + 0.001f) + 
.set_name(param_path + "/Branch_1/Conv2d_0b_1x3/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_1/Conv2d_0b_1x3/Relu"); + + SubStream i_b2(static_cast<IStream &>(i_b)); + i_b2 << ConvolutionLayer( + 1U, 3U, std::get<2>(b_filters), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d" + conv_id + "3x1_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(1, 1, 0, 1)) + .set_name(param_path + "/Branch_1/Conv2d" + conv_id + "3x1/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d" + conv_id + "3x1_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d" + conv_id + "3x1_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + get_weights_accessor(data_path, total_path + "Branch_1_Conv2d" + conv_id + "3x1_BatchNorm_beta.npy"), + 0.001f) + .set_name(param_path + "/Branch_1/Conv2d" + conv_id + "3x1/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_1/Conv2d" + conv_id + "3x1/Relu"); + + // Merge b1 and b2 + i_b << ConcatLayer(std::move(i_b1), std::move(i_b2)).set_name(param_path + "/Branch_1/concat"); + + SubStream i_c(graph); + i_c << ConvolutionLayer( + 1U, 1U, std::get<0>(c_filters), + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(1, 1, 0, 0)) + .set_name(param_path + "/Branch_2/Conv2d_0a_1x1/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_BatchNorm_beta.npy"), + 0.001f) + .set_name(param_path + "/Branch_2/Conv2d_0a_1x1/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_2/Conv2d_0a_1x1/Relu") + << ConvolutionLayer( + 3U, 3U, std::get<1>(c_filters), + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_3x3_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(1, 1, 1, 1)) + .set_name(param_path + "/Branch_2/Conv2d_0b_3x3/convolution") + << BatchNormalizationLayer( + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_3x3_BatchNorm_moving_mean.npy"), + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_3x3_BatchNorm_moving_variance.npy"), + get_random_accessor(1.f, 1.f), + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_3x3_BatchNorm_beta.npy"), + 0.001f) + .set_name(param_path + "/Branch_2/Conv2d_0b_3x3/BatchNorm/batchnorm") + << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_2/Conv2d_0b_3x3/Relu"); + + SubStream i_c1(static_cast<IStream &>(i_c)); + i_c1 << ConvolutionLayer( + 3U, 1U, std::get<2>(c_filters), + get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x3_weights.npy"), + std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr), + PadStrideInfo(1, 1, 1, 0)) + .set_name(param_path + "/Branch_2/Conv2d_0c_1x3/convolution") + << BatchNormalizationLayer( + 
+                   get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x3_BatchNorm_moving_mean.npy"),
+                   get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x3_BatchNorm_moving_variance.npy"),
+                   get_random_accessor(1.f, 1.f),
+                   get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x3_BatchNorm_beta.npy"),
+                   0.001f)
+               .set_name(param_path + "/Branch_2/Conv2d_0c_1x3/BatchNorm/batchnorm")
+             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_2/Conv2d_0c_1x3/Relu");
+
+        SubStream i_c2(static_cast<IStream &>(i_c));
+        i_c2 << ConvolutionLayer(
+                   1U, 3U, std::get<3>(c_filters),
+                   get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_3x1_weights.npy"),
+                   std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
+                   PadStrideInfo(1, 1, 0, 1))
+               .set_name(param_path + "/Branch_2/Conv2d_0d_3x1/convolution")
+             << BatchNormalizationLayer(
+                   get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_3x1_BatchNorm_moving_mean.npy"),
+                   get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_3x1_BatchNorm_moving_variance.npy"),
+                   get_random_accessor(1.f, 1.f),
+                   get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_3x1_BatchNorm_beta.npy"),
+                   0.001f)
+               .set_name(param_path + "/Branch_2/Conv2d_0d_3x1/BatchNorm/batchnorm")
+             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_2/Conv2d_0d_3x1/Relu");
+
+        // Merge i_c1 and i_c2
+        i_c << ConcatLayer(std::move(i_c1), std::move(i_c2)).set_name(param_path + "/Branch_2/concat");
+
+        SubStream i_d(graph);
+        i_d << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 1, 1, DimensionRoundingType::CEIL), true)).set_name(param_path + "/Branch_3/AvgPool_0a_3x3/AvgPool")
+            << ConvolutionLayer(
+                  1U, 1U, d_filt,
+                  get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_weights.npy"),
+                  std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
+                  PadStrideInfo(1, 1, 0, 0))
+              .set_name(param_path + "/Branch_3/Conv2d_0b_1x1/convolution")
+            << BatchNormalizationLayer(
+                  get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_BatchNorm_moving_mean.npy"),
+                  get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_BatchNorm_moving_variance.npy"),
+                  get_random_accessor(1.f, 1.f),
+                  get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_BatchNorm_beta.npy"),
+                  0.001f)
+              .set_name(param_path + "/Branch_3/Conv2d_0b_1x1/BatchNorm/batchnorm")
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU)).set_name(param_path + "/Branch_3/Conv2d_0b_1x1/Relu");
+
+        return ConcatLayer(std::move(i_a), std::move(i_b), std::move(i_c), std::move(i_d));
+    }
+};
+
+/** Main program for Inception V3
+ *
+ * @param[in] argc Number of arguments
+ * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ */
+int main(int argc, char **argv)
+{
+    InceptionV3Example example;
+
+    example.do_setup(argc, argv);
+    example.do_run();
+
+    return 0;
+}
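Note: every get_inception_node_* helper in this file follows the same shape: fork one SubStream per branch off the main Stream, append layers to each branch, and merge the branches with a ConcatLayer (branches can fork again through their IStream interface, as i_b1/i_b2 and i_c1/i_c2 do above). A condensed sketch of just that pattern, using the same frontend API as the code above; the function name and layer contents are hypothetical placeholders, not layers from the model:

    #include "arm_compute/graph.h"

    using namespace arm_compute::graph::frontend;

    // Hypothetical two-branch node built with the SubStream/ConcatLayer pattern.
    ConcatLayer make_two_branch_node(Stream &graph)
    {
        // Each SubStream forks from the current tail of the parent stream.
        SubStream branch_a(graph);
        branch_a << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));

        SubStream branch_b(graph);
        branch_b << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 1, 1)));

        // The returned node is appended to the parent stream by the caller:
        //     graph << make_two_branch_node(graph);
        return ConcatLayer(std::move(branch_a), std::move(branch_b));
    }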
diff --git a/runtime/contrib/benchmark_acl/src/benchmark_mobilenet.cpp b/runtime/contrib/benchmark_acl/src/benchmark_mobilenet.cpp
new file mode 100644
index 000000000..085be184e
--- /dev/null
+++ b/runtime/contrib/benchmark_acl/src/benchmark_mobilenet.cpp
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/graph.h"
+
+#include "Benchmark.h"
+
+#include <cstdlib>
+
+using namespace arm_compute::graph::frontend;
+
+inline std::unique_ptr<arm_compute::graph::ITensorAccessor> get_input_accessor(void)
+{
+    return get_accessor<InputAccessor>();
+}
+
+inline std::unique_ptr<arm_compute::graph::ITensorAccessor> get_random_accessor(float lower, float upper)
+{
+    return get_accessor<InputAccessor>();
+}
+
+inline std::unique_ptr<arm_compute::graph::ITensorAccessor> get_weights_accessor(const std::string &path, const std::string &data_file, DataLayout file_layout = DataLayout::NCHW)
+{
+    return get_accessor<InputAccessor>();
+}
+
+inline std::unique_ptr<arm_compute::graph::ITensorAccessor> get_output_accessor(void)
+{
+    return get_accessor<OutputAccessor>();
+}
+
+/** Example demonstrating how to implement MobileNet's network using the Compute Library's graph API
+ *
+ * @param[in] argc Number of arguments
+ * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner), [optional] Model ID (0 = MobileNetV1_1.0_224, 1 = MobileNetV1_0.75_160), [optional] Data layout (0 = NCHW, 1 = NHWC), [optional] Path to the weights folder, [optional] image, [optional] labels, [optional] Fast math (0 = DISABLED, 1 = ENABLED) )
+ */
+class GraphMobilenetExample
+{
+public:
+    void do_setup(int argc, char **argv)
+    {
+        std::string data_path; /* Path to the trainable data */
+        std::string image;     /* Image data */
+        std::string label;     /* Label data */
+
+        // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
+        const int target      = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
+        Target    target_hint = set_target_hint(target);
+        ConvolutionMethod          convolution_hint           = ConvolutionMethod::GEMM;
+        DepthwiseConvolutionMethod depthwise_convolution_hint = DepthwiseConvolutionMethod::Optimized3x3;
+        FastMathHint               fast_math_hint             = FastMathHint::Disabled;
+
+        // Set model to execute. 0 (MobileNetV1_1.0_224), 1 (MobileNetV1_0.75_160)
+        int model_id = (argc > 2) ? std::strtol(argv[2], nullptr, 10) : 0;
+        ARM_COMPUTE_ERROR_ON_MSG(model_id > 1, "Invalid model ID. Model must be 0 (MobileNetV1_1.0_224) or 1 (MobileNetV1_0.75_160)");
+        int layout_id = (argc > 3) ? std::strtol(argv[3], nullptr, 10) : 0;
+        ARM_COMPUTE_ERROR_ON_MSG(layout_id > 1, "Invalid layout ID. Layout must be 0 (NCHW) or 1 (NHWC)");
+
+        float        depth_scale  = (model_id == 0) ? 1.f : 0.75f;
+        unsigned int spatial_size = (model_id == 0) ? 224 : 160;
+        std::string model_path = (model_id == 0) ? "/cnn_data/mobilenet_v1_1_224_model/" : "/cnn_data/mobilenet_v1_075_160_model/";
+        TensorDescriptor input_descriptor_nchw = TensorDescriptor(TensorShape(spatial_size, spatial_size, 3U, 1U), DataType::F32);
+        TensorDescriptor input_descriptor_nhwc = TensorDescriptor(TensorShape(3U, spatial_size, spatial_size, 1U), DataType::F32).set_layout(DataLayout::NHWC);
+        TensorDescriptor input_descriptor      = (layout_id == 0) ? input_descriptor_nchw : input_descriptor_nhwc;
+
+        // Parse arguments
+        if(argc < 2)
+        {
+            // Print help
+            std::cout << "Usage: " << argv[0] << " [target] [model] [layout] [path_to_data] [image] [labels] [fast_math_hint]\n\n";
+            std::cout << "No model ID provided: using MobileNetV1_1.0_224\n\n";
+            std::cout << "No data layout provided: using NCHW\n\n";
+            std::cout << "No data folder provided: using random values\n\n";
+        }
+        else if(argc == 2)
+        {
+            std::cout << "Usage: " << argv[0] << " " << argv[1] << " [model] [layout] [path_to_data] [image] [labels] [fast_math_hint]\n\n";
+            std::cout << "No model ID provided: using MobileNetV1_1.0_224\n\n";
+            std::cout << "No data layout provided: using NCHW\n\n";
+            std::cout << "No data folder provided: using random values\n\n";
+        }
+        else if(argc == 3)
+        {
+            std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " [layout] [path_to_data] [image] [labels] [fast_math_hint]\n\n";
+            std::cout << "No data layout provided: using NCHW\n\n";
+            std::cout << "No data folder provided: using random values\n\n";
+        }
+        else if(argc == 4)
+        {
+            std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " [path_to_data] [image] [labels] [fast_math_hint]\n\n";
+            std::cout << "No data folder provided: using random values\n\n";
+        }
+        else if(argc == 5)
+        {
+            data_path = argv[4];
+            std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " [image] [labels] [fast_math_hint]\n\n";
+            std::cout << "No image provided: using random values\n\n";
+            std::cout << "No text file with labels provided: skipping output accessor\n\n";
+        }
+        else if(argc == 6)
+        {
+            data_path = argv[4];
+            image     = argv[5];
+            std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " " << argv[5] << " [labels] [fast_math_hint]\n\n";
+            std::cout << "No text file with labels provided: skipping output accessor\n\n";
+        }
+        else if(argc == 7)
+        {
+            data_path = argv[4];
+            image     = argv[5];
+            label     = argv[6];
+            std::cout << "Usage: " << argv[0] << " " << argv[1] << " " << argv[2] << " " << argv[3] << " " << argv[4] << " " << argv[5] << " " << argv[6] << " [fast_math_hint]\n\n";
+            std::cout << "No fast math info provided: disabling fast math\n\n";
+        }
+        else
+        {
+            data_path      = argv[4];
+            image          = argv[5];
+            label          = argv[6];
+            fast_math_hint = (std::strtol(argv[7], nullptr, 10) == 0) ? FastMathHint::Disabled : FastMathHint::Enabled;
+        }
+
+        // Add model path to data path
+        if(!data_path.empty())
+        {
+            data_path += model_path;
+        }
+
+        graph << target_hint
+              << convolution_hint
+              << depthwise_convolution_hint
+              << fast_math_hint
+              << InputLayer(input_descriptor,
+                            get_input_accessor())
+              << ConvolutionLayer(
+                    3U, 3U, 32U * depth_scale,
+                    get_weights_accessor(data_path, "Conv2d_0_weights.npy", DataLayout::NCHW),
+                    std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
+                    PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::FLOOR))
+                .set_name("Conv2d_0")
+              << BatchNormalizationLayer(
+                    get_weights_accessor(data_path, "Conv2d_0_BatchNorm_moving_mean.npy"),
+                    get_weights_accessor(data_path, "Conv2d_0_BatchNorm_moving_variance.npy"),
+                    get_weights_accessor(data_path, "Conv2d_0_BatchNorm_gamma.npy"),
+                    get_weights_accessor(data_path, "Conv2d_0_BatchNorm_beta.npy"),
+                    0.001f)
+                .set_name("Conv2d_0/BatchNorm")
+              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f)).set_name("Conv2d_0/Relu6");
+        graph << get_dwsc_node(data_path, "Conv2d_1", 64 * depth_scale, PadStrideInfo(1, 1, 1, 1), PadStrideInfo(1, 1, 0, 0));
+        graph << get_dwsc_node(data_path, "Conv2d_2", 128 * depth_scale, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+        graph << get_dwsc_node(data_path, "Conv2d_3", 128 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+        graph << get_dwsc_node(data_path, "Conv2d_4", 256 * depth_scale, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+        graph << get_dwsc_node(data_path, "Conv2d_5", 256 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+        graph << get_dwsc_node(data_path, "Conv2d_6", 512 * depth_scale, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+        graph << get_dwsc_node(data_path, "Conv2d_7", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+        graph << get_dwsc_node(data_path, "Conv2d_8", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+        graph << get_dwsc_node(data_path, "Conv2d_9", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+        graph << get_dwsc_node(data_path, "Conv2d_10", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+        graph << get_dwsc_node(data_path, "Conv2d_11", 512 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+        graph << get_dwsc_node(data_path, "Conv2d_12", 1024 * depth_scale, PadStrideInfo(2, 2, 0, 1, 0, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+        graph << get_dwsc_node(data_path, "Conv2d_13", 1024 * depth_scale, PadStrideInfo(1, 1, 1, 1, 1, 1, DimensionRoundingType::CEIL), PadStrideInfo(1, 1, 0, 0));
+        graph << PoolingLayer(PoolingLayerInfo(PoolingType::AVG)).set_name("Logits/AvgPool_1a")
+              << ConvolutionLayer(
+                    1U, 1U, 1001U,
+                    get_weights_accessor(data_path, "Logits_Conv2d_1c_1x1_weights.npy", DataLayout::NCHW),
+                    get_weights_accessor(data_path, "Logits_Conv2d_1c_1x1_biases.npy"),
+                    PadStrideInfo(1, 1, 0, 0))
+                .set_name("Logits/Conv2d_1c_1x1")
+              << ReshapeLayer(TensorShape(1001U)).set_name("Reshape")
+              << SoftmaxLayer().set_name("Softmax")
+              << OutputLayer(get_output_accessor());
+
+        // Finalize graph
+        GraphConfig config;
+        config.use_tuner = (target == 2);
+        graph.finalize(target_hint, config);
+    }
+    void do_run()
+    {
+        run_benchmark(graph);
+    }
+
+private:
+    Stream graph{ 0, "MobileNetV1" };
+
+    ConcatLayer get_dwsc_node(const std::string &data_path, std::string &&param_path,
+                              unsigned int conv_filt,
+                              PadStrideInfo dwc_pad_stride_info, PadStrideInfo conv_pad_stride_info)
+    {
+        std::string total_path = param_path + "_";
+        SubStream   sg(graph);
+        sg << DepthwiseConvolutionLayer(
+                 3U, 3U,
+                 get_weights_accessor(data_path, total_path + "depthwise_depthwise_weights.npy", DataLayout::NCHW),
+                 std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
+                 dwc_pad_stride_info)
+             .set_name(total_path + "depthwise/depthwise")
+           << BatchNormalizationLayer(
+                 get_weights_accessor(data_path, total_path + "depthwise_BatchNorm_moving_mean.npy"),
+                 get_weights_accessor(data_path, total_path + "depthwise_BatchNorm_moving_variance.npy"),
+                 get_weights_accessor(data_path, total_path + "depthwise_BatchNorm_gamma.npy"),
+                 get_weights_accessor(data_path, total_path + "depthwise_BatchNorm_beta.npy"),
+                 0.001f)
+             .set_name(total_path + "depthwise/BatchNorm")
+           << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f)).set_name(total_path + "depthwise/Relu6")
+           << ConvolutionLayer(
+                 1U, 1U, conv_filt,
+                 get_weights_accessor(data_path, total_path + "pointwise_weights.npy", DataLayout::NCHW),
+                 std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
+                 conv_pad_stride_info)
+             .set_name(total_path + "pointwise/Conv2D")
+           << BatchNormalizationLayer(
+                 get_weights_accessor(data_path, total_path + "pointwise_BatchNorm_moving_mean.npy"),
+                 get_weights_accessor(data_path, total_path + "pointwise_BatchNorm_moving_variance.npy"),
+                 get_weights_accessor(data_path, total_path + "pointwise_BatchNorm_gamma.npy"),
+                 get_weights_accessor(data_path, total_path + "pointwise_BatchNorm_beta.npy"),
+                 0.001f)
+             .set_name(total_path + "pointwise/BatchNorm")
+           << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f)).set_name(total_path + "pointwise/Relu6");
+
+        return ConcatLayer(std::move(sg));
+    }
+};
+
+/** Main program for MobileNetV1
+ *
+ * @param[in] argc Number of arguments
+ * @param[in] argv Arguments ( [optional] Target (0 = NEON, 1 = OpenCL, 2 = OpenCL with Tuner),
+ *                             [optional] Model ID (0 = MobileNetV1_1.0_224, 1 = MobileNetV1_0.75_160),
+ *                             [optional] Data layout (0 = NCHW, 1 = NHWC),
+ *                             [optional] Path to the weights folder,
+ *                             [optional] image,
+ *                             [optional] labels,
+ *                             [optional] Fast math for convolution layer (0 = DISABLED, 1 = ENABLED) )
+ */
+int main(int argc, char **argv)
+{
+    GraphMobilenetExample example;
+
+    example.do_setup(argc, argv);
+    example.do_run();
+
+    return 0;
+}
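Note: each benchmark_*.cpp in this commit follows the same recipe: build a Stream with the frontend API, stub every tensor accessor through Benchmark.h so no real weights or images are needed, finalize the graph, and hand it to run_benchmark(), which performs warm-up runs followed by COUNT timed iterations. A minimal sketch of a new benchmark in the same style; it assumes this directory's Benchmark.h helpers (set_target_hint, get_accessor, run_benchmark), and the pooling-only graph is a placeholder, not a real model. A matching executable would be added to CMakeLists.txt the same way as the three benchmarks above:

    #include "arm_compute/graph.h"
    #include "Benchmark.h"

    #include <cstdlib>

    using namespace arm_compute::graph::frontend;

    int main(int argc, char **argv)
    {
        // Same CLI convention as the benchmarks above: argv[1] selects the target.
        const int target      = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
        Target    target_hint = set_target_hint(target);

        Stream graph{ 0, "PlaceholderNet" };
        graph << target_hint
              << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
                            get_accessor<InputAccessor>())
              << PoolingLayer(PoolingLayerInfo(PoolingType::AVG)) // global average pooling
              << OutputLayer(get_accessor<OutputAccessor>());

        GraphConfig config;
        config.use_tuner = (target == 2);
        graph.finalize(target_hint, config);

        // Warm-up runs, then COUNT timed iterations (COUNT is an environment
        // variable read by Benchmark.cpp; it defaults to 1).
        run_benchmark(graph);

        return 0;
    }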