author     Anthony Barbier <anthony.barbier@arm.com>    2017-06-23 15:42:00 +0100
committer  Anthony Barbier <anthony.barbier@arm.com>    2017-06-23 16:07:31 +0100
commit     dbdab85d6e0f96d3361a9e30310367d89953466c (patch)
tree       0cc80d19fd8192de6eca2d28f7e4062aa9deecbf /examples
parent     664d833b9d7b569db60b0f6d93e80f91f2c07c39 (diff)
arm_compute v17.06
Diffstat (limited to 'examples')
-rw-r--r--   examples/SConscript                          70
-rw-r--r--   examples/cl_convolution.cpp                   6
-rw-r--r--   examples/cl_events.cpp                        6
-rw-r--r--   examples/neon_cnn.cpp                        230
-rw-r--r--   examples/neon_convolution.cpp                 6
-rw-r--r--   examples/neon_copy_objects.cpp                4
-rw-r--r--   examples/neon_scale.cpp                       6
-rw-r--r--   examples/neoncl_scale_median_gaussian.cpp     6
8 files changed, 317 insertions, 17 deletions
diff --git a/examples/SConscript b/examples/SConscript
new file mode 100644
index 000000000..748f771ec
--- /dev/null
+++ b/examples/SConscript
@@ -0,0 +1,70 @@
+# Copyright (c) 2017 ARM Limited.
+#
+# SPDX-License-Identifier: MIT
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+import SCons
+import os.path
+
+Import('env')
+Import('arm_compute_a')
+Import('arm_compute_so')
+
+if env['opencl']:
+    Import('opencl')
+
+examples_env = env.Clone()
+
+examples_env.Append(CPPPATH = ["#"])
+examples_env.Append(LIBPATH = ["#build/%s" % env['build_dir']])
+examples_env.Append(LIBPATH = ["#build/%s/opencl-1.2-stubs" % env['build_dir']])
+
+# Build examples
+utils = examples_env.Object("../utils/Utils.cpp")
+
+if env['os'] in ['android', 'bare_metal']:
+    arm_compute_lib = arm_compute_a
+    arm_compute_dependency = arm_compute_a
+else:
+    arm_compute_lib = "arm_compute"
+    arm_compute_dependency = arm_compute_so
+
+if env['opencl'] and env['neon']:
+    for file in Glob("./neoncl_*.cpp"):
+        example = os.path.basename(os.path.splitext(str(file))[0])
+        prog = examples_env.Program(example, ["{}.cpp".format(example), utils], LIBS = [arm_compute_lib, "OpenCL"])
+        Depends(prog, [arm_compute_dependency, opencl])
+        alias = examples_env.Alias(example, prog)
+        Default(alias)
+
+if env['opencl']:
+    for file in Glob("./cl_*.cpp"):
+        example = os.path.basename(os.path.splitext(str(file))[0])
+        prog = examples_env.Program(example, ["{}.cpp".format(example), utils], LIBS = [arm_compute_lib, "OpenCL"])
+        Depends(prog, [arm_compute_dependency, opencl])
+        alias = examples_env.Alias(example, prog)
+        Default(alias)
+
+if env['neon']:
+    for file in Glob("./neon_*.cpp"):
+        example = os.path.basename(os.path.splitext(str(file))[0])
+        prog = examples_env.Program(example, ["{}.cpp".format(example), utils], LIBS = [arm_compute_lib])
+        Depends(prog, arm_compute_dependency)
+        alias = examples_env.Alias(example, prog)
+        Default(alias)
diff --git a/examples/cl_convolution.cpp b/examples/cl_convolution.cpp
index a021cdbae..06f6f144e 100644
--- a/examples/cl_convolution.cpp
+++ b/examples/cl_convolution.cpp
@@ -25,10 +25,10 @@
 #include "arm_compute/core/Types.h"
 #include "arm_compute/runtime/CL/CLFunctions.h"
 #include "arm_compute/runtime/CL/CLScheduler.h"
-#include "test_helpers/Utils.h"
+#include "utils/Utils.h"
 
 using namespace arm_compute;
-using namespace test_helpers;
+using namespace utils;
 
 /** Gaussian 3x3 matrix
  */
@@ -114,5 +114,5 @@ void main_cl_convolution(int argc, const char **argv)
  */
 int main(int argc, const char **argv)
 {
-    return test_helpers::run_example(argc, argv, main_cl_convolution);
+    return utils::run_example(argc, argv, main_cl_convolution);
 }
diff --git a/examples/cl_events.cpp b/examples/cl_events.cpp
index 5c39788b4..768f62062 100644
--- a/examples/cl_events.cpp
+++ b/examples/cl_events.cpp
@@ -25,10 +25,10 @@
 #include "arm_compute/core/Types.h"
 #include "arm_compute/runtime/CL/CLFunctions.h"
 #include "arm_compute/runtime/CL/CLScheduler.h"
-#include "test_helpers/Utils.h"
+#include "utils/Utils.h"
 
 using namespace arm_compute;
-using namespace test_helpers;
+using namespace utils;
 
 void main_cl_events(int argc, const char **argv)
 {
@@ -110,5 +110,5 @@ void main_cl_events(int argc, const char **argv)
  */
 int main(int argc, const char **argv)
 {
-    return test_helpers::run_example(argc, argv, main_cl_events);
+    return utils::run_example(argc, argv, main_cl_events);
 }
diff --git a/examples/neon_cnn.cpp b/examples/neon_cnn.cpp
new file mode 100644
index 000000000..952ae4d48
--- /dev/null
+++ b/examples/neon_cnn.cpp
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2016, 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/NEON/NEFunctions.h"
+
+#include "arm_compute/core/Types.h"
+#include "utils/Utils.h"
+
+using namespace arm_compute;
+using namespace utils;
+
+void main_cnn(int argc, const char **argv)
+{
+    ARM_COMPUTE_UNUSED(argc);
+    ARM_COMPUTE_UNUSED(argv);
+
+    // The src tensor should contain the input image
+    Tensor src;
+
+    // The weights and biases tensors should be initialized with the values inferred with the training
+    Tensor weights0;
+    Tensor weights1;
+    Tensor weights2;
+    Tensor biases0;
+    Tensor biases1;
+    Tensor biases2;
+
+    Tensor out_conv0;
+    Tensor out_conv1;
+    Tensor out_act0;
+    Tensor out_act1;
+    Tensor out_act2;
+    Tensor out_pool0;
+    Tensor out_pool1;
+    Tensor out_fc0;
+    Tensor out_softmax;
+
+    NEConvolutionLayer conv0;
+    NEConvolutionLayer conv1;
+    NEPoolingLayer pool0;
+    NEPoolingLayer pool1;
+    NEFullyConnectedLayer fc0;
+    NEActivationLayer act0;
+    NEActivationLayer act1;
+    NEActivationLayer act2;
+    NESoftmaxLayer softmax;
+
+    /* [Initialize tensors] */
+
+    // Initialize src tensor
+    constexpr unsigned int width_src_image = 32;
+    constexpr unsigned int height_src_image = 32;
+    constexpr unsigned int ifm_src_img = 1;
+
+    const TensorShape src_shape(width_src_image, height_src_image, ifm_src_img);
+    src.allocator()->init(TensorInfo(src_shape, 1, DataType::F32));
+
+    // Initialize tensors of conv0
+    constexpr unsigned int kernel_x_conv0 = 5;
+    constexpr unsigned int kernel_y_conv0 = 5;
+    constexpr unsigned int ofm_conv0 = 8;
+
+    const TensorShape weights_shape_conv0(kernel_x_conv0, kernel_y_conv0, src_shape.z(), ofm_conv0);
+    const TensorShape biases_shape_conv0(weights_shape_conv0[3]);
+    const TensorShape out_shape_conv0(src_shape.x(), src_shape.y(), weights_shape_conv0[3]);
+
+    weights0.allocator()->init(TensorInfo(weights_shape_conv0, 1, DataType::F32));
+    biases0.allocator()->init(TensorInfo(biases_shape_conv0, 1, DataType::F32));
+    out_conv0.allocator()->init(TensorInfo(out_shape_conv0, 1, DataType::F32));
+
+    // Initialize tensor of act0
+    out_act0.allocator()->init(TensorInfo(out_shape_conv0, 1, DataType::F32));
+
+    // Initialize tensor of pool0
+    TensorShape out_shape_pool0 = out_shape_conv0;
+    out_shape_pool0.set(0, out_shape_pool0.x() / 2);
+    out_shape_pool0.set(1, out_shape_pool0.y() / 2);
+    out_pool0.allocator()->init(TensorInfo(out_shape_pool0, 1, DataType::F32));
+
+    // Initialize tensors of conv1
+    constexpr unsigned int kernel_x_conv1 = 3;
+    constexpr unsigned int kernel_y_conv1 = 3;
+    constexpr unsigned int ofm_conv1 = 16;
+
+    const TensorShape weights_shape_conv1(kernel_x_conv1, kernel_y_conv1, out_shape_pool0.z(), ofm_conv1);
+
+    const TensorShape biases_shape_conv1(weights_shape_conv1[3]);
+    const TensorShape out_shape_conv1(out_shape_pool0.x(), out_shape_pool0.y(), weights_shape_conv1[3]);
+
+    weights1.allocator()->init(TensorInfo(weights_shape_conv1, 1, DataType::F32));
+    biases1.allocator()->init(TensorInfo(biases_shape_conv1, 1, DataType::F32));
+    out_conv1.allocator()->init(TensorInfo(out_shape_conv1, 1, DataType::F32));
+
+    // Initialize tensor of act1
+    out_act1.allocator()->init(TensorInfo(out_shape_conv1, 1, DataType::F32));
+
+    // Initialize tensor of pool1
+    TensorShape out_shape_pool1 = out_shape_conv1;
+    out_shape_pool1.set(0, out_shape_pool1.x() / 2);
+    out_shape_pool1.set(1, out_shape_pool1.y() / 2);
+    out_pool1.allocator()->init(TensorInfo(out_shape_pool1, 1, DataType::F32));
+
+    // Initialize tensor of fc0
+    constexpr unsigned int num_labels = 128;
+
+    const TensorShape weights_shape_fc0(out_shape_pool1.x() * out_shape_pool1.y() * out_shape_pool1.z(), num_labels);
+    const TensorShape biases_shape_fc0(num_labels);
+    const TensorShape out_shape_fc0(num_labels);
+
+    weights2.allocator()->init(TensorInfo(weights_shape_fc0, 1, DataType::F32));
+    biases2.allocator()->init(TensorInfo(biases_shape_fc0, 1, DataType::F32));
+    out_fc0.allocator()->init(TensorInfo(out_shape_fc0, 1, DataType::F32));
+
+    // Initialize tensor of act2
+    out_act2.allocator()->init(TensorInfo(out_shape_fc0, 1, DataType::F32));
+
+    // Initialize tensor of softmax
+    const TensorShape out_shape_softmax(out_shape_fc0.x());
+    out_softmax.allocator()->init(TensorInfo(out_shape_softmax, 1, DataType::F32));
+
+    /* -----------------------End: [Initialize tensors] */
+
+    /* [Configure functions] */
+
+    // in:32x32x1: 5x5 convolution, 8 output features maps (OFM)
+    conv0.configure(&src, &weights0, &biases0, &out_conv0, PadStrideInfo(1 /* stride_x */, 1 /* stride_y */, 2 /* pad_x */, 2 /* pad_y */));
+
+    // in:32x32x8, out:32x32x8, Activation function: relu
+    act0.configure(&out_conv0, &out_act0, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+
+    // in:32x32x8, out:16x16x8 (2x2 pooling), Pool type function: Max
+    pool0.configure(&out_act0, &out_pool0, PoolingLayerInfo(PoolingType::MAX, 2, PadStrideInfo(2 /* stride_x */, 2 /* stride_y */)));
+
+    // in:16x16x8: 3x3 convolution, 16 output features maps (OFM)
+    conv1.configure(&out_pool0, &weights1, &biases1, &out_conv1, PadStrideInfo(1 /* stride_x */, 1 /* stride_y */, 1 /* pad_x */, 1 /* pad_y */));
+
+    // in:16x16x16, out:16x16x16, Activation function: relu
+    act1.configure(&out_conv1, &out_act1, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+
+    // in:16x16x16, out:8x8x16 (2x2 pooling), Pool type function: Average
+    pool1.configure(&out_act1, &out_pool1, PoolingLayerInfo(PoolingType::AVG, 2, PadStrideInfo(2 /* stride_x */, 2 /* stride_y */)));
+
+    // in:8x8x16, out:128
+    fc0.configure(&out_pool1, &weights2, &biases2, &out_fc0);
+
+    // in:128, out:128, Activation function: relu
+    act2.configure(&out_fc0, &out_act2, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+
+    // in:128, out:128
+    softmax.configure(&out_act2, &out_softmax);
+
+    /* -----------------------End: [Configure functions] */
+
+    /* [Allocate tensors] */
+
+    // Now that the padding requirements are known we can allocate the images:
+    src.allocator()->allocate();
+    weights0.allocator()->allocate();
+    weights1.allocator()->allocate();
+    weights2.allocator()->allocate();
+    biases0.allocator()->allocate();
+    biases1.allocator()->allocate();
+    biases2.allocator()->allocate();
+    out_conv0.allocator()->allocate();
+    out_conv1.allocator()->allocate();
+    out_act0.allocator()->allocate();
+    out_act1.allocator()->allocate();
+    out_act2.allocator()->allocate();
+    out_pool0.allocator()->allocate();
+    out_pool1.allocator()->allocate();
+    out_fc0.allocator()->allocate();
+    out_softmax.allocator()->allocate();
+
+    /* -----------------------End: [Allocate tensors] */
+
+    /* [Initialize weights and biases tensors] */
+
+    // Once the tensors have been allocated, the src, weights and biases tensors can be initialized
+    // ...
+
+    /* -----------------------[Initialize weights and biases tensors] */
+
+    /* [Execute the functions] */
+
+    conv0.run();
+    act0.run();
+    pool0.run();
+    conv1.run();
+    act1.run();
+    pool1.run();
+    fc0.run();
+    act2.run();
+    softmax.run();
+
+    /* -----------------------End: [Execute the functions] */
+}
+
+/** Main program for cnn test
+ *
+ * The example implements the following CNN architecture:
+ *
+ * Input -> conv0:5x5 -> act0:relu -> pool:2x2 -> conv1:3x3 -> act1:relu -> pool:2x2 -> fc0 -> act2:relu -> softmax
+ *
+ * @param[in] argc Number of arguments
+ * @param[in] argv Arguments
+ */
+int main(int argc, const char **argv)
+{
+    return utils::run_example(argc, argv, main_cnn);
+}
\ No newline at end of file
diff --git a/examples/neon_convolution.cpp b/examples/neon_convolution.cpp
index fc68aa24b..222c8f9a3 100644
--- a/examples/neon_convolution.cpp
+++ b/examples/neon_convolution.cpp
@@ -24,10 +24,10 @@
 #include "arm_compute/runtime/NEON/NEFunctions.h"
 
 #include "arm_compute/core/Types.h"
-#include "test_helpers/Utils.h"
+#include "utils/Utils.h"
 
 using namespace arm_compute;
-using namespace test_helpers;
+using namespace utils;
 
 /** Gaussian 3x3 matrix
  */
@@ -113,5 +113,5 @@ void main_neon_convolution(int argc, const char **argv)
  */
 int main(int argc, const char **argv)
 {
-    return test_helpers::run_example(argc, argv, main_neon_convolution);
+    return utils::run_example(argc, argv, main_neon_convolution);
 }
diff --git a/examples/neon_copy_objects.cpp b/examples/neon_copy_objects.cpp
index 3f53939c3..191f45555 100644
--- a/examples/neon_copy_objects.cpp
+++ b/examples/neon_copy_objects.cpp
@@ -25,7 +25,7 @@
 #include "arm_compute/runtime/NEON/NEFunctions.h"
 
 #include "arm_compute/core/Types.h"
-#include "test_helpers/Utils.h"
+#include "utils/Utils.h"
 
 #include <cstring>
 #include <iostream>
@@ -148,5 +148,5 @@ void main_neon_copy_objects(int argc, const char **argv)
  */
 int main(int argc, const char **argv)
 {
-    return test_helpers::run_example(argc, argv, main_neon_copy_objects);
+    return utils::run_example(argc, argv, main_neon_copy_objects);
 }
diff --git a/examples/neon_scale.cpp b/examples/neon_scale.cpp
index c1435af08..75780c9bd 100644
--- a/examples/neon_scale.cpp
+++ b/examples/neon_scale.cpp
@@ -24,10 +24,10 @@
 #include "arm_compute/runtime/NEON/NEFunctions.h"
 
 #include "arm_compute/core/Types.h"
-#include "test_helpers/Utils.h"
+#include "utils/Utils.h"
 
 using namespace arm_compute;
-using namespace test_helpers;
+using namespace utils;
 
 void main_neon_scale(int argc, const char **argv)
 {
@@ -86,5 +86,5 @@ void main_neon_scale(int argc, const char **argv)
  */
 int main(int argc, const char **argv)
 {
-    return test_helpers::run_example(argc, argv, main_neon_scale);
+    return utils::run_example(argc, argv, main_neon_scale);
 }
diff --git a/examples/neoncl_scale_median_gaussian.cpp b/examples/neoncl_scale_median_gaussian.cpp
index a4e44146e..a32ba6daf 100644
--- a/examples/neoncl_scale_median_gaussian.cpp
+++ b/examples/neoncl_scale_median_gaussian.cpp
@@ -26,10 +26,10 @@
 #include "arm_compute/runtime/CL/CLFunctions.h"
 #include "arm_compute/runtime/CL/CLScheduler.h"
 #include "arm_compute/runtime/NEON/NEFunctions.h"
-#include "test_helpers/Utils.h"
+#include "utils/Utils.h"
 
 using namespace arm_compute;
-using namespace test_helpers;
+using namespace utils;
 
 /** Example demonstrating how to use both CL and NEON functions in the same pipeline
  *
@@ -122,5 +122,5 @@ void main_neoncl_scale_median_gaussian(int argc, const char **argv)
  */
 int main(int argc, const char **argv)
 {
-    return test_helpers::run_example(argc, argv, main_neoncl_scale_median_gaussian);
+    return utils::run_example(argc, argv, main_neoncl_scale_median_gaussian);
 }
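The new neon_cnn.cpp example deliberately leaves the actual filling of the src, weights and biases tensors as a placeholder comment. A minimal sketch of one way to populate an allocated F32 tensor, following the Window/Iterator pattern already used in neon_copy_objects.cpp, is shown below; the helper name fill_tensor_value and the constant fill value are illustrative assumptions, not part of this commit, and exact signatures may vary between library versions.

    #include "arm_compute/core/Helpers.h"
    #include "arm_compute/core/Window.h"
    #include "arm_compute/runtime/Tensor.h"

    // Hypothetical helper (not part of this commit): fill an allocated F32
    // tensor with a constant value by visiting every element.
    void fill_tensor_value(arm_compute::Tensor &tensor, float value)
    {
        using namespace arm_compute;

        // Build a window covering the full shape of the tensor.
        Window window;
        window.use_tensor_dimensions(tensor.info());

        // Iterate over the window and write the value through the iterator's pointer.
        Iterator it(&tensor, window);
        execute_window_loop(window, [&](const Coordinates &)
        {
            *reinterpret_cast<float *>(it.ptr()) = value;
        },
        it);
    }

    // Example usage inside main_cnn(), after the tensors have been allocated:
    //     fill_tensor_value(weights0, 0.5f);
    //     fill_tensor_value(biases0, 0.0f);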