Diffstat (limited to 'libs/ARMComputeEx/src/core/CL/kernels/CLActivationLayerExKernel.cpp')
-rw-r--r--  libs/ARMComputeEx/src/core/CL/kernels/CLActivationLayerExKernel.cpp  | 211
1 file changed, 211 insertions(+), 0 deletions(-)
diff --git a/libs/ARMComputeEx/src/core/CL/kernels/CLActivationLayerExKernel.cpp b/libs/ARMComputeEx/src/core/CL/kernels/CLActivationLayerExKernel.cpp
new file mode 100644
index 000000000..1fdd2f98f
--- /dev/null
+++ b/libs/ARMComputeEx/src/core/CL/kernels/CLActivationLayerExKernel.cpp
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright (c) 2016-2018 ARM Limited.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "arm_compute/core/CL/kernels/CLActivationLayerExKernel.h"
+
+#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLKernelLibraryEx.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/UtilsEx.h"
+
+using namespace arm_compute;
+
+namespace
+{
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output,
+ const ActivationLayerInfoEx &act_info)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::QASYMM8,
+ DataType::F16, DataType::F32);
+
+ // Checks performed when output is configured
+ if ((output != nullptr) && (output->total_size() != 0))
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ }
+
+ return Status{};
+}
+
+std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output)
+{
+ if (output != nullptr)
+ {
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
+    // Auto-initialize the output if it has not been initialized yet
+ auto_init_if_empty(*output, *input);
+ }
+
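+  // Each work-item processes a 16-byte vector, i.e. 16 / element_size elements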
+ const unsigned int num_elems_processed_per_iteration = 16 / input->element_size();
+
+ Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
+ bool window_changed = false;
+
+ if (output != nullptr)
+ {
+ AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration);
+ AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
+ window_changed = update_window_and_padding(win, input_access, output_access);
+ output_access.set_valid_region(win, input->valid_region());
+ }
+ else
+ {
+ window_changed = update_window_and_padding(
+ win, AccessWindowHorizontal(input, 0, num_elems_processed_per_iteration));
+ }
+
+ Status err = (window_changed)
+ ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!")
+ : Status{};
+ return std::make_pair(err, win);
+}
+} // namespace
+
+CLActivationLayerExKernel::CLActivationLayerExKernel()
+ : _input(nullptr), _output(nullptr), _run_in_place(false)
+{
+}
+
+void CLActivationLayerExKernel::configure(ICLTensor *input, ICLTensor *output,
+ ActivationLayerInfoEx act_info)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input);
+
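+  // Run in place when no separate output tensor is provided or when it aliases the input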
+ _run_in_place = (output == nullptr) || (output == input);
+
+ if (output != nullptr)
+ {
+    // Auto-initialize the output if it has not been initialized yet
+ auto_init_if_empty(*output->info(), *input->info()->clone());
+ }
+
+ ARM_COMPUTE_ERROR_THROW_ON(
+ validate_arguments(input->info(), (output != nullptr) ? output->info() : nullptr, act_info));
+
+ const unsigned int num_elems_processed_per_iteration = 16 / input->info()->element_size();
+ const DataType dt = input->info()->data_type();
+ float a_const = act_info.a();
+ float b_const = act_info.b();
+ int a_const_int = 0;
+ int b_const_int = 0;
+
+ // Create quantized version of constants a, b if needed
+ if (is_data_type_quantized(dt))
+ {
+ a_const_int =
+ input->info()->quantization_info().quantize(a_const, RoundingPolicy::TO_NEAREST_UP);
+ b_const_int =
+ input->info()->quantization_info().quantize(b_const, RoundingPolicy::TO_NEAREST_UP);
+ }
+
+ // Set build options
+ std::set<std::string> build_opts;
+ build_opts.emplace(
+ ("-DACT=" + lower_string(string_from_activation_func_ex(act_info.activation()))));
+ build_opts.emplace(("-DDATA_TYPE=" + get_cl_type_from_data_type(dt)));
+ build_opts.emplace(
+ ("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration)));
+
+ if (is_data_type_quantized(dt))
+ {
+ build_opts.emplace(("-DA_VAL=" + support::cpp11::to_string(a_const_int)));
+ build_opts.emplace(("-DB_VAL=" + support::cpp11::to_string(b_const_int)));
+
+ const int o1 = input->info()->quantization_info().offset;
+ // Quantized value of 0 corresponds to the offset o1
+ build_opts.emplace(("-DCONST_0=" + support::cpp11::to_string(o1)));
+
+ // Set scale and offset of the input and output if they have different quantization info
+ if (is_data_type_quantized_asymmetric(dt) && output != nullptr)
+ {
+ const float s1 = input->info()->quantization_info().scale;
+ const float s2 = output->info()->quantization_info().scale;
+ const int o2 = output->info()->quantization_info().offset;
+
+ if (o1 != o2 || s1 != s2)
+ {
+ build_opts.emplace(("-DS1_VAL=" + float_to_string_with_full_precision(s1)));
+ build_opts.emplace(("-DS2_VAL=" + float_to_string_with_full_precision(s2)));
+ build_opts.emplace(("-DO1_VAL=" + support::cpp11::to_string(o1)));
+ build_opts.emplace(("-DO2_VAL=" + support::cpp11::to_string(o2)));
+ }
+ }
+ }
+ else
+ {
+ build_opts.emplace(("-DA_VAL=" + float_to_string_with_full_precision(a_const)));
+ build_opts.emplace(("-DB_VAL=" + float_to_string_with_full_precision(b_const)));
+ }
+
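+  // Let the OpenCL kernel write its result back to the input buffer when running in place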
+ build_opts.emplace((_run_in_place) ? "-DIN_PLACE" : "");
+
+ // Create kernel
+ std::string kernel_name = std::string("activation_layer_ex");
+ _kernel =
+ static_cast<cl::Kernel>(CLKernelLibraryEx::get().create_kernel(kernel_name, build_opts));
+
+ // Make sure _kernel is initialized before calling the parent's configure
+ _input = input;
+ _output = output;
+
+ // Configure kernel window
+ auto win_config =
+ validate_and_configure_window(input->info(), (_run_in_place) ? nullptr : output->info());
+ ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
+ ICLKernel::configure_internal(win_config.second);
+
+ // Set config_id for enabling LWS tuning
+ _config_id = "activation_layer_ex_";
+ _config_id += lower_string(string_from_data_type(dt));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(input->info()->dimension(0));
+ _config_id += "_";
+ _config_id += support::cpp11::to_string(input->info()->dimension(1));
+}
+
+Status CLActivationLayerExKernel::validate(const ITensorInfo *input, const ITensorInfo *output,
+ const ActivationLayerInfoEx &act_info)
+{
+ const bool run_in_place = (output == nullptr) || (output == input);
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, act_info));
+ ARM_COMPUTE_RETURN_ON_ERROR(
+ validate_and_configure_window(input->clone().get(),
+ (run_in_place) ? nullptr : output->clone().get())
+ .first);
+
+ return Status{};
+}
+
+void CLActivationLayerExKernel::run(const Window &window, cl::CommandQueue &queue)
+{
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICLKernel::window(), window);
+
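+  // Collapse the higher dimensions into Z where possible so fewer 3D slices are enqueued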
+ Window collapsed = window.collapse_if_possible(ICLKernel::window(), Window::DimZ);
+ Window slice = collapsed.first_slice_window_3D();
+
+ do
+ {
+ unsigned int idx = 0;
+ add_3D_tensor_argument(idx, _input, slice);
+ if (!_run_in_place)
+ {
+ add_3D_tensor_argument(idx, _output, slice);
+ }
+ enqueue(queue, *this, slice, lws_hint());
+ } while (collapsed.slide_window_slice_3D(slice));
+}
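
A minimal usage sketch (not part of the diff) showing how this kernel can be driven directly through the ARM Compute Library runtime. CLTensor, TensorInfo and CLScheduler are standard ACL classes; the ActivationLayerInfoEx constructor and the RSQRT activation value are assumptions based on the kernel name and on the regular ActivationLayerInfo API, so adjust them to the actual ARMComputeEx definitions.

#include "arm_compute/core/CL/kernels/CLActivationLayerExKernel.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include "arm_compute/runtime/CL/CLTensor.h"

using namespace arm_compute;

int main()
{
  // Create the default OpenCL context and command queue
  CLScheduler::get().default_init();

  // 2D F32 tensors; the output mirrors the input shape
  CLTensor input, output;
  input.allocator()->init(TensorInfo(TensorShape(16U, 8U), 1, DataType::F32));
  output.allocator()->init(TensorInfo(TensorShape(16U, 8U), 1, DataType::F32));

  // Assumed ActivationLayerInfoEx API: constructed from an extended activation function
  CLActivationLayerExKernel kernel;
  kernel.configure(&input, &output,
                   ActivationLayerInfoEx(ActivationLayerInfoEx::ActivationFunctionEx::RSQRT));

  // Allocate backing CL buffers after configuration so padding requirements are known
  input.allocator()->allocate();
  output.allocator()->allocate();

  // Fill `input` here, then run the kernel on the default queue
  CLScheduler::get().enqueue(kernel);
  CLScheduler::get().sync();
  return 0;
}

The kernel can also be run in place by passing either the input tensor or nullptr as the output, which is the case covered by the -DIN_PLACE build option above.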