Diffstat (limited to 'contrib/convacl')
-rw-r--r--  contrib/convacl/CMakeLists.txt          20
-rw-r--r--  contrib/convacl/src/io_accessor.cc     110
-rw-r--r--  contrib/convacl/src/io_accessor.h       93
-rw-r--r--  contrib/convacl/src/nnapi_acl_conv.cc  239
4 files changed, 0 insertions, 462 deletions
diff --git a/contrib/convacl/CMakeLists.txt b/contrib/convacl/CMakeLists.txt
deleted file mode 100644
index ca6411211..000000000
--- a/contrib/convacl/CMakeLists.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-if(NOT BUILD_LABS)
- return()
-endif(NOT BUILD_LABS)
-
-if(NOT ${TARGET_ARCH_BASE} STREQUAL "arm")
- return()
-endif(NOT ${TARGET_ARCH_BASE} STREQUAL "arm")
-
-nnfw_find_package(ARMCompute REQUIRED)
-
-file(GLOB_RECURSE NNAPI_CONVACL_SRCS "src/*.cc")
-
-link_directories(${CMAKE_INSTALL_PREFIX}/lib)
-
-add_library(exp_convacl SHARED ${NNAPI_CONVACL_SRCS})
-target_include_directories(exp_convacl PUBLIC ${NNFW_INCLUDE_DIR})
-target_link_libraries(exp_convacl nnfw_util arm_compute_graph)
-
-# NN API clients load 'libneuralnetworks.so'; naming the output 'neuralnetworks' makes this library a drop-in for it
-set_target_properties(exp_convacl PROPERTIES OUTPUT_NAME neuralnetworks)
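
The OUTPUT_NAME trick above works because NN API clients resolve the runtime by its canonical soname, so the shared library built here ends up as libneuralnetworks.so. A minimal sketch of checking that the stub is picked up at runtime (a hypothetical standalone program; it assumes the entry points get C linkage via NeuralNetworks.h, that the library is on the loader path, and a build with -ldl):

#include <dlfcn.h>
#include <cstdio>

int main()
{
  // With OUTPUT_NAME "neuralnetworks", add_library(... SHARED ...) produces
  // libneuralnetworks.so, the soname that NN API clients open.
  void *handle = dlopen("libneuralnetworks.so", RTLD_NOW);
  if (handle == nullptr)
  {
    std::fprintf(stderr, "dlopen failed: %s\n", dlerror());
    return 1;
  }
  // One of the entry points defined in nnapi_acl_conv.cc below.
  void *sym = dlsym(handle, "ANeuralNetworksModel_create");
  std::printf("ANeuralNetworksModel_create: %s\n", sym ? "found" : "missing");
  dlclose(handle);
  return 0;
}
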
diff --git a/contrib/convacl/src/io_accessor.cc b/contrib/convacl/src/io_accessor.cc
deleted file mode 100644
index b7fdee721..000000000
--- a/contrib/convacl/src/io_accessor.cc
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "io_accessor.h"
-#include <arm_compute/core/Helpers.h>
-#include <ostream>
-
-bool InputAccessor::access_tensor(arm_compute::ITensor &tensor)
-{
- // Fill the input tensor with test values and print each element
- arm_compute::Window window;
- window.use_tensor_dimensions(tensor.info()->tensor_shape());
-
- execute_window_loop(window, [&](const arm_compute::Coordinates& id)
- {
- *reinterpret_cast<float *>(tensor.ptr_to_element(id)) = _test_input;
- _test_input += _inc ? 1.0 : 0.0;
-
- std::cout << "Input " << id.y() << "," << id.x() << " = ";
- std::cout << *reinterpret_cast<float *>(tensor.ptr_to_element(id));
- std::cout << std::endl;
- });
- return true;
-}
-
-bool OutputAccessor::access_tensor(arm_compute::ITensor &tensor)
-{
- // Print each element of the output tensor
- arm_compute::Window window;
- window.use_tensor_dimensions(tensor.info()->tensor_shape());
-
- execute_window_loop(window, [&](const arm_compute::Coordinates& id)
- {
- std::cout << "Output " << id.y() << "," << id.x() << " = ";
- std::cout << *reinterpret_cast<float *>(tensor.ptr_to_element(id));
- std::cout << std::endl;
- });
- return false; // end the network
-}
-
-bool WeightAccessor::access_tensor(arm_compute::ITensor &tensor)
-{
- // Fill the weight tensor with test values and print each element
- arm_compute::Window window;
- window.use_tensor_dimensions(tensor.info()->tensor_shape());
-
- execute_window_loop(window, [&](const arm_compute::Coordinates& id)
- {
- *reinterpret_cast<float *>(tensor.ptr_to_element(id)) = _test_weight;
- _test_weight += _inc ? 1.0 : 0.0;
-
- std::cout << "Weight " << id.y() << "," << id.x() << " = ";
- std::cout << *reinterpret_cast<float *>(tensor.ptr_to_element(id));
- std::cout << std::endl;
- });
- return true;
-}
-
-bool BiasAccessor::access_tensor(arm_compute::ITensor &tensor)
-{
- // Fill the bias tensor with zeros and print each element
- arm_compute::Window window;
- window.use_tensor_dimensions(tensor.info()->tensor_shape());
-
- execute_window_loop(window, [&](const arm_compute::Coordinates& id)
- {
- *reinterpret_cast<float *>(tensor.ptr_to_element(id)) = 0.0;
-
- std::cout << "Bias " << id.y() << "," << id.x() << " = ";
- std::cout << *reinterpret_cast<float *>(tensor.ptr_to_element(id));
- std::cout << std::endl;
- });
- return true;
-}
diff --git a/contrib/convacl/src/io_accessor.h b/contrib/convacl/src/io_accessor.h
deleted file mode 100644
index 4033020e0..000000000
--- a/contrib/convacl/src/io_accessor.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __IO_ACCESSOR_H__
-#define __IO_ACCESSOR_H__
-
-#include <arm_compute/graph/ITensorAccessor.h>
-
-class InputAccessor : public arm_compute::graph::ITensorAccessor
-{
-public:
- InputAccessor(bool inc) : _inc(inc) { _test_input = 1.0; }
- InputAccessor(InputAccessor&&) = default;
-
- // Inherited methods overridden:
- bool access_tensor(arm_compute::ITensor& tensor) override;
-
-private:
- bool _inc;
- float _test_input;
-};
-
-class OutputAccessor : public arm_compute::graph::ITensorAccessor
-{
-public:
- OutputAccessor() = default;
- OutputAccessor(OutputAccessor&&) = default;
-
- // Inherited methods overridden:
- bool access_tensor(arm_compute::ITensor& tensor) override;
-};
-
-class WeightAccessor : public arm_compute::graph::ITensorAccessor
-{
-public:
- WeightAccessor(bool inc) : _inc(inc) { _test_weight = 1.0; }
- WeightAccessor(WeightAccessor&&) = default;
-
- // Inherited methods overridden:
- bool access_tensor(arm_compute::ITensor& tensor) override;
-
-private:
- bool _inc;
- float _test_weight;
-};
-
-class BiasAccessor : public arm_compute::graph::ITensorAccessor
-{
-public:
- BiasAccessor() = default;
- BiasAccessor(BiasAccessor&&) = default;
-
- // Inherited methods overridden:
- bool access_tensor(arm_compute::ITensor& tensor) override;
-};
-
-#endif // __IO_ACCESSOR_H__
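
For reference, the accessors above can be exercised on their own, outside the graph API; a minimal sketch, assuming the ARM Compute Library runtime is available and linked (the 3x3 F32 shape matches the convolution example in nnapi_acl_conv.cc below):

#include <arm_compute/runtime/Tensor.h>
#include <arm_compute/core/TensorInfo.h>
#include "io_accessor.h"

int main()
{
  // Allocate a single-channel 3x3 float tensor on the CPU.
  arm_compute::Tensor tensor;
  tensor.allocator()->init(arm_compute::TensorInfo(
      arm_compute::TensorShape(3U, 3U), 1, arm_compute::DataType::F32));
  tensor.allocator()->allocate();

  // With inc=true the accessor writes 1, 2, ..., 9 and prints each element;
  // with inc=false every element stays at 1.0.
  InputAccessor accessor(true);
  accessor.access_tensor(tensor);

  tensor.allocator()->free();
  return 0;
}
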
diff --git a/contrib/convacl/src/nnapi_acl_conv.cc b/contrib/convacl/src/nnapi_acl_conv.cc
deleted file mode 100644
index 091d19497..000000000
--- a/contrib/convacl/src/nnapi_acl_conv.cc
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <NeuralNetworks.h>
-#include <stdexcept>
-#include <iostream>
-#include <string>
-#include <map>
-#include <cassert>
-#include <memory>
-#include <boost/format.hpp>
-// ACL Headers
-#include <arm_compute/graph.h>
-
-#include "util/environment.h"
-#include "io_accessor.h"
-
-//
-// Asynchronous Event
-//
-struct ANeuralNetworksEvent
-{
-};
-
-int ANeuralNetworksEvent_wait(ANeuralNetworksEvent* event)
-{
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-void ANeuralNetworksEvent_free(ANeuralNetworksEvent* event)
-{
- delete event;
-}
-
-//
-// Memory
-//
-struct ANeuralNetworksMemory
-{
- // 1st approach - Store all the data inside ANeuralNetworksMemory object
- // 2nd approach - Store metadata only, and defer data loading as much as possible
-};
-
-int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, size_t offset, ANeuralNetworksMemory** memory)
-{
- *memory = new ANeuralNetworksMemory;
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-void ANeuralNetworksMemory_free(ANeuralNetworksMemory* memory)
-{
- delete memory;
-}
-
-//
-// Model
-//
-struct ANeuralNetworksModel
-{
- // ANeuralNetworksModel should be a factory for Graph IR (a.k.a ISA Frontend)
- // TODO Record # of operands
- uint32_t numOperands;
-
- ANeuralNetworksModel() : numOperands(0)
- {
- // DO NOTHING
- }
-};
-
-int ANeuralNetworksModel_create(ANeuralNetworksModel** model)
-{
- *model = new ANeuralNetworksModel;
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-void ANeuralNetworksModel_free(ANeuralNetworksModel* model)
-{
- delete model;
-}
-
-int ANeuralNetworksModel_addOperand(ANeuralNetworksModel* model, const ANeuralNetworksOperandType *type)
-{
- model->numOperands += 1;
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel* model, int32_t index, const void* buffer, size_t length)
-{
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel* model, int32_t index, const ANeuralNetworksMemory* memory, size_t offset, size_t length)
-{
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_addOperation(ANeuralNetworksModel* model, ANeuralNetworksOperationType type, uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount, const uint32_t* outputs)
-{
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_identifyInputsAndOutputs(ANeuralNetworksModel* model, uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount, const uint32_t* outputs)
-{
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksModel_finish(ANeuralNetworksModel* model)
-{
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-//
-// Compilation
-//
-struct ANeuralNetworksCompilation
-{
- // ANeuralNetworksCompilation should hold a compiled IR
-};
-
-int ANeuralNetworksCompilation_create(ANeuralNetworksModel* model, ANeuralNetworksCompilation** compilation)
-{
- *compilation = new ANeuralNetworksCompilation;
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksCompilation_finish(ANeuralNetworksCompilation* compilation)
-{
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-//
-// Execution
-//
-struct ANeuralNetworksExecution
-{
- // ANeuralNetworksExecution corresponds to NPU::Interp::Session
-
- arm_compute::graph::frontend::Stream graph{0, "ACL_CONV"};
-};
-
-int ANeuralNetworksExecution_create(ANeuralNetworksCompilation* compilation, ANeuralNetworksExecution** execution)
-{
- std::cout << __FUNCTION__ << " +++" << std::endl;
- *execution = new ANeuralNetworksExecution;
-
- using arm_compute::DataType;
- using arm_compute::graph::Target;
- using arm_compute::graph::TensorDescriptor;
- using arm_compute::TensorShape;
- using arm_compute::graph::frontend::InputLayer;
- using arm_compute::graph::frontend::OutputLayer;
-
- ANeuralNetworksExecution* execlocal = *execution;
- arm_compute::graph::frontend::Stream& graph = execlocal->graph;
-
- Target target_hint = nnfw::util::get_env_int("NNFW_ACL_USENEON")
- ? Target::NEON : Target::CL;
- bool autoinc = nnfw::util::get_env_bool("NNFW_TEST_AUTOINC");
-
- graph << target_hint
- << InputLayer(TensorDescriptor(TensorShape(3U, 3U, 1U, 1U), DataType::F32),
- std::unique_ptr<InputAccessor>(new InputAccessor(autoinc)))
- << arm_compute::graph::frontend::ConvolutionLayer(
- 3U, 3U, 1U,
- std::unique_ptr<WeightAccessor>(new WeightAccessor(autoinc)),
- std::unique_ptr<BiasAccessor>(new BiasAccessor()),
- arm_compute::PadStrideInfo(1, 1, 0, 0))
- << OutputLayer(
- std::unique_ptr<OutputAccessor>(new OutputAccessor()));
-
- std::cout << __FUNCTION__ << " ---" << std::endl;
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-// ANeuralNetworksExecution_setInput and ANeuralNetworksExecution_setOutput specify host buffers for input/output
-int ANeuralNetworksExecution_setInput(ANeuralNetworksExecution* execution, int32_t index, const ANeuralNetworksOperandType* type, const void* buffer, size_t length)
-{
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksExecution_setOutput(ANeuralNetworksExecution* execution, int32_t index, const ANeuralNetworksOperandType* type, const void* buffer, size_t length)
-{
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-int ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution* execution, ANeuralNetworksEvent** event)
-{
- std::cout << __FUNCTION__ << " +++" << std::endl;
- *event = new ANeuralNetworksEvent;
-
- // graph.run() segfaults if only target_hint has been added to the graph;
- // now that the layers above are wired up, it is safe to call graph.run().
- arm_compute::graph::frontend::Stream& graph = execution->graph;
- graph.run();
-
- std::cout << __FUNCTION__ << " ---" << std::endl;
- return ANEURALNETWORKS_NO_ERROR;
-}
-
-void ANeuralNetworksExecution_free(ANeuralNetworksExecution* execution)
-{
- delete execution;
-}
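
Taken together, the stubs above only need the usual NN API call sequence to reach graph.run(); a minimal sketch of a driver using nothing but the entry points defined in this file (no buffers are passed because setInput/setOutput are no-ops in this stub):

#include <NeuralNetworks.h>

int main()
{
  // Model and compilation are empty shells in this stub; the real work
  // happens in ANeuralNetworksExecution_create / _startCompute.
  ANeuralNetworksModel *model = nullptr;
  ANeuralNetworksModel_create(&model);
  ANeuralNetworksModel_finish(model);

  ANeuralNetworksCompilation *compilation = nullptr;
  ANeuralNetworksCompilation_create(model, &compilation);
  ANeuralNetworksCompilation_finish(compilation);

  // Builds the 3x3x1 ACL convolution graph with the io_accessor callbacks.
  ANeuralNetworksExecution *execution = nullptr;
  ANeuralNetworksExecution_create(compilation, &execution);

  // Runs the graph; the accessors print the input, weight and output values.
  ANeuralNetworksEvent *event = nullptr;
  ANeuralNetworksExecution_startCompute(execution, &event);
  ANeuralNetworksEvent_wait(event);

  ANeuralNetworksEvent_free(event);
  ANeuralNetworksExecution_free(execution);
  ANeuralNetworksModel_free(model);
  return 0;
}

With NNFW_TEST_AUTOINC unset, every input and weight element stays at 1.0 and the bias is 0, so the single valid 3x3 output should print as 9; with it set, input and weights are both filled with 1..9 in the same order, which would give 1*1 + 2*2 + ... + 9*9 = 285 under ACL's cross-correlation convention.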