path: root/libs/kernel/acl/src/util.h
Diffstat (limited to 'libs/kernel/acl/src/util.h')
-rw-r--r--  libs/kernel/acl/src/util.h  193
1 file changed, 0 insertions(+), 193 deletions(-)
diff --git a/libs/kernel/acl/src/util.h b/libs/kernel/acl/src/util.h
deleted file mode 100644
index 48ed02783..000000000
--- a/libs/kernel/acl/src/util.h
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NNFW_KERNEL_ACL_UTIL_H__
-#define __NNFW_KERNEL_ACL_UTIL_H__
-#include <OperationsUtils.h>
-
-#include <cmath>
-#include <cassert>
-#include <cstdint>
-#include <functional>
-#include <iostream>
-#include <vector>
-
-namespace nnfw {
-namespace kernel {
-namespace acl {
-namespace util {
-
-// TODO: move this into a separate module.
-class TensorWrapper {
-public:
-  TensorWrapper(std::vector<uint32_t> dims,
-                OperandType type = OperandType::FLOAT32,
-                float scale = 1.0f,
-                int32_t offset = 0)
-    : _shape{type, dims, scale, offset}
-  {
-    // Only FLOAT32 is supported for now.
-    assert(type == OperandType::FLOAT32);
-
-    uint32_t size_bytes = sizeof(float);
-
-    _num_elems = 1;
-    for (auto& d : dims) {
-      _num_elems *= d;
-    }
-
-    _data = new uint8_t[_num_elems * size_bytes];
-  }
-
-  ~TensorWrapper() {
-    delete [] _data;
-  }
-
-  // _data is raw owned memory; a default copy would double-free it.
-  TensorWrapper(const TensorWrapper&) = delete;
-  TensorWrapper& operator=(const TensorWrapper&) = delete;
-
-  const nnfw::rt::Shape& shape() const {
-    return _shape;
-  }
-
-  uint32_t num_elems() const { return _num_elems; }
-
-  template<class T>
-  T at(const uint32_t& idx) const {
-    return reinterpret_cast<T*>(_data)[idx];
-  }
-
-  template<class T>
-  T& at(const uint32_t& idx) {
-    return reinterpret_cast<T*>(_data)[idx];
-  }
-
-  template<class T>
-  T* ptr() { return reinterpret_cast<T*>(_data); }
-
-  void initValue(float f) {
-    for (uint32_t i = 0; i < _num_elems; ++i) {
-      at<float>(i) = f;
-    }
-  }
-
-  typedef std::function<float(uint32_t n, uint32_t c, uint32_t h, uint32_t w)> funcInit4;
-  void initValue(funcInit4 f) {
-    assert(_shape.dimensions.size() == 4);
-
-    int N = _shape.dimensions[0];
-    int H = _shape.dimensions[1];
-    int W = _shape.dimensions[2];
-    int C = _shape.dimensions[3];
-
-    // NHWC storage: offset = ((n*H + h)*W + w)*C + c.
-    for (int n = 0; n < N; ++n) {
-      for (int h = 0; h < H; ++h) {
-        for (int w = 0; w < W; ++w) {
-          for (int c = 0; c < C; ++c) {
-            uint32_t offset = n*H*W*C + h*W*C + w*C + c;
-            at<float>(offset) = f(n, c, h, w);
-          }
-        }
-      }
-    }
-  }
-
-  typedef std::function<float(uint32_t c, uint32_t h, uint32_t w)> funcInit3;
-  void initValue(funcInit3 f) {
-    assert(_shape.dimensions.size() == 3);
-
-    int C = _shape.dimensions[0];
-    int H = _shape.dimensions[1];
-    int W = _shape.dimensions[2];
-
-    // Dimensions are given as CHW, but values are stored in HWC order.
-    for (int h = 0; h < H; ++h) {
-      for (int w = 0; w < W; ++w) {
-        for (int c = 0; c < C; ++c) {
-          uint32_t offset = h*W*C + w*C + c;
-          at<float>(offset) = f(c, h, w);
-        }
-      }
-    }
-  }
-
-  typedef std::function<float(uint32_t h, uint32_t w)> funcInit2;
-  void initValue(funcInit2 f) {
-    assert(_shape.dimensions.size() == 2);
-
-    int H = _shape.dimensions[0];
-    int W = _shape.dimensions[1];
-
-    for (int h = 0; h < H; ++h) {
-      for (int w = 0; w < W; ++w) {
-        uint32_t offset = h*W + w;
-        at<float>(offset) = f(h, w);
-      }
-    }
-  }
-
-  typedef std::function<float(uint32_t w)> funcInit1;
-  void initValue(funcInit1 f) {
-    assert(_shape.dimensions.size() == 1);
-
-    int W = _shape.dimensions[0];
-
-    for (int w = 0; w < W; ++w) {
-      at<float>(w) = f(w);
-    }
-  }
-
-  void initValue(const std::vector<float>& v) {
-    assert(v.size() == _num_elems);
-    for (uint32_t i = 0; i < _num_elems; ++i) {
-      at<float>(i) = v[i];
-    }
-  }
-
-  bool operator==(const TensorWrapper& t) const {
-    // The shapes must match before the contents are compared.
-    assert(num_elems() == t.num_elems());
-    assert(_shape.type == t.shape().type);
-    assert(_shape.scale == t.shape().scale);
-    assert(_shape.offset == t.shape().offset);
-    assert(_shape.dimensions == t.shape().dimensions);
-
-    // Only FLOAT32 is supported for now.
-    assert(_shape.type == OperandType::FLOAT32);
-
-    for (uint32_t i = 0; i < _num_elems; ++i) {
-      if (std::fabs(at<float>(i) - t.at<float>(i)) > 0.001f) {
-        std::cout << "Comparing [" << i << "] " << at<float>(i) << "," << t.at<float>(i) << std::endl;
-        return false;
-      }
-    }
-
-    return true;
-  }
-
-private:
-  nnfw::rt::Shape _shape;
-  uint32_t _num_elems;
-  uint8_t* _data;
-};
-
-void initData(float* data, int num, float value);
-bool compareData(const float* result, const float* expected, const nnfw::rt::Shape& shape);
-void initData_Increasing(float* data, int num, float value);
-
-void NCHW2NHWC(const float* nchw, float* nhwc, const nnfw::rt::Shape& shape);
-
-} // namespace util
-} // namespace acl
-} // namespace kernel
-} // namespace nnfw
-
-#endif // __NNFW_KERNEL_ACL_UTIL_H__
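
Two short sketches follow for context; neither is part of the original tree.

First, a hypothetical usage sketch of the deleted TensorWrapper, assuming util.h were still present and the program links against the nnfw runtime. The 4-D initValue overload fills an NHWC tensor through a (n, c, h, w) callback, and operator== compares element-wise within the 0.001f tolerance hard-coded above. The main() and the shape {1, 2, 2, 3} are illustrative assumptions.

#include <cassert>
#include <cstdint>
#include "util.h"  // the (deleted) header shown above

int main() {
  using nnfw::kernel::acl::util::TensorWrapper;

  TensorWrapper a({1, 2, 2, 3});  // NHWC: N=1, H=2, W=2, C=3
  TensorWrapper b({1, 2, 2, 3});

  // funcInit4 receives (n, c, h, w); values land in NHWC storage order.
  auto pattern = [](uint32_t n, uint32_t c, uint32_t h, uint32_t w) -> float {
    return static_cast<float>(n + c + h + w);
  };

  a.initValue(pattern);
  b.initValue(pattern);

  // Shapes match, so operator== falls through to the element-wise check.
  assert(a == b);
  return 0;
}

Second, the header only declares NCHW2NHWC; its definition lived elsewhere in the kernel sources. As a rough sketch of what the conversion has to do, here is a standalone variant that takes explicit dimensions instead of an nnfw::rt::Shape (an assumption made to keep the example self-contained): NCHW element ((n*C + c)*H + h)*W + w moves to NHWC element ((n*H + h)*W + w)*C + c.

#include <cstdint>

// Standalone sketch, not the original signature: dimensions are passed
// explicitly rather than via nnfw::rt::Shape.
void NCHW2NHWC(const float* nchw, float* nhwc,
               uint32_t N, uint32_t C, uint32_t H, uint32_t W) {
  for (uint32_t n = 0; n < N; ++n)
    for (uint32_t c = 0; c < C; ++c)
      for (uint32_t h = 0; h < H; ++h)
        for (uint32_t w = 0; w < W; ++w)
          nhwc[((n*H + h)*W + w)*C + c] = nchw[((n*C + c)*H + h)*W + w];
}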