/*
* Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef __ONERT_BACKEND_CPU_OPERAND_TENSOR_H__
#define __ONERT_BACKEND_CPU_OPERAND_TENSOR_H__

#include "Allocator.h"

#include <backend/ITensor.h>
#include <ir/OperandInfo.h>

#include <cassert>
namespace onert
{
namespace backend
{
namespace cpu
{
namespace operand
{

class Tensor : public ITensor
{
public:
  Tensor() = delete;

public:
  Tensor(const ir::OperandInfo &info)
      : _info(info), _buffer(nullptr), _num_references(0), _allocator(nullptr)
  {
    // DO NOTHING
  }

public:
  // Exactly one of the two 'setBuffer' overloads must be called, and only once:
  // the raw-pointer overload for externally owned memory, or the allocator
  // overload for allocator-owned memory (constant tensors).
  void setBuffer(uint8_t *buffer)
  {
    assert(_buffer == nullptr && _allocator == nullptr);
    _buffer = buffer;
  }
  void setBuffer(const std::shared_ptr<cpu_common::Allocator> &alloc)
  {
    assert(_buffer == nullptr && _allocator == nullptr);
    _allocator = alloc;
  }
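
  // Minimal usage sketch (illustrative; 'info', 'external_ptr' and 'alloc' are
  // hypothetical caller-side names, not part of this header):
  //
  //   Tensor t{info};
  //   t.setBuffer(external_ptr); // externally owned raw buffer
  //   // ... or, exclusively:
  //   t.setBuffer(alloc);        // std::shared_ptr<cpu_common::Allocator>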

  ir::DataType data_type() const { return _info.typeInfo().type(); }
  float scale() const { return _info.typeInfo().scale(); }
  int32_t offset() const { return _info.typeInfo().offset(); }

public:
  uint8_t *buffer() const override
  {
    // Allocator-owned memory takes precedence over an externally set buffer.
    if (_allocator != nullptr)
      return _allocator->base();
    else
      return _buffer;
  }
  /**
   * @brief Get dimension by index
   *
   * @param index Index to get dimension
   * @return size_t Dimension at index
   * @note N : dimension(0)
   *       H : dimension(1)
   *       W : dimension(2)
   *       C : dimension(3)
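   *
   * Example (illustrative): for an NHWC tensor of shape {1, 224, 224, 3},
   * dimension(0) == 1, dimension(1) == 224, dimension(2) == 224 and
   * dimension(3) == 3.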
   */
  size_t dimension(size_t index) const override { return _info.shape().dim(index); }
  size_t num_dimensions() const override { return _info.shape().rank(); }
  size_t total_size() const override { return _info.total_size(); }
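
  // A sketch of what calcOffset (defined out-of-line) plausibly computes for a
  // dense row-major NHWC tensor. This is an illustrative assumption, not the
  // actual definition; ir::sizeOfDataType is likewise assumed to be available:
  //
  //   size_t offset = 0;
  //   for (size_t i = 0; i < num_dimensions(); ++i)
  //     offset = offset * dimension(i) + coords[i];
  //   return offset * ir::sizeOfDataType(data_type());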
  size_t calcOffset(const ir::Coordinates &coords) const override;
  ir::Layout layout() const override { return ir::Layout::NHWC; }
  bool has_padding() const override { return false; }
  void access(const std::function<void(ITensor &tensor)> &fn) final;
  bool is_dynamic() const override { return _info.memAllocType() == ir::MemAllocType::DYNAMIC; }

  void increase_ref()
  {
    assert(_buffer != nullptr || _allocator != nullptr);
    ++_num_references;
  }
  void decrease_ref()
  {
    assert(_buffer != nullptr || _allocator != nullptr);
    assert(_num_references > 0);
    --_num_references;
    // Only a constant tensor has an allocator pointer; release its memory
    // once the last reference is gone.
    if (_num_references == 0)
    {
      if (_buffer != nullptr)
        _buffer = nullptr;
      else
      {
        _allocator->release();
        _allocator = nullptr;
      }
    }
  }
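
  // Lifecycle sketch (illustrative; the caller side shown here is an
  // assumption, not part of this class):
  //
  //   tensor->setBuffer(alloc);  // memory must be set before the first ref
  //   tensor->increase_ref();    // e.g. once per consuming operation
  //   ...                        // operations read/write through buffer()
  //   tensor->decrease_ref();    // at zero, allocator-owned memory is released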

private:
  ir::OperandInfo _info;
  uint8_t *_buffer;
  int32_t _num_references;
  std::shared_ptr<cpu_common::Allocator> _allocator;
};

} // namespace operand
} // namespace cpu
} // namespace backend
} // namespace onert

#endif // __ONERT_BACKEND_CPU_OPERAND_TENSOR_H__