/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __ONERT_BACKEND_CPU_TENSOR_H__
#define __ONERT_BACKEND_CPU_TENSOR_H__

#include <backend/cpu_common/Tensor.h>
#include <ir/Data.h>

#include <cassert>
#include <memory>
#include <stdexcept>

namespace onert
{
namespace backend
{
namespace cpu
{

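// The CPU backend reuses the common tensor implementation directly; only the
// external-data variant below adds backend-specific behavior.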
using Tensor = cpu_common::Tensor;

/**
 * @brief Tensor that refers to data in external memory that is not managed by the backend,
 *        instead of allocating and copying the data. ExternalTensor's data pointer points
 *        to memory that is already allocated elsewhere, such as an mmapped area.
 *        This means an ExternalTensor can be backed by any kind of ir::Data.
 *        To support this, the following is assumed: no padding, always NHWC layout,
 *        constant, and not dynamic.
 */
class ExternalTensor : public Tensor
{
public:
  ExternalTensor() = delete;
  virtual ~ExternalTensor();

public:
  ExternalTensor(const ir::OperandInfo &info, const ir::Layout layout)
      : Tensor(info, layout, nullptr)
  {
    assert(_layout == ir::Layout::NHWC);
    assert(_info.isConstant());
    assert(_info.isDynamic() == false);
  }

public:
  /**
   * @brief     Set Data shared from outside so that this ExternalTensor is not
   *            allocated by the CPU backend
   * @param[in] data    data of the Operand to be set
   */
  void setData(const std::shared_ptr<ir::Data> data)
  {
    assert(data != nullptr);
    _data = data;
    // Note: some ops, such as cker::Conv, access this tensor through buffer(),
    // so _buffer is set here as well instead of being left as nullptr.
    _buffer = const_cast<uint8_t *>(_data->base());
  }

public:
  uint8_t *buffer() const override { return _buffer; }

  bool is_constant() const override { return true; }
  bool is_dynamic() const override { return false; }
  void set_dynamic() override
  {
    throw std::runtime_error("This tensor does not support becoming dynamic");
  }

  void setShape(const ir::Shape &) override
  {
    throw std::runtime_error("This tensor does not support changing shape");
  }

  void increase_ref() override { ++_num_references; }

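  // Decrement the reference count; when it reaches zero, the shared Data is
  // released and the raw buffer pointer is cleared so the external memory can
  // be reclaimed by its owner.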
  void decrease_ref() override
  {
    assert(_data != nullptr);
    assert(_num_references > 0);
    --_num_references;
    if (_num_references == 0)
    {
      _data.reset();
      _buffer = nullptr;
    }
  }

  /**
   * @brief Reset reference count to zero and release data
   */
  void reset_ref() override
  {
    assert(_data != nullptr);
    assert(_num_references > 0);
    _num_references = 0;

    _data.reset();
    _buffer = nullptr;
  }

  int32_t num_references() override { return _num_references; }

private:
  std::shared_ptr<const ir::Data> _data;
};
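
// Usage sketch (illustrative only, not part of this header), assuming `info`
// describes a constant, non-dynamic operand and `data` is that operand's
// std::shared_ptr<ir::Data>; actual call sites in the CPU backend may differ:
//
//   auto tensor = std::make_shared<ExternalTensor>(info, ir::Layout::NHWC);
//   tensor->setData(data);   // points the tensor at the external memory
//   tensor->increase_ref();  // one reference per consuming operation
//   ...                      // run the operations that read tensor->buffer()
//   tensor->decrease_ref();  // last decrease releases `data` and the buffer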

} // namespace cpu
} // namespace backend
} // namespace onert

#endif // __ONERT_BACKEND_CPU_TENSOR_H__