path: root/runtime/onert/backend/gpu_cl/ClMemoryManager.h
/*
 * Copyright (c) 2021 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __ONERT_BACKEND_GPU_CL_MEMORY_MANAGER_H__
#define __ONERT_BACKEND_GPU_CL_MEMORY_MANAGER_H__

#include <cassert>

#include "ir/OperandIndexMap.h"
#include "ir/Shape.h"
#include "open_cl/ClContext.h"
#include "open_cl/InferenceContext.h"
#include "open_cl/Status.h"
#include "open_cl/StorageTypeUtil.h"
#include "open_cl/TensorType.h"
#include "util/logging.h"

namespace onert
{
namespace backend
{
namespace gpu_cl
{

template <typename T_ITensor, typename T_Tensor> class ClMemoryManager
{
public:
  ClMemoryManager(CLContext *context) : _context{context} {}

  virtual ~ClMemoryManager() = default;

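  // Creates the backing CL tensor for every operand registered through
  // buildTensor(), using the shape and descriptor stored in the tensor reserver.
  // A failed CreateTensor() currently stops allocation without reporting an error.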
  virtual void allocate(void)
  {
    for (const auto &tensor_entry : _tensors)
    {
      auto tensor = tensor_entry.second;
      const auto &t = tensor_reserver_.Get(tensor_entry.first.value());
      const auto &shape = t->shape;
      const auto &descriptor = t->descriptor;
      if (!CreateTensor(*_context, shape, descriptor, tensor->handle()).ok())
      {
        return;
      }
    }
  }

  virtual void deallocate(void)
  {
    // NYI
  }

  virtual void startLifetime(const ir::OperandIndex &)
  { /* DO NOTHING */
  }
  virtual void finishLifetime(const ir::OperandIndex &)
  { /* DO NOTHING */
  }

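  // Registers an operand with this manager: wraps it in a T_Tensor, maps the
  // operand shape to a BHWC shape based on its rank, selects the best storage
  // type for the target device, and records the shape/descriptor in the tensor
  // reserver under the operand index so that allocate() can create the real
  // CL tensor later.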
  void buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &info,
                   InferenceContext::CreateInferenceInfo create_info,
                   std::shared_ptr<Environment> environment, DeviceInfo &device_info)
  {
    ValueId max_id = 0;
    auto data_type = DeduceDataTypeFromPrecision(create_info.precision);
    const auto shape = info.shape();

    auto tensor = std::make_shared<T_Tensor>(shape.rank(), shape, environment);
    _tensors[ind] = tensor;

    BHWC t_shape;
    switch (shape.rank())
    {
      case 1:
        // B layout
        t_shape = BHWC(shape.dim(0), 1, 1, 1);
        break;
      case 2:
        // BC layout
        t_shape = BHWC(shape.dim(0), 1, 1, shape.dim(1));
        break;
      case 3:
        // BWC layout
        t_shape = BHWC(shape.dim(0), 1, shape.dim(1), shape.dim(2));
        break;
      case 4:
        // BHWC layout
        t_shape = BHWC(shape.dim(0), shape.dim(1), shape.dim(2), shape.dim(3));
        break;
      default:
        break;
    }

    TensorStorageType storage_type = create_info.storage_type;
    Layout layout = t_shape.b == 1 ? Layout::HWC : Layout::BHWC;

    ValueId id = ind.value();
    storage_type = SelectBestStorageType(device_info, t_shape, storage_type, data_type, layout);
    auto dummy = std::make_shared<InferenceContext::DummyTensor>();
    dummy->shape = t_shape;
    dummy->descriptor = TensorDescriptor{data_type, storage_type, layout};
    tensor_reserver_.Add(id, dummy);

    max_id = std::max(max_id, id);

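    // Advance the reserver's next free id past the one just registered.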
    tensor_reserver_.SetNext(max_id + 1);
  }

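  // Accessors for the operand-indexed tensor map and the tensor reserver.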
  ir::OperandIndexMap<std::shared_ptr<T_Tensor>> &tensors(void) { return _tensors; }

  InferenceContext::TensorReserver &tensorReservers(void) { return tensor_reserver_; }

private:
  ir::OperandIndexMap<std::shared_ptr<T_Tensor>> _tensors;
  InferenceContext::TensorReserver tensor_reserver_;
  CLContext *_context;
};
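
// A minimal usage sketch. The tensor types (operand::ICLTensor, operand::CLTensor)
// and the create_info/environment/device_info setup are assumptions here; in
// practice the gpu_cl tensor builder owns this manager and supplies the real
// arguments.
//
//   ClMemoryManager<operand::ICLTensor, operand::CLTensor> mgr{cl_context};
//   for (const auto &entry : operand_infos) // ir::OperandIndex -> ir::OperandInfo
//     mgr.buildTensor(entry.first, entry.second, create_info, environment, device_info);
//   mgr.allocate(); // creates the CL tensors recorded by buildTensor()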

} // namespace gpu_cl
} // namespace backend
} // namespace onert

#endif // __ONERT_BACKEND_GPU_CL_MEMORY_MANAGER_H__