path: root/runtime/neurun/backend/srcn/Convert.cc
/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Convert.h"

#include <cassert>
#include <cpp14/memory.h>
#include <ir/DataType.h>
#include "Swizzle.h"
#include <vector>

namespace neurun
{
namespace backend
{
namespace srcn
{

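// Converts a 4D kernel (filter) shape from the frontend filter layout to the backend
// filter layout by permuting its dimensions. Throws if no permutation exists between
// the two layouts.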
ir::Shape asKernelShape(const ir::Shape &shape, kernel::FilterLayout frontend_layout,
                        kernel::FilterLayout backend_layout)
{
  assert(shape.rank() == 4);
  if (frontend_layout == backend_layout)
  {
    return ir::Shape{shape.dim(0), shape.dim(1), shape.dim(2), shape.dim(3)};
  }

  const auto permutation = getFilterPermutation(frontend_layout, backend_layout);
  if (permutation.size() == 0)
  {
    throw std::runtime_error("Unsupported FilterLayout");
  }
  return ir::Shape{shape.dim(permutation[0]), shape.dim(permutation[1]), shape.dim(permutation[2]),
                   shape.dim(permutation[3])};
}

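// Converts a tensor shape from the frontend layout to the backend (NCNN) layout by
// remapping each axis through ToNCNNAxis.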
ir::Shape asTensorShape(const ir::Shape &shape, ir::Layout frontend_layout,
                        ir::Layout backend_layout)
{
  const uint32_t rank = shape.rank();

  ir::Shape ret(rank);
  for (uint32_t axis = 0; axis < rank; ++axis)
  {
    const auto ncnn_axis = ToNCNNAxis(rank, axis, frontend_layout, backend_layout);
    ret.dim(ncnn_axis) = shape.dim(axis);
  }

  return ret;
}

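// Builds an OperandInfo whose shape is converted to the backend layout; the type info
// is passed through unchanged.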
ir::OperandInfo asTensorInfo(const ir::Shape &shape, const ir::TypeInfo &typeInfo,
                             ir::Layout frontend_layout, ir::Layout backend_layout)
{
  ir::OperandInfo info(asTensorShape(shape, frontend_layout, backend_layout), typeInfo);

  return info;
}

} // namespace srcn
} // namespace backend
} // namespace neurun