1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
|
// This file is included from AddOpsAndParams defined in nnapi_delegate.cc
// and contains lambdas that extend the original TensorFlow Lite implementation.
// Appends the output-size scalar operands for RESIZE_BILINEAR.
// The last augmented input is the TFLite "new size" tensor, laid out as
// [new_height, new_width]; NNAPI's RESIZE_BILINEAR instead takes two INT32
// scalars in the order (output_width, output_height), so the two elements
// are emitted in reverse of the tensor layout. The previous code emitted
// the same values in the same order but with the local names swapped
// (`height` held data[1], which is the width) — renamed here so the names
// match what the values actually are; emitted order is unchanged.
auto add_resize_bilinear_params = [&add_scalar_int32, &interpreter, &augmented_inputs](void* data) {
auto builtin = reinterpret_cast<TfLiteResizeBilinearParams*>(data);
if (builtin->align_corners) {
FATAL("Resize bilinear does not support align corners in NNAPI");
}
TfLiteTensor* tensor = interpreter->tensor(augmented_inputs.back());
assert(tensor->type == kTfLiteInt32);
assert(tensor->bytes == sizeof(int)*2);
// The size tensor is consumed here and must not be forwarded to NNAPI.
augmented_inputs.pop_back();
const int new_height = ((int*)(tensor->data.raw))[0];
const int new_width = ((int*)(tensor->data.raw))[1];
// NNAPI operand order: width first, then height.
add_scalar_int32(new_width);
add_scalar_int32(new_height);
};
auto check_l2normalization_params = [interpreter, &node](void* data) {
auto builtin = reinterpret_cast<TfLiteL2NormParams*>(data);
if (builtin->activation != kTfLiteActNone) {
FATAL("NNAPI does not support L2Normalization with fused activations");
}
if ((node.inputs->size > 0) &&
(interpreter->tensor(node.inputs->data[0])->dims->size != 4)) {
FATAL("NNAPI only supports input rank 4 for L2Normalization");
}
};
// Appends the TRANSPOSE_CONV scalar operands: padding, then the strides.
auto add_transpose_conv_params = [&add_scalar_int32](void* data) {
const auto* params = reinterpret_cast<TfLiteTransposeConvParams*>(data);
add_scalar_int32(params->padding);
add_scalar_int32(params->stride_width);
add_scalar_int32(params->stride_height);
};
// Appends the LOCAL_RESPONSE_NORMALIZATION scalar operands:
// radius, bias, alpha, beta (in that order).
auto add_lrn_params = [&add_scalar_int32, &add_scalar_float32](void* data) {
const auto* params = reinterpret_cast<TfLiteLocalResponseNormParams*>(data);
add_scalar_int32(params->radius);
add_scalar_float32(params->bias);
add_scalar_float32(params->alpha);
add_scalar_float32(params->beta);
};
// Appends the STRIDED_SLICE mask scalars (begin_mask, end_mask,
// shrink_axis_mask) and rejects masks the NN runtime does not implement.
auto add_strided_slice_params = [&add_scalar_int32](void* data) {
const auto* params = reinterpret_cast<TfLiteStridedSliceParams*>(data);
add_scalar_int32(params->begin_mask);
add_scalar_int32(params->end_mask);
// The NN runtime implements neither ellipsis_mask nor new_axis_mask,
// although the tflite interpreter supports both.
if (params->ellipsis_mask != 0) {
FATAL("STRIDE_SLICE does not support ellipsis_mask in NNAPI");
}
if (params->new_axis_mask != 0) {
FATAL("STRIDE_SLICE does not support new_axis_mask in NNAPI");
}
add_scalar_int32(params->shrink_axis_mask);
};
// Appends the axis scalar operand for GATHER (extended op).
// NNAPI only handles gathering along axis 0, so any other axis is
// rejected. The validity check now runs BEFORE the operand is emitted
// (previously the scalar was appended first and the unsupported axis was
// reported afterwards): fail fast so an unsupported model does not leave
// a half-built operand list behind.
auto add_gather_ex_params = [&add_scalar_int32](void* data) {
auto builtin = reinterpret_cast<TfLiteGatherParams*>(data);
if (builtin->axis != 0) {
FATAL("GATHER does not support axis>0 in NNAPI");
}
add_scalar_int32(builtin->axis);
};
#ifndef OBS_BUILD
// Appends the PACK (extended op) scalar operands: values_count, then axis.
auto add_pack_ex_params = [&add_scalar_int32](void* data) {
const auto* params = reinterpret_cast<TfLitePackParams*>(data);
add_scalar_int32(params->values_count);
add_scalar_int32(params->axis);
};
// Appends the UNPACK (extended op) scalar operands: num, then axis.
auto add_unpack_ex_params = [&add_scalar_int32](void* data) {
const auto* params = reinterpret_cast<TfLiteUnpackParams*>(data);
add_scalar_int32(params->num);
add_scalar_int32(params->axis);
};
#endif
auto check_batch_to_space_params = [interpreter, &node, &augmented_inputs]() {
//If there are 3 inputs, check if crops is having default values {0, 0, 0, 0}
//Else unsupported by NNAPI
if(augmented_inputs.size() == 3)
{
const uint32_t crops_buffer_index = node.inputs->data[2];
const TfLiteTensor* crops = interpreter->tensor(crops_buffer_index);
const int *crops_value = crops->data.i32;
//Check if crops is having default values {0, 0, 0, 0}
if(crops_value[0] != 0 || crops_value[1] != 0 || crops_value[2] != 0 || crops_value[3] != 0)
{
FATAL("BATCH_TO_SPACE_ND does not support Explicit crops in NNAPI");
}
else
{
//Restrict crops input and pass only other two inputs
augmented_inputs.pop_back();
}
}
};
|