diff options
author | Chunseok Lee <chunseok.lee@samsung.com> | 2020-03-04 18:09:24 +0900 |
---|---|---|
committer | Chunseok Lee <chunseok.lee@samsung.com> | 2020-03-04 18:09:24 +0900 |
commit | 302e6564a7a76109e1178207e44e45a58631c477 (patch) | |
tree | 6cc4bd95e5e438331fc2c53234af4ed0e0f3bc20 /tests/nnapi/specs/skip/V1_1/fully_connected_float_4d_simple_relaxed.mod.py | |
parent | bd11b24234d7d43dfe05a81c520aa01ffad06e42 (diff) | |
download | nnfw-302e6564a7a76109e1178207e44e45a58631c477.tar.gz nnfw-302e6564a7a76109e1178207e44e45a58631c477.tar.bz2 nnfw-302e6564a7a76109e1178207e44e45a58631c477.zip |
Imported Upstream version 1.1.0upstream/1.1.0submit/tizen/20200304.094649submit/tizen/20200304.093946submit/tizen/20200304.092919accepted/tizen/unified/20200305.051107
Diffstat (limited to 'tests/nnapi/specs/skip/V1_1/fully_connected_float_4d_simple_relaxed.mod.py')
-rw-r--r-- | tests/nnapi/specs/skip/V1_1/fully_connected_float_4d_simple_relaxed.mod.py | 43 |
1 file changed, 43 insertions, 0 deletions
diff --git a/tests/nnapi/specs/skip/V1_1/fully_connected_float_4d_simple_relaxed.mod.py b/tests/nnapi/specs/skip/V1_1/fully_connected_float_4d_simple_relaxed.mod.py new file mode 100644 index 000000000..2338c0bed --- /dev/null +++ b/tests/nnapi/specs/skip/V1_1/fully_connected_float_4d_simple_relaxed.mod.py @@ -0,0 +1,43 @@ +# +# Copyright (C) 2018 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# This test is for testing the input requirements of Fully Connected Op: +# the input's first dimension doesn't have to be the batch size, the +# input is reshaped as needed. + +model = Model() +in0 = Input("op1", "TENSOR_FLOAT32", "{4, 1, 5, 1}") +weights = Parameter("op2", "TENSOR_FLOAT32", "{3, 10}", [ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, # u = 0 + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, # u = 1 + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, # u = 2 +]) +bias = Parameter("b0", "TENSOR_FLOAT32", "{3}", [1, 2, 3]) +out0 = Output("op3", "TENSOR_FLOAT32", "{2, 3}") +act = Int32Scalar("act", 0) +model = model.Operation("FULLY_CONNECTED", in0, weights, bias, act).To(out0) +model = model.RelaxedExecution(True) + +# Example 1. Input in operand 0, +input0 = {in0: # input 0 + [1, 2, 3, 4, 5, 6, 7, 8, -9, -10, + 1, 2, 3, 4, 5, 6, 7, -8, 9, -10]} +output0 = {out0: # output 0 + [24, 25, 26, + 58, 59, 60]} + +# Instantiate an example +Example((input0, output0)) |