diff options
Diffstat (limited to 'compiler/luci-interpreter/src/kernels/FullyConnected.test.cpp')
-rw-r--r-- | compiler/luci-interpreter/src/kernels/FullyConnected.test.cpp | 145 |
1 file changed, 137 insertions, 8 deletions
diff --git a/compiler/luci-interpreter/src/kernels/FullyConnected.test.cpp b/compiler/luci-interpreter/src/kernels/FullyConnected.test.cpp index 8077fcb5c..d194ce1a0 100644 --- a/compiler/luci-interpreter/src/kernels/FullyConnected.test.cpp +++ b/compiler/luci-interpreter/src/kernels/FullyConnected.test.cpp @@ -26,7 +26,85 @@ namespace using namespace testing; -TEST(FullyConnectedTest, Float) +template <typename T> +void Check(std::initializer_list<int32_t> input_shape, std::initializer_list<int32_t> weights_shape, + std::initializer_list<int32_t> bias_shape, std::initializer_list<int32_t> output_shape, + std::initializer_list<float> input_data, std::initializer_list<float> weights_data, + std::initializer_list<float> bias_data, std::initializer_list<float> output_data) +{ + Tensor input_tensor = makeInputTensor<DataType::FLOAT32>(input_shape, input_data); + Tensor weights_tensor = makeInputTensor<DataType::FLOAT32>(weights_shape, weights_data); + Tensor bias_tensor = makeInputTensor<DataType::FLOAT32>(bias_shape, bias_data); + Tensor output_tensor = makeOutputTensor(DataType::FLOAT32); + + FullyConnectedParams params{}; + params.activation = Activation::RELU; + + FullyConnected kernel(&input_tensor, &weights_tensor, &bias_tensor, &output_tensor, params); + kernel.configure(); + kernel.execute(); + + EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shape)); + EXPECT_THAT(extractTensorData<T>(output_tensor), FloatArrayNear(output_data)); +} + +template <> +void Check<uint8_t>( + std::initializer_list<int32_t> input_shape, std::initializer_list<int32_t> weights_shape, + std::initializer_list<int32_t> bias_shape, std::initializer_list<int32_t> output_shape, + std::initializer_list<float> input_data, std::initializer_list<float> weights_data, + std::initializer_list<float> bias_data, std::initializer_list<float> output_data) +{ + const float quantized_tolerance = getTolerance(-127, 128, 255); + std::pair<float, int32_t> 
input_quant_param = quantizationParams<uint8_t>(-63.5, 64); + std::pair<float, int32_t> output_quant_param = quantizationParams<uint8_t>(-127, 128); + Tensor input_tensor = makeInputTensor<DataType::U8>(input_shape, input_quant_param.first, + input_quant_param.second, input_data); + Tensor weights_tensor = makeInputTensor<DataType::U8>(weights_shape, input_quant_param.first, + input_quant_param.second, weights_data); + Tensor bias_tensor = makeInputTensor<DataType::S32>( + bias_shape, input_quant_param.first * input_quant_param.first, 0, bias_data); + Tensor output_tensor = + makeOutputTensor(DataType::U8, output_quant_param.first, output_quant_param.second); + + FullyConnectedParams params{}; + params.activation = Activation::RELU; + + FullyConnected kernel(&input_tensor, &weights_tensor, &bias_tensor, &output_tensor, params); + kernel.configure(); + kernel.execute(); + + EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shape)); + EXPECT_THAT(dequantizeTensorData(output_tensor), + FloatArrayNear(output_data, quantized_tolerance)); +} + +template <typename T> class FullyConnectedTest : public ::testing::Test +{ +}; + +using DataTypes = ::testing::Types<float, uint8_t>; +TYPED_TEST_CASE(FullyConnectedTest, DataTypes); + +TYPED_TEST(FullyConnectedTest, Simple) +{ + Check<TypeParam>({3, 2, 2, 1}, {3, 6}, {3}, {2, 3}, + { + -3, -5, 5, 4, 9, -2, // batch = 0 + -3, -2, -4, 9, -8, 1, // batch = 1 + }, + { + -3, -7, 4, -4, -6, 4, // unit = 0 + 3, 5, 2, 3, -3, -8, // unit = 1 + -3, 7, 4, 9, 0, -5, // unit = 2 + }, + {-1, -5, -8}, { + 0, 0, 32, // batch = 0 + 22, 11, 47, // batch = 1 + }); +} + +TEST(FullyConnectedTest, InvalidBiasType_NEG) { Shape input_shape{3, 2, 2, 1}; std::vector<float> input_data{ @@ -40,6 +118,34 @@ TEST(FullyConnectedTest, Float) -3, 7, 4, 9, 0, -5, // unit = 2 }; Shape bias_shape{3}; + std::vector<int32_t> bias_data{-1, -5, -8}; + + Tensor input_tensor = makeInputTensor<DataType::FLOAT32>(input_shape, input_data); + 
Tensor weights_tensor = makeInputTensor<DataType::FLOAT32>(weights_shape, weights_data); + Tensor bias_tensor = makeInputTensor<DataType::S32>(bias_shape, bias_data); + Tensor output_tensor = makeOutputTensor(DataType::FLOAT32); + + FullyConnectedParams params{}; + params.activation = Activation::RELU; + + FullyConnected kernel(&input_tensor, &weights_tensor, &bias_tensor, &output_tensor, params); + EXPECT_ANY_THROW(kernel.configure()); +} + +TEST(FullyConnectedTest, InvalidWeightShapeDim_NEG) +{ + Shape input_shape{3, 2, 2, 1}; + std::vector<float> input_data{ + -3, -5, 5, 4, 9, -2, // batch = 0 + -3, -2, -4, 9, -8, 1, // batch = 1 + }; + Shape weights_shape{1, 3, 6}; + std::vector<float> weights_data{ + -3, -7, 4, -4, -6, 4, // unit = 0 + 3, 5, 2, 3, -3, -8, // unit = 1 + -3, 7, 4, 9, 0, -5, // unit = 2 + }; + Shape bias_shape{3}; std::vector<float> bias_data{-1, -5, -8}; Tensor input_tensor = makeInputTensor<DataType::FLOAT32>(input_shape, input_data); @@ -51,15 +157,38 @@ TEST(FullyConnectedTest, Float) params.activation = Activation::RELU; FullyConnected kernel(&input_tensor, &weights_tensor, &bias_tensor, &output_tensor, params); - kernel.configure(); - kernel.execute(); + EXPECT_ANY_THROW(kernel.configure()); +} - std::vector<float> ref_output_data{ - 0, 0, 32, // batch = 0 - 22, 11, 47, // batch = 1 +TEST(FullyConnectedTest, BiasElementNumWeightDimMismatch_NEG) +{ + Shape input_shape{3, 2, 2, 1}; + std::vector<float> input_data{ + -3, -5, 5, 4, 9, -2, // batch = 0 + -3, -2, -4, 9, -8, 1, // batch = 1 }; - EXPECT_THAT(extractTensorData<float>(output_tensor), - ElementsAreArray(ArrayFloatNear(ref_output_data))); + Shape weights_shape{6, 3}; + std::vector<float> weights_data{ + -3, -7, 4, // unit = 0 + -4, -6, 4, // unit = 1 + 3, 5, 2, // unit = 2 + 3, -3, -8, // unit = 3 + -3, 7, 4, // unit = 4 + 9, 0, -5, // unit = 5 + }; + Shape bias_shape{3}; + std::vector<float> bias_data{-1, -5, -8}; + + Tensor input_tensor = 
makeInputTensor<DataType::FLOAT32>(input_shape, input_data); + Tensor weights_tensor = makeInputTensor<DataType::FLOAT32>(weights_shape, weights_data); + Tensor bias_tensor = makeInputTensor<DataType::FLOAT32>(bias_shape, bias_data); + Tensor output_tensor = makeOutputTensor(DataType::FLOAT32); + + FullyConnectedParams params{}; + params.activation = Activation::RELU; + + FullyConnected kernel(&input_tensor, &weights_tensor, &bias_tensor, &output_tensor, params); + EXPECT_ANY_THROW(kernel.configure()); } } // namespace |