/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* Header-only library with various helpers for the test harness.
 * See frameworks/ml/nn/runtime/test/TestGenerated.cpp for how this is used.
 */

#ifndef ANDROID_ML_NN_TOOLS_TEST_GENERATOR_TEST_HARNESS_H
#define ANDROID_ML_NN_TOOLS_TEST_GENERATOR_TEST_HARNESS_H

#include <gmock/gmock-matchers.h>
#include <gtest/gtest.h>

#include <cmath>
#include <cstdint>
#include <functional>
#include <map>
#include <string>
#include <vector>

// Fix for onert: define _Float16 for the GNU compiler
#if __GNUC__ && !__clang__
#if __ARM_FP16_FORMAT_IEEE || __ARM_FP16_FORMAT_ALTERNATIVE
#define _Float16 __fp16
#else  // __ARM_FP16_FORMAT_IEEE || __ARM_FP16_FORMAT_ALTERNATIVE
#define _Float16 float
#endif  // __ARM_FP16_FORMAT_IEEE || __ARM_FP16_FORMAT_ALTERNATIVE
#endif  // __GNUC__ && !__clang__

namespace test_helper {

constexpr const size_t gMaximumNumberOfErrorMessages = 10;

// TODO: Figure out the build dependency to make including "CpuOperationUtils.h" work.
inline void convertFloat16ToFloat32(const _Float16* input, std::vector<float>* output) {
    for (size_t i = 0; i < output->size(); ++i) {
        (*output)[i] = static_cast<float>(input[i]);
    }
}

// This class is a workaround for two issues our code relies on:
// 1. sizeof(bool) is implementation defined.
// 2. vector<bool> does not allow direct pointer access via the data() method.
class bool8 {
   public:
    bool8() : mValue() {}
    /* implicit */ bool8(bool value) : mValue(value) {}
    inline operator bool() const { return mValue != 0; }

   private:
    uint8_t mValue;
};

static_assert(sizeof(bool8) == 1, "size of bool8 must be 8 bits");

typedef std::map<int, std::vector<uint32_t>> OperandDimensions;
typedef std::map<int, std::vector<float>> Float32Operands;
typedef std::map<int, std::vector<int32_t>> Int32Operands;
typedef std::map<int, std::vector<uint8_t>> Quant8AsymmOperands;
typedef std::map<int, std::vector<int16_t>> Quant16SymmOperands;
typedef std::map<int, std::vector<_Float16>> Float16Operands;
typedef std::map<int, std::vector<bool8>> Bool8Operands;
typedef std::map<int, std::vector<int8_t>> Quant8ChannelOperands;
typedef std::map<int, std::vector<uint16_t>> Quant16AsymmOperands;
typedef std::map<int, std::vector<int8_t>> Quant8SymmOperands;

struct MixedTyped {
    static constexpr size_t kNumTypes = 9;
    OperandDimensions operandDimensions;
    Float32Operands float32Operands;
    Int32Operands int32Operands;
    Quant8AsymmOperands quant8AsymmOperands;
    Quant16SymmOperands quant16SymmOperands;
    Float16Operands float16Operands;
    Bool8Operands bool8Operands;
    Quant8ChannelOperands quant8ChannelOperands;
    Quant16AsymmOperands quant16AsymmOperands;
    Quant8SymmOperands quant8SymmOperands;
};

typedef std::pair<MixedTyped, MixedTyped> MixedTypedExampleType;

// Mixed-typed examples
typedef struct {
    MixedTypedExampleType operands;
    // Specifies the RANDOM_MULTINOMIAL distribution tolerance.
    // If set to greater than zero, the input is compared as log-probabilities
    // to the output and must be within this tolerance to pass.
    float expectedMultinomialDistributionTolerance = 0.0;
} MixedTypedExample;
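
// Illustrative usage (a sketch, not part of the harness API): a generated
// test typically fills one MixedTypedExample per test case, with
// operands.first holding the input operands and operands.second the golden
// outputs, each map keyed by operand index:
//
//   MixedTyped inputs, outputs;
//   inputs.float32Operands[0] = {1.0f, 2.0f, 3.0f};   // input operand 0
//   outputs.float32Operands[0] = {2.0f, 4.0f, 6.0f};  // expected output 0
//   MixedTypedExample example;
//   example.operands = {inputs, outputs};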
// Go through all index-value pairs of a given input type
template <typename T>
inline void for_each(const std::map<int, std::vector<T>>& idx_and_data,
                     std::function<void(int, const std::vector<T>&)> execute) {
    for (auto& i : idx_and_data) {
        execute(i.first, i.second);
    }
}

// non-const variant of for_each
template <typename T>
inline void for_each(std::map<int, std::vector<T>>& idx_and_data,
                     std::function<void(int, std::vector<T>&)> execute) {
    for (auto& i : idx_and_data) {
        execute(i.first, i.second);
    }
}

// Go through all index-value pairs of a given input type
template <typename T>
inline void for_each(const std::map<int, std::vector<T>>& golden,
                     std::map<int, std::vector<T>>& test,
                     std::function<void(int, const std::vector<T>&, std::vector<T>&)> execute) {
    for_each<T>(golden, [&test, &execute](int index, const std::vector<T>& g) {
        auto& t = test[index];
        execute(index, g, t);
    });
}

// Go through all index-value pairs of a given input type
template <typename T>
inline void for_each(
    const std::map<int, std::vector<T>>& golden, const std::map<int, std::vector<T>>& test,
    std::function<void(int, const std::vector<T>&, const std::vector<T>&)> execute) {
    for_each<T>(golden, [&test, &execute](int index, const std::vector<T>& g) {
        auto t = test.find(index);
        ASSERT_NE(t, test.end());
        execute(index, g, t->second);
    });
}

// internal helper for for_all
template <typename T>
inline void for_all_internal(std::map<int, std::vector<T>>& idx_and_data,
                             std::function<void(int, void*, size_t)> execute_this) {
    for_each<T>(idx_and_data, [&execute_this](int idx, std::vector<T>& m) {
        execute_this(idx, static_cast<void*>(m.data()), m.size() * sizeof(T));
    });
}

// Go through all index-value pairs of all input types
// expects a functor that takes (int index, void* raw data, size_t sz)
inline void for_all(MixedTyped& idx_and_data,
                    std::function<void(int, void*, size_t)> execute_this) {
    for_all_internal(idx_and_data.float32Operands, execute_this);
    for_all_internal(idx_and_data.int32Operands, execute_this);
    for_all_internal(idx_and_data.quant8AsymmOperands, execute_this);
    for_all_internal(idx_and_data.quant16SymmOperands, execute_this);
    for_all_internal(idx_and_data.float16Operands, execute_this);
    for_all_internal(idx_and_data.bool8Operands, execute_this);
    for_all_internal(idx_and_data.quant8ChannelOperands, execute_this);
    for_all_internal(idx_and_data.quant16AsymmOperands, execute_this);
    for_all_internal(idx_and_data.quant8SymmOperands, execute_this);
    static_assert(9 == MixedTyped::kNumTypes,
                  "Number of types in MixedTyped changed, but for_all function wasn't updated");
}

// Const variant of internal helper for for_all
template <typename T>
inline void for_all_internal(const std::map<int, std::vector<T>>& idx_and_data,
                             std::function<void(int, const void*, size_t)> execute_this) {
    for_each<T>(idx_and_data, [&execute_this](int idx, const std::vector<T>& m) {
        execute_this(idx, static_cast<const void*>(m.data()), m.size() * sizeof(T));
    });
}

// Go through all index-value pairs (const variant)
// expects a functor that takes (int index, const void* raw data, size_t sz)
inline void for_all(const MixedTyped& idx_and_data,
                    std::function<void(int, const void*, size_t)> execute_this) {
    for_all_internal(idx_and_data.float32Operands, execute_this);
    for_all_internal(idx_and_data.int32Operands, execute_this);
    for_all_internal(idx_and_data.quant8AsymmOperands, execute_this);
    for_all_internal(idx_and_data.quant16SymmOperands, execute_this);
    for_all_internal(idx_and_data.float16Operands, execute_this);
    for_all_internal(idx_and_data.bool8Operands, execute_this);
    for_all_internal(idx_and_data.quant8ChannelOperands, execute_this);
    for_all_internal(idx_and_data.quant16AsymmOperands, execute_this);
    for_all_internal(idx_and_data.quant8SymmOperands, execute_this);
    static_assert(
        9 == MixedTyped::kNumTypes,
        "Number of types in MixedTyped changed, but const for_all function wasn't updated");
}
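
// Illustrative usage (a sketch): handing every input buffer of an example to
// a driver. The setInput(index, data, size) callee is hypothetical; any
// functor with the (int, const void*, size_t) signature works:
//
//   for_all(example.operands.first, [&](int index, const void* data, size_t size) {
//       setInput(index, data, size);  // hypothetical driver call
//   });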
// Helper template - resize test output per golden
template <typename T>
inline void resize_accordingly_(const std::map<int, std::vector<T>>& golden,
                                std::map<int, std::vector<T>>& test) {
    for_each<T>(golden, test,
                [](int, const std::vector<T>& g, std::vector<T>& t) { t.resize(g.size()); });
}

template <>
inline void resize_accordingly_<uint32_t>(const OperandDimensions& golden,
                                          OperandDimensions& test) {
    for_each<uint32_t>(
        golden, test,
        [](int, const std::vector<uint32_t>& g, std::vector<uint32_t>& t) { t = g; });
}

inline void resize_accordingly(const MixedTyped& golden, MixedTyped& test) {
    resize_accordingly_(golden.operandDimensions, test.operandDimensions);
    resize_accordingly_(golden.float32Operands, test.float32Operands);
    resize_accordingly_(golden.int32Operands, test.int32Operands);
    resize_accordingly_(golden.quant8AsymmOperands, test.quant8AsymmOperands);
    resize_accordingly_(golden.quant16SymmOperands, test.quant16SymmOperands);
    resize_accordingly_(golden.float16Operands, test.float16Operands);
    resize_accordingly_(golden.bool8Operands, test.bool8Operands);
    resize_accordingly_(golden.quant8ChannelOperands, test.quant8ChannelOperands);
    resize_accordingly_(golden.quant16AsymmOperands, test.quant16AsymmOperands);
    resize_accordingly_(golden.quant8SymmOperands, test.quant8SymmOperands);
    static_assert(9 == MixedTyped::kNumTypes,
                  "Number of types in MixedTyped changed, but resize_accordingly function wasn't "
                  "updated");
}

template <typename T>
void filter_internal(const std::map<int, std::vector<T>>& golden,
                     std::map<int, std::vector<T>>* filtered,
                     std::function<bool(int)> is_ignored) {
    for_each<T>(golden, [filtered, &is_ignored](int index, const std::vector<T>& m) {
        auto& g = *filtered;
        if (!is_ignored(index)) g[index] = m;
    });
}

inline MixedTyped filter(const MixedTyped& golden, std::function<bool(int)> is_ignored) {
    MixedTyped filtered;
    filter_internal(golden.operandDimensions, &filtered.operandDimensions, is_ignored);
    filter_internal(golden.float32Operands, &filtered.float32Operands, is_ignored);
    filter_internal(golden.int32Operands, &filtered.int32Operands, is_ignored);
    filter_internal(golden.quant8AsymmOperands, &filtered.quant8AsymmOperands, is_ignored);
    filter_internal(golden.quant16SymmOperands, &filtered.quant16SymmOperands, is_ignored);
    filter_internal(golden.float16Operands, &filtered.float16Operands, is_ignored);
    filter_internal(golden.bool8Operands, &filtered.bool8Operands, is_ignored);
    filter_internal(golden.quant8ChannelOperands, &filtered.quant8ChannelOperands, is_ignored);
    filter_internal(golden.quant16AsymmOperands, &filtered.quant16AsymmOperands, is_ignored);
    filter_internal(golden.quant8SymmOperands, &filtered.quant8SymmOperands, is_ignored);
    static_assert(9 == MixedTyped::kNumTypes,
                  "Number of types in MixedTyped changed, but filter function wasn't updated");
    return filtered;
}

// Compare results
template <typename T>
void compare_(const std::map<int, std::vector<T>>& golden,
              const std::map<int, std::vector<T>>& test, std::function<void(T, T)> cmp) {
    for_each<T>(golden, test,
                [&cmp](int index, const std::vector<T>& g, const std::vector<T>& t) {
                    for (unsigned int i = 0; i < g.size(); i++) {
                        SCOPED_TRACE(testing::Message()
                                     << "When comparing output " << index << " element " << i);
                        cmp(g[i], t[i]);
                    }
                });
}
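
// Illustrative usage (a sketch): generated tests supply an is_ignored(int)
// predicate for outputs whose values should not be checked; filtering both
// sides before comparison keeps the index sets consistent:
//
//   MixedTyped filteredGolden = filter(golden, is_ignored);
//   MixedTyped filteredTest = filter(test, is_ignored);
//   compare(filteredGolden, filteredTest);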
// TODO: Allow passing accuracy criteria from spec.
// Currently we only need relaxed accuracy criteria on mobilenet tests, so we return the quant8
// tolerance simply based on the current test name.
inline int getQuant8AllowedError() {
    const ::testing::TestInfo* const testInfo =
        ::testing::UnitTest::GetInstance()->current_test_info();
    const std::string testCaseName = testInfo->test_case_name();
    const std::string testName = testInfo->name();
    // We relax the quant8 precision for all tests with mobilenet:
    // - CTS/VTS GeneratedTest and DynamicOutputShapeTest with mobilenet
    // - VTS CompilationCachingTest and CompilationCachingSecurityTest except for TOCTOU tests
    if (testName.find("mobilenet") != std::string::npos ||
        (testCaseName.find("CompilationCaching") != std::string::npos &&
         testName.find("TOCTOU") == std::string::npos)) {
        return 2;
    } else {
        return 1;
    }
}

inline void compare(const MixedTyped& golden, const MixedTyped& test, float fpAtol = 1e-5f,
                    float fpRtol = 1e-5f) {
    int quant8AllowedError = getQuant8AllowedError();
    for_each<uint32_t>(
        golden.operandDimensions, test.operandDimensions,
        [](int index, const std::vector<uint32_t>& g, const std::vector<uint32_t>& t) {
            SCOPED_TRACE(testing::Message() << "When comparing dimensions for output " << index);
            EXPECT_EQ(g, t);
        });
    size_t totalNumberOfErrors = 0;
    compare_<float>(golden.float32Operands, test.float32Operands,
                    [&totalNumberOfErrors, fpAtol, fpRtol](float expected, float actual) {
                        // Compute the range based on both absolute tolerance and relative tolerance
                        float fpRange = fpAtol + fpRtol * std::abs(expected);
                        if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
                            EXPECT_NEAR(expected, actual, fpRange);
                        }
                        if (std::abs(expected - actual) > fpRange) {
                            totalNumberOfErrors++;
                        }
                    });
    compare_<int32_t>(golden.int32Operands, test.int32Operands,
                      [&totalNumberOfErrors](int32_t expected, int32_t actual) {
                          if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
                              EXPECT_EQ(expected, actual);
                          }
                          if (expected != actual) {
                              totalNumberOfErrors++;
                          }
                      });
    compare_<uint8_t>(golden.quant8AsymmOperands, test.quant8AsymmOperands,
                      [&totalNumberOfErrors, quant8AllowedError](uint8_t expected, uint8_t actual) {
                          if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
                              EXPECT_NEAR(expected, actual, quant8AllowedError);
                          }
                          if (std::abs(expected - actual) > quant8AllowedError) {
                              totalNumberOfErrors++;
                          }
                      });
    compare_<int16_t>(golden.quant16SymmOperands, test.quant16SymmOperands,
                      [&totalNumberOfErrors](int16_t expected, int16_t actual) {
                          if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
                              EXPECT_NEAR(expected, actual, 1);
                          }
                          if (std::abs(expected - actual) > 1) {
                              totalNumberOfErrors++;
                          }
                      });
    compare_<_Float16>(golden.float16Operands, test.float16Operands,
                       [&totalNumberOfErrors, fpAtol, fpRtol](_Float16 expected, _Float16 actual) {
                           // Compute the range based on both absolute tolerance and relative
                           // tolerance
                           float fpRange =
                               fpAtol + fpRtol * std::abs(static_cast<float>(expected));
                           if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
                               EXPECT_NEAR(expected, actual, fpRange);
                           }
                           if (std::abs(static_cast<float>(expected - actual)) > fpRange) {
                               totalNumberOfErrors++;
                           }
                       });
    compare_<bool8>(golden.bool8Operands, test.bool8Operands,
                    [&totalNumberOfErrors](bool expected, bool actual) {
                        if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
                            EXPECT_EQ(expected, actual);
                        }
                        if (expected != actual) {
                            totalNumberOfErrors++;
                        }
                    });
    compare_<int8_t>(golden.quant8ChannelOperands, test.quant8ChannelOperands,
                     [&totalNumberOfErrors, &quant8AllowedError](int8_t expected, int8_t actual) {
                         if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
                             EXPECT_NEAR(expected, actual, quant8AllowedError);
                         }
                         if (std::abs(static_cast<int>(expected) - static_cast<int>(actual)) >
                             quant8AllowedError) {
                             totalNumberOfErrors++;
                         }
                     });
    compare_<uint16_t>(golden.quant16AsymmOperands, test.quant16AsymmOperands,
                       [&totalNumberOfErrors](int16_t expected, int16_t actual) {
                           if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
                               EXPECT_NEAR(expected, actual, 1);
                           }
                           if (std::abs(expected - actual) > 1) {
                               totalNumberOfErrors++;
                           }
                       });
    compare_<int8_t>(golden.quant8SymmOperands, test.quant8SymmOperands,
                     [&totalNumberOfErrors, quant8AllowedError](int8_t expected, int8_t actual) {
                         if (totalNumberOfErrors < gMaximumNumberOfErrorMessages) {
                             EXPECT_NEAR(expected, actual, quant8AllowedError);
                         }
                         if (std::abs(static_cast<int>(expected) - static_cast<int>(actual)) >
                             quant8AllowedError) {
                             totalNumberOfErrors++;
                         }
                     });
    static_assert(9 == MixedTyped::kNumTypes,
                  "Number of types in MixedTyped changed, but compare function wasn't updated");
    EXPECT_EQ(size_t{0}, totalNumberOfErrors);
}
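
// Worked example of the floating-point criterion above: a value passes when
// |expected - actual| <= fpAtol + fpRtol * |expected|. With the default
// tolerances of 1e-5 each, an expected value of 100.0f allows a deviation of
// up to 1e-5 + 1e-5 * 100.0f = 1.01e-3.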
// Calculates the expected probability from the unnormalized log-probability of
// each class in the input and compares it to the actual occurrence of that class
// in the output.
inline void expectMultinomialDistributionWithinTolerance(const MixedTyped& test,
                                                         const MixedTypedExample& example) {
    // TODO: These should be parameters but aren't currently preserved in the example.
    const int kBatchSize = 1;
    const int kNumClasses = 1024;
    const int kNumSamples = 128;

    std::vector<int32_t> output = test.int32Operands.at(0);
    std::vector<int> class_counts;
    class_counts.resize(kNumClasses);
    for (int index : output) {
        class_counts[index]++;
    }
    std::vector<float> input;
    Float32Operands float32Operands = example.operands.first.float32Operands;
    if (!float32Operands.empty()) {
        input = example.operands.first.float32Operands.at(0);
    } else {
        std::vector<_Float16> inputFloat16 = example.operands.first.float16Operands.at(0);
        input.resize(inputFloat16.size());
        convertFloat16ToFloat32(inputFloat16.data(), &input);
    }
    for (int b = 0; b < kBatchSize; ++b) {
        float probability_sum = 0;
        const int batch_index = kBatchSize * b;
        for (int i = 0; i < kNumClasses; ++i) {
            probability_sum += expf(input[batch_index + i]);
        }
        for (int i = 0; i < kNumClasses; ++i) {
            float probability =
                static_cast<float>(class_counts[i]) / static_cast<float>(kNumSamples);
            float probability_expected = expf(input[batch_index + i]) / probability_sum;
            EXPECT_THAT(probability,
                        ::testing::FloatNear(probability_expected,
                                             example.expectedMultinomialDistributionTolerance));
        }
    }
}

}  // namespace test_helper

#endif  // ANDROID_ML_NN_TOOLS_TEST_GENERATOR_TEST_HARNESS_H