Diffstat (limited to 'libs/kernel/acl/src/FullyConnected.test.h')
 libs/kernel/acl/src/FullyConnected.test.h | 266 ++++++++++++++++++++++++++++++
 1 file changed, 266 insertions(+), 0 deletions(-)
diff --git a/libs/kernel/acl/src/FullyConnected.test.h b/libs/kernel/acl/src/FullyConnected.test.h
new file mode 100644
index 000000000..01bbff802
--- /dev/null
+++ b/libs/kernel/acl/src/FullyConnected.test.h
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+#include <OperationsUtils.h>
+#include <kernel/acl/nnfw_kernel_acl.h>
+#include <kernel/acl/FullyConnected.h>
+
+// TODO: fix include path in CMakeFiles
+#include "util.h"
+
+#ifndef ACL_TEST
+#error "ACL_TEST must be defined before including this file!"
+#endif // ACL_TEST
+
+#ifndef ACL_CORE_FUNC_NAME
+#error "ACL_CORE_FUNC_NAME must be defined before including this file!"
+#endif // ACL_CORE_FUNC_NAME
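+
+// A minimal sketch of how these macros might be provided by the including
+// translation unit (the exact definitions below are assumptions for
+// illustration, not necessarily the project's actual ones):
+//
+//   #define ACL_CORE_FUNC_NAME fullyConnectedFloat32  // kernel entry point under test
+//   #define ACL_TEST(tc, name) TEST(tc, name)         // plain gtest TEST macro
+//   #include "FullyConnected.test.h"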
+
+using namespace nnfw::kernel::acl;
+using fullyConnectedFloat32T = bool (*)(const float* inputData, const nnfw::rt::Shape& inputShape,
+ const float* weightsData, const nnfw::rt::Shape& weightsShape,
+ const float* biasData, const nnfw::rt::Shape& biasShape,
+ int32_t activation,
+ float* outputData, const nnfw::rt::Shape& outputShape);
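+// Note: fullyConnectedFloat32T matches the signature with which the tests
+// below invoke ACL_CORE_FUNC_NAME, so the including translation unit may, if
+// convenient, hold the kernel under test in a pointer of this type.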
+
+ACL_TEST(KernelACL_TC, fcFloat32_1) {
+
+ util::TensorWrapper input({1,1,1,100});
+ util::TensorWrapper weights({1,100});
+ util::TensorWrapper bias({1});
+ util::TensorWrapper output({1,1});
+
+ int32_t activation = static_cast<int32_t>(FusedActivationFunc::RELU);
+
+ input.initValue([](uint32_t n, uint32_t c, uint32_t h, uint32_t w) {
+ return 1.f;
+ });
+ weights.initValue([](uint32_t h, uint32_t w) {
+ return 1.f;
+ });
+ bias.initValue([](uint32_t w) {
+ return 0.f;
+ });
+ output.initValue([](uint32_t h, uint32_t w) {
+ return 0.f;
+ });
+
+ bool bret = ACL_CORE_FUNC_NAME(input.ptr<float>(), input.shape(),
+ weights.ptr<float>(), weights.shape(),
+ bias.ptr<float>(), bias.shape(),
+ activation,
+ output.ptr<float>(), output.shape());
+
+ EXPECT_EQ(bret, true);
+
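+  // All 100 inputs and weights are 1 and the bias is 0, so the dot product
+  // is 100; RELU leaves the positive result unchanged.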
+ util::TensorWrapper expected({1,1});
+ expected.initValue([](uint32_t h, uint32_t w) {
+ return 100.f;
+ });
+
+ EXPECT_EQ(output, expected);
+}
+
+ACL_TEST(KernelACL_TC, fcFloat32_relu) {
+
+ util::TensorWrapper input({1,1,1,100});
+ util::TensorWrapper weights({1,100});
+ util::TensorWrapper bias({1});
+ util::TensorWrapper output({1,1});
+
+ int32_t activation = static_cast<int32_t>(FusedActivationFunc::RELU);
+
+ input.initValue([](uint32_t n, uint32_t c, uint32_t h, uint32_t w) {
+ return 1.f;
+ });
+ weights.initValue([](uint32_t h, uint32_t w) {
+ return -1.f;
+ });
+ bias.initValue([](uint32_t w) {
+ return 0.f;
+ });
+ output.initValue([](uint32_t h, uint32_t w) {
+ return 0.f;
+ });
+
+ bool bret = ACL_CORE_FUNC_NAME(input.ptr<float>(), input.shape(),
+ weights.ptr<float>(), weights.shape(),
+ bias.ptr<float>(), bias.shape(),
+ activation,
+ output.ptr<float>(), output.shape());
+
+ EXPECT_EQ(bret, true);
+
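+  // The dot product of all-one inputs with all-minus-one weights is -100;
+  // RELU clamps the negative result to 0.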
+ util::TensorWrapper expected({1,1});
+ expected.initValue([](uint32_t h, uint32_t w) {
+ return 0.f;
+ });
+
+ EXPECT_EQ(output, expected);
+}
+
+ACL_TEST(KernelACL_TC, fcFloat32_conv_fc) {
+ uint32_t input_n = 1;
+ uint32_t input_c = 5;
+ uint32_t input_h = 4;
+ uint32_t input_w = 4;
+ uint32_t weight_n = 6;
+
+ int32_t activation = static_cast<int32_t>(FusedActivationFunc::RELU);
+
+ util::TensorWrapper input({input_n, input_h, input_w, input_c});
+ util::TensorWrapper weight({weight_n, input_c*input_h*input_w});
+ util::TensorWrapper bias({weight_n});
+ util::TensorWrapper output({1, weight_n});
+
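+  // Fill the input with its NHWC flat index (0..79) so the fully connected
+  // result can be computed in closed form (see the expected values below).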
+  input.initValue([&](uint32_t n, uint32_t c, uint32_t h, uint32_t w) {
+    uint32_t H = input_h;
+    uint32_t W = input_w;
+    uint32_t C = input_c;
+
+    return (float)(n*H*W*C + h*W*C + w*C + c);
+  });
+
+  weight.initValue([&](uint32_t h, uint32_t w) {
+    uint32_t W = input_c*input_h*input_w;
+
+    return (float)(h*W + w);
+  });
+
+ bias.initValue([](uint32_t w) {
+ return 0.f;
+ });
+
+ output.initValue([](uint32_t h, uint32_t w) {
+ return 0.f;
+ });
+
+ bool bret = ACL_CORE_FUNC_NAME(input.ptr<float>(), input.shape(),
+ weight.ptr<float>(), weight.shape(),
+ bias.ptr<float>(), bias.shape(),
+ activation,
+ output.ptr<float>(), output.shape());
+
+ EXPECT_EQ(bret, true);
+
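+  // With input values 0..79 and weight(h, i) = 80*h + i, each output is
+  //   sum_i i*(80*h + i) = 252800*h + 167480.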
+ util::TensorWrapper expected({1, weight_n});
+ expected.initValue({
+ 167480.f,
+ 420280.f,
+ 673080.f,
+ 925880.f,
+ 1178680.f,
+ 1431480.f});
+
+ EXPECT_EQ(output, expected);
+}
+
+ACL_TEST(KernelACL_TC, fcFloat32_fc_fc) {
+ uint32_t input_n = 6;
+ uint32_t weight_n = 6;
+
+ int32_t activation = static_cast<int32_t>(FusedActivationFunc::RELU);
+
+ util::TensorWrapper input({1, input_n});
+ util::TensorWrapper weight({weight_n, input_n});
+ util::TensorWrapper bias({weight_n});
+ util::TensorWrapper output({1, weight_n});
+
+  input.initValue([&](uint32_t h, uint32_t w) {
+    // h is ignored: the input has a single row, so h is always 0.
+    return (float)w;
+  });
+
+  weight.initValue([&](uint32_t h, uint32_t w) {
+    uint32_t W = input_n;
+
+    return (float)(h*W + w);
+  });
+
+ bias.initValue([](uint32_t w) {
+ return 0.f;
+ });
+
+ output.initValue([](uint32_t h, uint32_t w) {
+ return 0.f;
+ });
+
+ bool bret = ACL_CORE_FUNC_NAME(input.ptr<float>(), input.shape(),
+ weight.ptr<float>(), weight.shape(),
+ bias.ptr<float>(), bias.shape(),
+ activation,
+ output.ptr<float>(), output.shape());
+
+ EXPECT_EQ(bret, true);
+
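+  // With input values 0..5 and weight(h, w) = 6*h + w, each output is
+  //   sum_w w*(6*h + w) = 90*h + 55.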
+ util::TensorWrapper expected({1, weight_n});
+ expected.initValue({
+ 55.f,
+ 145.f,
+ 235.f,
+ 325.f,
+ 415.f,
+ 505.f,
+ });
+
+ EXPECT_EQ(output, expected);
+}
+
+ACL_TEST(KernelACL_TC, fcFloat32_inceptionv3) {
+
+ uint32_t input_c = 2048;
+ uint32_t weight_n = 1008;
+
+ util::TensorWrapper input({1,1,1,input_c});
+ util::TensorWrapper weight({weight_n,input_c});
+ util::TensorWrapper bias({weight_n});
+ util::TensorWrapper output({1, weight_n});
+
+ int32_t activation = static_cast<int32_t>(FusedActivationFunc::RELU);
+
+ input.initValue([&](uint32_t n, uint32_t c, uint32_t h, uint32_t w) {
+ return 1.f;
+ });
+ weight.initValue([&](uint32_t h, uint32_t w) {
+ return (float)h;
+ });
+ bias.initValue([](uint32_t w) {
+ return 0.f;
+ });
+ output.initValue([](uint32_t h, uint32_t w) {
+ return 0.f;
+ });
+
+ bool bret = ACL_CORE_FUNC_NAME(input.ptr<float>(), input.shape(),
+ weight.ptr<float>(), weight.shape(),
+ bias.ptr<float>(), bias.shape(),
+ activation,
+ output.ptr<float>(), output.shape());
+
+ EXPECT_EQ(bret, true);
+
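+  // Every input is 1 and every element of weight row h equals h, so output
+  // column w is the sum of input_c copies of w, i.e. w * input_c.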
+ util::TensorWrapper expected({1, weight_n});
+  expected.initValue([&](uint32_t h, uint32_t w) {
+    return (float)(w*input_c);
+  });
+
+ EXPECT_EQ(output, expected);
+}
+