summaryrefslogtreecommitdiff
path: root/compute/cker/src/train
diff options
context:
space:
mode:
Diffstat (limited to 'compute/cker/src/train')
-rw-r--r--compute/cker/src/train/FullyConnected.test.cc83
-rw-r--r--compute/cker/src/train/Loss.test.cc201
-rw-r--r--compute/cker/src/train/Relu.test.cc107
3 files changed, 391 insertions, 0 deletions
diff --git a/compute/cker/src/train/FullyConnected.test.cc b/compute/cker/src/train/FullyConnected.test.cc
new file mode 100644
index 000000000..37c2d4a97
--- /dev/null
+++ b/compute/cker/src/train/FullyConnected.test.cc
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cker/train/operation/FullyConnected.h>
+
+#include <gtest/gtest.h>
+#include <vector>
+
+// FullyConnectedBiasGrad must reduce the incoming gradient over every leading
+// (batch) dimension, yielding one accumulated value per bias element.
+TEST(CKer_Operation, FullyConnectedBiasGrad)
+{
+ {
+ // Shape: {2, 4}
+ std::vector<float> incoming_backward = {-1, 2, -3, 4, 5, -6, -7, 8};
+ // Shape: {4}
+ // Column-wise sums of the two rows above: {-1+5, 2-6, -3-7, 4+8}.
+ std::vector<float> expected_bias_backward = {4, -4, -10, 12};
+ std::vector<float> bias_backward(4);
+
+ nnfw::cker::train::FullyConnectedBiasGrad(
+ nnfw::cker::Shape{2, 4}, incoming_backward.data(),
+ nnfw::cker::Shape{static_cast<int>(bias_backward.size())}, bias_backward.data());
+
+ for (size_t i = 0; i < bias_backward.size(); ++i)
+ ASSERT_EQ(bias_backward[i], expected_bias_backward[i]);
+ }
+
+ {
+ // Shape: {3, 3}
+ std::vector<float> incoming_backward = {-1, 2, -3, 4, 5, -6, -7, 8, 9};
+ // Shape: {3}
+ // Column-wise sums of the three rows: {-1+4-7, 2+5+8, -3-6+9}.
+ std::vector<float> expected_bias_backward = {-4, 15, 0};
+ std::vector<float> bias_backward(3);
+
+ nnfw::cker::train::FullyConnectedBiasGrad(
+ nnfw::cker::Shape{3, 3}, incoming_backward.data(),
+ nnfw::cker::Shape{static_cast<int>(bias_backward.size())}, bias_backward.data());
+
+ for (size_t i = 0; i < bias_backward.size(); ++i)
+ ASSERT_EQ(bias_backward[i], expected_bias_backward[i]);
+ }
+
+ {
+ // Shape: {1, 2, 2, 3}
+ std::vector<float> incoming_backward = {-1, 2, -3, 4, 5, -6, -7, 8, 9, -10, -11, 12};
+ // Shape: {3}
+ // All leading dims (1*2*2 = 4 rows of 3) are reduced into the bias gradient.
+ std::vector<float> expected_bias_backward = {-14, 4, 12};
+ std::vector<float> bias_backward(3);
+
+ nnfw::cker::train::FullyConnectedBiasGrad(
+ nnfw::cker::Shape{1, 2, 2, 3}, incoming_backward.data(),
+ nnfw::cker::Shape{static_cast<int>(bias_backward.size())}, bias_backward.data());
+
+ for (size_t i = 0; i < bias_backward.size(); ++i)
+ ASSERT_EQ(bias_backward[i], expected_bias_backward[i]);
+ }
+}
+
+// A gradient of shape {2, 4} cannot be reduced into a 3-element bias, so the
+// backward kernel is expected to reject the mismatched shapes.
+TEST(CKer_Operation, neg_FullyConnectedBiasGrad)
+{
+ {
+ // Unmatched shape
+ // Shape: {2, 4}
+ std::vector<float> incoming_backward = {-1, 2, -3, 4, 5, -6, -7, 8};
+ // Shape: {3}
+ std::vector<float> bias_backward(3);
+ EXPECT_ANY_THROW(nnfw::cker::train::FullyConnectedBiasGrad(
+ nnfw::cker::Shape{2, 4}, incoming_backward.data(),
+ nnfw::cker::Shape{static_cast<int>(bias_backward.size())},
+ bias_backward.data()););
+ }
+}
diff --git a/compute/cker/src/train/Loss.test.cc b/compute/cker/src/train/Loss.test.cc
new file mode 100644
index 000000000..98568f47a
--- /dev/null
+++ b/compute/cker/src/train/Loss.test.cc
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cker/train/operation/Loss.h>
+
+#include <gtest/gtest.h>
+#include <vector>
+
+// MSE reduces element-wise squared error (y_pred - y_true)^2 to a single
+// scalar mean, across int and float inputs and 2-D/3-D shapes.
+TEST(CKer_Operation, LossMSE)
+{
+ {
+ // Shape: {1, 10} -> m_rows:10, m_cols:1
+ // Every element differs by exactly 1, so the mean squared error is 1.
+ std::vector<int> y_pred = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+ std::vector<int> y_true = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ std::vector<int> output(1);
+ std::vector<int> expected = {1};
+
+ nnfw::cker::train::MSE(nnfw::cker::Shape{1, 10}, y_pred.data(), nnfw::cker::Shape{1, 10},
+ y_true.data(), nnfw::cker::Shape{1}, output.data());
+
+ EXPECT_EQ(output[0], expected[0]);
+ }
+
+ {
+ // Shape: {1, 10} -> m_rows:10, m_cols:1
+ // Same data as above in float; exercises the floating-point path.
+ std::vector<float> y_pred = {1., 2., 3., 4., 5., 6., 7., 8., 9., 10.};
+ std::vector<float> y_true = {0., 1., 2., 3., 4., 5., 6., 7., 8., 9.};
+ std::vector<float> output(1);
+ std::vector<float> expected = {1.0};
+
+ nnfw::cker::train::MSE(nnfw::cker::Shape{1, 10}, y_pred.data(), nnfw::cker::Shape{1, 10},
+ y_true.data(), nnfw::cker::Shape{1}, output.data());
+
+ EXPECT_FLOAT_EQ(output[0], expected[0]);
+ }
+
+ {
+ // Shape: {2, 3} -> m_rows:3, m_cols:2
+ // Sum of squared diffs is 660.0 over 6 elements -> mean 110.0.
+ std::vector<float> y_pred = {27.2, 31.8, 51.9, 10.2, 34.2, 12.4};
+ std::vector<float> y_true = {31.3, 40.3, 29.7, 12.9, 25.8, 11.9};
+ std::vector<float> output(1);
+ std::vector<float> expected = {110.0};
+
+ nnfw::cker::train::MSE(nnfw::cker::Shape{2, 3}, y_pred.data(), nnfw::cker::Shape{2, 3},
+ y_true.data(), nnfw::cker::Shape{1}, output.data());
+
+ EXPECT_FLOAT_EQ(output[0], expected[0]);
+ }
+
+ {
+ // Shape: {2, 3, 4} -> m_rows:4, m_cols:6
+ // 3-D input: the mean is still taken over all 24 elements.
+ std::vector<float> y_pred = {1., 2., 3., 4., 1., 2., 3., 4., 1., 2., 3., 4.,
+ 1., 2., 3., 4., 1., 2., 3., 4., 1., 2., 3., 4.};
+ std::vector<float> y_true = {1., 1., 1., 1., 2., 2., 2., 2., 3., 3., 3., 3.,
+ 1., 1., 1., 1., 2., 2., 2., 2., 3., 3., 3., 3.};
+ std::vector<float> output(1);
+ std::vector<float> expected = {2.1666667};
+
+ nnfw::cker::train::MSE(nnfw::cker::Shape{2, 3, 4}, y_pred.data(), nnfw::cker::Shape{2, 3, 4},
+ y_true.data(), nnfw::cker::Shape{1}, output.data());
+
+ EXPECT_FLOAT_EQ(output[0], expected[0]);
+ }
+}
+
+// Negative tests for MSE: a deliberately wrong expected value, an invalid
+// (non-scalar) output shape, and mismatched y_pred / y_true shapes.
+TEST(CKer_Operation, neg_LossMSE)
+{
+ {
+ // Invalid expected value
+ std::vector<float> y_pred = {1., 2., 3., 4., 5., 6., 7., 8., 9., 10.};
+ std::vector<float> y_true = {0., 1., 2., 3., 4., 5., 6., 7., 8., 9.};
+ std::vector<float> output(1);
+ std::vector<float> expected = {-1.0};
+
+ // Shape {1, 10} matches the 10-element buffers above; the previous
+ // {2, 3, 4} shape (24 elements) read past the end of y_pred/y_true.
+ nnfw::cker::train::MSE(nnfw::cker::Shape{1, 10}, y_pred.data(), nnfw::cker::Shape{1, 10},
+ y_true.data(), nnfw::cker::Shape{1}, output.data());
+
+ EXPECT_NE(output[0], expected[0]);
+ }
+
+ {
+ // Invalid output shape
+ std::vector<float> y_pred = {1., 2., 3., 4., 5., 6., 7., 8., 9., 10.};
+ std::vector<float> y_true = {0., 1., 2., 3., 4., 5., 6., 7., 8., 9.};
+ std::vector<float> output(3);
+ std::vector<float> expected = {1.0};
+
+ // MSE reduces to a scalar, so an output shape of {3} must be rejected.
+ EXPECT_ANY_THROW(nnfw::cker::train::MSE(nnfw::cker::Shape{1, 10}, y_pred.data(),
+ nnfw::cker::Shape{1, 10}, y_true.data(),
+ nnfw::cker::Shape{3}, output.data()));
+ }
+
+ {
+ // Different y_pred and y_true shape
+ std::vector<float> y_pred = {1., 2., 3., 4., 5., 6., 7., 8., 9., 10.};
+ std::vector<float> y_true = {0., 1., 2., 3., 4., 5.};
+ std::vector<float> output(1);
+ std::vector<float> expected = {1.0};
+
+ // Shapes are declared to match each buffer's actual element count.
+ EXPECT_ANY_THROW(nnfw::cker::train::MSE(nnfw::cker::Shape{1, 10}, y_pred.data(),
+ nnfw::cker::Shape{2, 3}, y_true.data(),
+ nnfw::cker::Shape{1}, output.data()));
+ }
+}
+
+// MSEGrad produces the per-element derivative of the mean squared error;
+// every expected value below equals 2 * (y_pred - y_true) / element_count.
+TEST(CKer_Operation, LossMSEGrad)
+{
+ {
+ // Shape: {1, 10} -> m_rows:10, m_cols:1
+ // Integer path: 2 * 1 / 10 truncates to 0 for every element.
+ std::vector<int> y_pred = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+ std::vector<int> y_true = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ std::vector<int> deriv_y_pred(10);
+ std::vector<int> expected = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+ nnfw::cker::train::MSEGrad(nnfw::cker::Shape{1, 10}, y_pred.data(), nnfw::cker::Shape{1, 10},
+ y_true.data(), nnfw::cker::Shape{1, 10}, deriv_y_pred.data());
+
+ for (size_t i = 0; i < deriv_y_pred.size(); ++i)
+ EXPECT_EQ(deriv_y_pred[i], expected[i]);
+ }
+
+ {
+ // Shape: {1, 10} -> m_rows:10, m_cols:1
+ // Float path with the same data: 2 * 1 / 10 = 0.2 everywhere.
+ std::vector<float> y_pred = {1., 2., 3., 4., 5., 6., 7., 8., 9., 10.};
+ std::vector<float> y_true = {0., 1., 2., 3., 4., 5., 6., 7., 8., 9.};
+ std::vector<float> deriv_y_pred(10);
+ std::vector<float> expected = {0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2};
+
+ nnfw::cker::train::MSEGrad(nnfw::cker::Shape{1, 10}, y_pred.data(), nnfw::cker::Shape{1, 10},
+ y_true.data(), nnfw::cker::Shape{1, 10}, deriv_y_pred.data());
+
+ for (size_t i = 0; i < deriv_y_pred.size(); ++i)
+ EXPECT_FLOAT_EQ(deriv_y_pred[i], expected[i]);
+ }
+
+ {
+ // Shape: {2, 3} -> m_rows:3, m_cols:2
+ // e.g. first element: 2 * (27.2 - 31.3) / 6 = -1.3666667.
+ std::vector<float> y_pred = {27.2, 31.8, 51.9, 10.2, 34.2, 12.4};
+ std::vector<float> y_true = {31.3, 40.3, 29.7, 12.9, 25.8, 11.9};
+ std::vector<float> deriv_y_pred(6);
+ std::vector<float> expected = {-1.3666667, -2.8333333, 7.4, -0.9, 2.8, 0.1666667};
+
+ nnfw::cker::train::MSEGrad(nnfw::cker::Shape{2, 3}, y_pred.data(), nnfw::cker::Shape{2, 3},
+ y_true.data(), nnfw::cker::Shape{2, 3}, deriv_y_pred.data());
+
+ for (size_t i = 0; i < deriv_y_pred.size(); ++i)
+ EXPECT_FLOAT_EQ(deriv_y_pred[i], expected[i]);
+ }
+}
+
+// Negative tests for MSEGrad: a deliberately wrong expected gradient, and
+// shape mismatches between y_pred, y_true and the output buffer.
+TEST(CKer_Operation, neg_LossMSEGrad)
+{
+ {
+ // Invalid expected value
+ std::vector<float> y_pred = {27.2, 31.8, 51.9, 10.2, 34.2, 12.4};
+ std::vector<float> y_true = {31.3, 40.3, 29.7, 12.9, 25.8, 11.9};
+ std::vector<float> deriv_y_pred(6);
+ std::vector<float> expected = {1., 1., 1., 1., 1., 1.};
+
+ nnfw::cker::train::MSEGrad(nnfw::cker::Shape{2, 3}, y_pred.data(), nnfw::cker::Shape{2, 3},
+ y_true.data(), nnfw::cker::Shape{2, 3}, deriv_y_pred.data());
+
+ for (size_t i = 0; i < deriv_y_pred.size(); ++i)
+ EXPECT_NE(deriv_y_pred[i], expected[i]);
+ }
+
+ {
+ // Different y_pred and y_true shape
+ std::vector<float> y_pred = {1., 2., 3., 4., 5., 6., 7., 8., 9., 10.};
+ std::vector<float> y_true = {0., 1., 2., 3., 4., 5.};
+ std::vector<float> deriv_y_pred(10);
+
+ EXPECT_ANY_THROW(nnfw::cker::train::MSEGrad(nnfw::cker::Shape{1, 10}, y_pred.data(),
+ nnfw::cker::Shape{2, 3}, y_true.data(),
+ nnfw::cker::Shape{1, 10}, deriv_y_pred.data()));
+ }
+
+ {
+ // Different y_pred and deriv_y_pred shape
+ std::vector<float> y_pred = {1., 2., 3., 4., 5., 6., 7., 8., 9., 10.};
+ std::vector<float> y_true = {0., 1., 2., 3., 4., 5., 6., 7., 8., 9.};
+ std::vector<float> deriv_y_pred(6);
+
+ EXPECT_ANY_THROW(nnfw::cker::train::MSEGrad(nnfw::cker::Shape{1, 10}, y_pred.data(),
+ nnfw::cker::Shape{1, 10}, y_true.data(),
+ nnfw::cker::Shape{2, 3}, deriv_y_pred.data()));
+ }
+}
diff --git a/compute/cker/src/train/Relu.test.cc b/compute/cker/src/train/Relu.test.cc
new file mode 100644
index 000000000..d94411038
--- /dev/null
+++ b/compute/cker/src/train/Relu.test.cc
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cker/operation/ReLU.h>
+#include <cker/train/operation/ReLU.h>
+
+#include <gtest/gtest.h>
+#include <vector>
+
+namespace
+{
+
+// Helper that runs the forward ReLU kernel and (optionally) ReLUGrad over the
+// supplied buffers and compares each result against the caller's expectations.
+template <typename T> class ReluOpVerifier
+{
+public:
+ // input/expected_output drive the forward pass; backprop_output and
+ // expected_backprop_input drive the backward pass (skipped when empty).
+ ReluOpVerifier(const std::vector<T> &input, const std::vector<T> &expected_output,
+ const std::vector<T> &backprop_output,
+ const std::vector<T> &expected_backprop_input)
+ : _input{input}, _expected_output{expected_output}, _backprop_output{backprop_output},
+ _expected_backprop_input{expected_backprop_input}
+ {
+ EXPECT_TRUE(input.size() == expected_output.size());
+ // The backward-pass buffers must agree in size too; checking here gives a
+ // clearer failure than a size error surfacing inside ReLUGrad.
+ EXPECT_TRUE(backprop_output.size() == expected_backprop_input.size());
+ _output.resize(_expected_output.size());
+ _backprop_input.resize(_expected_backprop_input.size());
+ }
+
+public:
+ void verifyExpected()
+ {
+ // Forward pass: ReLU(_input) must equal _expected_output element-wise.
+ nnfw::cker::ReLU(nnfw::cker::Shape{static_cast<int>(_input.size())}, _input.data(),
+ nnfw::cker::Shape{static_cast<int>(_output.size())}, _output.data());
+
+ for (size_t i = 0; i < _output.size(); ++i)
+ ASSERT_EQ(_output[i], _expected_output[i]);
+
+ if (_backprop_output.size() > 0)
+ {
+ // Backward pass: per the expected data, the incoming gradient is passed
+ // through where the forward output is positive and zeroed elsewhere.
+ nnfw::cker::train::ReLUGrad(
+ nnfw::cker::Shape{static_cast<int>(_output.size())}, _output.data(),
+ nnfw::cker::Shape{static_cast<int>(_backprop_output.size())}, _backprop_output.data(),
+ nnfw::cker::Shape{static_cast<int>(_backprop_input.size())}, _backprop_input.data());
+
+ for (size_t i = 0; i < _backprop_input.size(); ++i)
+ ASSERT_EQ(_backprop_input[i], _expected_backprop_input[i]);
+ }
+ }
+
+private:
+ std::vector<T> _input;
+ std::vector<T> _output;
+ std::vector<T> _expected_output;
+ std::vector<T> _backprop_output;
+ std::vector<T> _backprop_input;
+ std::vector<T> _expected_backprop_input;
+};
+
+} // namespace
+
+// Positive cases: forward ReLU clamps negatives to zero, and the backward
+// pass zeroes the incoming gradient wherever the forward output is zero.
+TEST(CKer_Operation, ReLU)
+{
+ {
+ std::vector<float> input_forward = {-1, 2, 3, -4};
+ std::vector<float> expected_forward = {0, 2, 3, 0};
+ std::vector<float> incoming_backward = {-5, 6, -7, 8};
+ std::vector<float> expected_backward = {0, 6, -7, 0};
+ ReluOpVerifier<float> verifier{input_forward, expected_forward, incoming_backward,
+ expected_backward};
+ verifier.verifyExpected();
+ }
+
+ {
+ // Includes an exact zero input, which must also block the gradient.
+ std::vector<float> input_forward = {0, -1, 2, 3, -4, 5, 6, -7};
+ std::vector<float> expected_forward = {0, 0, 2, 3, 0, 5, 6, 0};
+ std::vector<float> incoming_backward = {8, -9, 10, 11, -12, -13, 14, -15};
+ std::vector<float> expected_backward = {0, 0, 10, 11, 0, -13, 14, 0};
+ ReluOpVerifier<float> verifier{input_forward, expected_forward, incoming_backward,
+ expected_backward};
+ verifier.verifyExpected();
+ }
+}
+
+// Negative case: a 5-element forward output paired with 4-element gradient
+// buffers gives ReLUGrad mismatched shapes, which must throw.
+TEST(CKer_Operation, neg_ReLU)
+{
+ {
+ // Unmatched shape
+ std::vector<float> input_forward = {0, -1, 2, 3, -4};
+ std::vector<float> expected_forward = {0, 0, 2, 3, 0};
+ std::vector<float> incoming_backward = {-5, 6, -7, 8};
+ std::vector<float> expected_backward = {0, 6, -7, 0};
+ ReluOpVerifier<float> verifier{input_forward, expected_forward, incoming_backward,
+ expected_backward};
+ EXPECT_ANY_THROW(verifier.verifyExpected());
+ }
+}