From 90073196d984ca030485d451242b93d1759d6037 Mon Sep 17 00:00:00 2001
From: icodo98 <rsb98759@gmail.com>
Date: Wed, 25 Sep 2024 11:20:06 +0900
Subject: [PATCH] [onert/cker] Add cker for avgpool

This commit adds training cker kernels for average pooling:
AvgPool2D (forward) and AvgPool2DGrad (backward), along with their tests.

ONE-DCO-1.0-Signed-off-by: JuYoung Lee rsb98759@gmail.com
---
 .../include/cker/train/operation/AvgPool.h    | 150 +++++++++
 compute/cker/src/train/AvgPool.test.cc        | 286 ++++++++++++++++++
 2 files changed, 436 insertions(+)
 create mode 100644 compute/cker/include/cker/train/operation/AvgPool.h
 create mode 100644 compute/cker/src/train/AvgPool.test.cc

diff --git a/compute/cker/include/cker/train/operation/AvgPool.h b/compute/cker/include/cker/train/operation/AvgPool.h
new file mode 100644
index 00000000000..c16c5daddba
--- /dev/null
+++ b/compute/cker/include/cker/train/operation/AvgPool.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2024 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NNFW_CKER_TRAIN_OPERATION_AVGPOOL_H__
+#define __NNFW_CKER_TRAIN_OPERATION_AVGPOOL_H__
+
+#include "cker/Shape.h"
+#include "cker/Utils.h"
+#include "cker/eigen/Utils.h"
+
+#include <Eigen/Core>
+
+namespace nnfw
+{
+namespace cker
+{
+namespace train
+{
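+// Computes 2D average pooling over `input_data` (NHWC layout) and writes the
+// result to `output_data`. Padded (out-of-range) cells are excluded from the
+// average, so each output element is the mean of the valid input cells in its
+// pooling window, clamped to the activation range.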
+inline void AvgPool2D(const PoolParams &params, const Shape &input_shape, const float *input_data,
+                      const Shape &output_shape, float *output_data)
+{
+  assert(input_shape.DimensionsCount() == 4);
+  assert(output_shape.DimensionsCount() == 4);
+  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
+  const int input_height = input_shape.Dims(1);
+  const int input_width = input_shape.Dims(2);
+  const int output_height = output_shape.Dims(1);
+  const int output_width = output_shape.Dims(2);
+  const int stride_height = params.stride_height;
+  const int stride_width = params.stride_width;
+
+  // TODO(benoitjacob) make this a proper reference impl without Eigen!
+  const auto in_mat = MapAsMatrixWithLastDimAsRows(input_data, input_shape);
+  auto out_mat = MapAsMatrixWithLastDimAsRows(output_data, output_shape);
+
+  // Prefill the output to 0.
+  out_mat.setZero();
+
+  for (int b = 0; b < batches; ++b)
+  {
+    for (int h = 0; h < output_height; ++h)
+    {
+      for (int w = 0; w < output_width; ++w)
+      {
+        // [h_start, h_end) x [w_start, w_end) is the range of input cells
+        // that this output element is pooled from.
+        int h_start = h * stride_height - params.padding_values.height;
+        int h_end = std::min(h_start + params.filter_height, input_height);
+        h_start = h_start < 0 ? 0 : h_start;
+
+        int w_start = w * stride_width - params.padding_values.width;
+        int w_end = std::min(w_start + params.filter_width, input_width);
+        w_start = w_start < 0 ? 0 : w_start;
+
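+        // `count` is the number of valid (non-padded) input cells in the
+        // window; padded cells do not contribute to the average.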
+        int count = (h_end - h_start) * (w_end - w_start);
+        if (h_end <= 0 || w_end <= 0 || count <= 0 || h_start >= input_height ||
+            w_start >= input_width)
+          continue;
+
+        int out_offset = NodeOffset(b, h, w, output_height, output_width);
+        for (int ph = h_start; ph < h_end; ++ph)
+        {
+          for (int pw = w_start; pw < w_end; ++pw)
+          {
+            int in_offset = NodeOffset(b, ph, pw, input_height, input_width);
+            out_mat.col(out_offset) += in_mat.col(in_offset);
+          }
+        }
+        out_mat.col(out_offset) /= count;
+      }
+    }
+  }
+
+  out_mat = out_mat.cwiseMax(params.float_activation_min).cwiseMin(params.float_activation_max);
+}
+
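+// Computes the gradient of average pooling. Each element of `incoming_data`
+// (the gradient w.r.t. the pooling output) is distributed equally over the
+// valid input cells of its pooling window, i.e. scaled by 1 / count and
+// accumulated into `grad_data`.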
+inline void AvgPool2DGrad(const PoolParams &params, const Shape &incoming_shape,
+                          const float *incoming_data, const Shape &grad_shape, float *grad_data)
+{
+  assert(grad_shape.DimensionsCount() == 4);
+  assert(incoming_shape.DimensionsCount() == 4);
+
+  const int batches = MatchingDim(incoming_shape, 0, grad_shape, 0);
+  const int grad_height = grad_shape.Dims(1);
+  const int grad_width = grad_shape.Dims(2);
+  const int incoming_height = incoming_shape.Dims(1);
+  const int incoming_width = incoming_shape.Dims(2);
+  const int stride_height = params.stride_height;
+  const int stride_width = params.stride_width;
+
+  // Initialize the gradient buffer to zero before accumulation.
+  std::fill(grad_data, grad_data + grad_shape.FlatSize(), 0.0f);
+
+  const auto incoming_mat = MapAsMatrixWithLastDimAsRows(incoming_data, incoming_shape);
+  auto grad_mat = MapAsMatrixWithLastDimAsRows(grad_data, grad_shape);
+
+  for (int b = 0; b < batches; ++b)
+  {
+    for (int h = 0; h < incoming_height; ++h)
+    {
+      for (int w = 0; w < incoming_width; ++w)
+      {
+        // [h_start, h_end) x [w_start, w_end) is the range of grad (input)
+        // cells that this incoming gradient element is distributed over.
+        int h_start = h * stride_height - params.padding_values.height;
+        int h_end = std::min(h_start + params.filter_height, grad_height);
+        h_start = h_start < 0 ? 0 : h_start;
+
+        int w_start = w * stride_width - params.padding_values.width;
+        int w_end = std::min(w_start + params.filter_width, grad_width);
+        w_start = w_start < 0 ? 0 : w_start;
+
+        int count = (h_end - h_start) * (w_end - w_start);
+
+        if (h_end <= 0 || w_end <= 0 || count <= 0 || h_start >= grad_height ||
+            w_start >= grad_width)
+          continue;
+
+        int incoming_offset = NodeOffset(b, h, w, incoming_height, incoming_width);
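+        // d(avg)/d(x) = 1 / count for every valid cell in the window, so each
+        // cell accumulates incoming gradient / count.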
+        for (int ph = h_start; ph < h_end; ++ph)
+        {
+          for (int pw = w_start; pw < w_end; ++pw)
+          {
+            int grad_offset = NodeOffset(b, ph, pw, grad_height, grad_width);
+            grad_mat.col(grad_offset) += incoming_mat.col(incoming_offset) / count;
+          }
+        }
+      }
+    }
+  }
+}
+
+} // namespace train
+} // namespace cker
+} // namespace nnfw
+
+#endif // __NNFW_CKER_TRAIN_OPERATION_AVGPOOL_H__
diff --git a/compute/cker/src/train/AvgPool.test.cc b/compute/cker/src/train/AvgPool.test.cc
new file mode 100644
index 00000000000..307ac118ab9
--- /dev/null
+++ b/compute/cker/src/train/AvgPool.test.cc
@@ -0,0 +1,286 @@
+/*
+ * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cker/eigen/Utils.h>
+#include <cker/operation/AveragePool.h>
+#include <cker/train/operation/AvgPool.h>
+#include <cker/Shape.h>
+
+#include <gtest/gtest.h>
+#include <cassert>
+#include <limits>
+#include <vector>
+
+namespace
+{
+using namespace nnfw::cker;
+
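+// Test helper that runs the forward and backward AvgPool kernels with fixed
+// pooling parameters and shapes, then compares the results against expected
+// values (or expects a mismatch when `expect_eq` is false).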
+template <typename T> class AvgPoolOpVerifier
+{
+private:
+  const PoolParams _op_params;
+  const Shape _in_shape;
+  const Shape _out_shape;
+
+public:
+  AvgPoolOpVerifier(const nnfw::cker::PoolParams &op_params, const Shape &in_shape,
+                    const Shape &out_shape)
+    : _op_params(op_params), _in_shape(in_shape), _out_shape(out_shape)
+  {
+  }
+
+public:
+  void verifyForward(const std::vector<T> &input, const std::vector<T> &expected_output,
+                     bool expect_eq = true)
+  {
+    assert(input.size() == static_cast<size_t>(_in_shape.FlatSize()));
+    assert(expected_output.size() == static_cast<size_t>(_out_shape.FlatSize()));
+
+    std::vector<T> calculated_output(_out_shape.FlatSize());
+    nnfw::cker::train::AvgPool2D(_op_params, _in_shape, input.data(), _out_shape,
+                                 calculated_output.data());
+
+    if (expect_eq)
+    {
+      for (size_t i = 0; i < expected_output.size(); i++)
+      {
+        EXPECT_FLOAT_EQ(expected_output[i], calculated_output[i]);
+      }
+    }
+    else
+      EXPECT_NE(expected_output, calculated_output);
+  }
+
+  void verifyBackward(const std::vector<T> &incoming_data,
+                      const std::vector<T> &expected_grad_data, bool expect_eq = true)
+  {
+    assert(incoming_data.size() == static_cast<size_t>(_out_shape.FlatSize()));
+    assert(expected_grad_data.size() == static_cast<size_t>(_in_shape.FlatSize()));
+
+    std::vector<T> calculated_grad(_in_shape.FlatSize());
+    nnfw::cker::train::AvgPool2DGrad(_op_params, _out_shape, incoming_data.data(), _in_shape,
+                                     calculated_grad.data());
+
+    if (expect_eq)
+    {
+      for (size_t i = 0; i < expected_grad_data.size(); i++)
+      {
+        EXPECT_FLOAT_EQ(expected_grad_data[i], calculated_grad[i]);
+      }
+    }
+    else
+      EXPECT_NE(expected_grad_data, calculated_grad);
+  }
+};
+
+} // namespace
+
+TEST(CKer_Operation, AvgPool2D)
+{
+  // Depth 1 case
+  {
+    nnfw::cker::PoolParams op_param;
+    {
+      op_param.stride_height = 1;
+      op_param.stride_width = 1;
+      op_param.filter_height = 2;
+      op_param.filter_width = 2;
+      op_param.padding_values.height = 0;
+      op_param.padding_values.width = 0;
+      op_param.float_activation_min = std::numeric_limits<float>::lowest();
+      op_param.float_activation_max = std::numeric_limits<float>::max();
+    }
+    nnfw::cker::Shape in = {1, 3, 3, 1};
+    nnfw::cker::Shape out = {1, 2, 2, 1};
+
+    AvgPoolOpVerifier<float> verifier(op_param, in, out);
+
+    /**
+     *  input(index):                          output(count of pooled cells):
+     *
+     *  10(0)  15(1)   2(2)
+     *   7(3)   8(4)   9(5)   - (forward) ->    10(4)   8.5(4)
+     *  10(6)   1(7)   0(8)                    6.5(4)   4.5(4)
+     */
+
+    std::vector<float> input = {10, 15, 2, 7, 8, 9, 10, 1, 0};
+    std::vector<float> expected_output = {10, 8.5, 6.5, 4.5};
+    verifier.verifyForward(input, expected_output);
+
+    /**
+     *  output_deriv:                     input_deriv:
+     * (arbitrarily chosen)
+     *
+     *   0.4   0.4                        0.1   0.2   0.1
+     *   0.4   0.4     - (backward) ->    0.2   0.4   0.2
+     *                                    0.1   0.2   0.1
+     *
+     *  Each output is the average of 4 inputs, so every input cell receives
+     *  output_deriv / 4 = 0.1 from each of the windows that contain it.
+     */
+
+    std::vector<float> output_deriv = {0.4, 0.4, 0.4, 0.4};
+    std::vector<float> expected_input_deriv = {0.1, 0.2, 0.1, 0.2, 0.4, 0.2, 0.1, 0.2, 0.1};
+    verifier.verifyBackward(output_deriv, expected_input_deriv);
+  }
+
+  // Depth 2 case
+  {
+    nnfw::cker::PoolParams op_param;
+    {
+      op_param.stride_height = 1;
+      op_param.stride_width = 1;
+      op_param.filter_height = 3;
+      op_param.filter_width = 3;
+      op_param.padding_values.height = 0;
+      op_param.padding_values.width = 0;
+      op_param.float_activation_min = std::numeric_limits<float>::lowest();
+      op_param.float_activation_max = std::numeric_limits<float>::max();
+    }
+    nnfw::cker::Shape in = {1, 3, 3, 2};
+    nnfw::cker::Shape out = {1, 1, 1, 2};
+
+    AvgPoolOpVerifier<float> verifier(op_param, in, out);
+
+    /**
+     *  depth[0]
+     *  input(index) :                     output(index):
+     *
+     *  10(0)  15(1)  2(2)
+     *  10(3)  12(4)  17(5)   -(forward)->     16(0)
+     *  50(6)  30(7)  -2(8)
+     *
+     *
+     *  depth[1]
+     *  input(index):                      output(index):
+     *
+     *  -1(0)  2(1)  3(2)
+     *  8(3)   9(4)  2(5)    -(forward)->       4(0)
+     *  4(6)   2(7)  7(8)
+     */
+
+    std::vector<float> input(in.FlatSize());
+    auto input_mat = MapAsMatrixWithLastDimAsRows(input.data(), in);
+    input_mat << /* depth0 */ 10, 15, 2, 10, 12, 17, 50, 30, -2,
+      /* depth1 */ -1, 2, 3, 8, 9, 2, 4, 2, 7;
+    std::vector<float> expected_output = {16, 4};
+    verifier.verifyForward(input, expected_output);
+
+    /**
+     * depth[0]
+     * output_deriv:                 input_deriv:
+     *
+     *                             0.02  0.02  0.02
+     *    0.18     -(backward)->   0.02  0.02  0.02
+     *                             0.02  0.02  0.02
+     *
+     *
+     * depth[1]
+     * output_deriv:                input_deriv:
+     *                              0.04  0.04  0.04
+     *    0.36     -(backward)->    0.04  0.04  0.04
+     *                              0.04  0.04  0.04
+     */
+
+    std::vector<float> output_deriv = {0.18, 0.36};
+    std::vector<float> expected_input_deriv(in.FlatSize());
+    auto input_deriv_mat = MapAsMatrixWithLastDimAsRows(expected_input_deriv.data(), in);
+    input_deriv_mat << /* depth0 */ 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02,
+      /* depth1 */ 0.04, 0.04, 0.04, 0.04, 0.04, 0.04, 0.04, 0.04, 0.04;
+    verifier.verifyBackward(output_deriv, expected_input_deriv);
+  }
+
+  // With padding case
+  {
+    nnfw::cker::PoolParams op_param;
+    {
+      op_param.stride_height = 2;
+      op_param.stride_width = 2;
+      op_param.filter_height = 2;
+      op_param.filter_width = 2;
+      op_param.padding_values.height = 2;
+      op_param.padding_values.width = 2;
+      op_param.float_activation_min = std::numeric_limits<float>::lowest();
+      op_param.float_activation_max = std::numeric_limits<float>::max();
+    }
+    nnfw::cker::Shape in = {1, 2, 2, 1};
+    nnfw::cker::Shape out = {1, 3, 3, 1};
+
+    AvgPoolOpVerifier<float> verifier(op_param, in, out);
+
+    /**
+     * input_with_padding:             expected_output:
+     *
+     *    4   8                              0   0   0
+     *    9   2            -(forward)->      0  5.75 0
+     *                                       0   0   0
+     *
+     * Output cells whose pooling windows lie entirely in the padding area are
+     * left as 0; only the center window overlaps the real input.
+     */
+
+    std::vector<float> input = {4, 8, 9, 2};
+    std::vector<float> expected_output = {0, 0, 0, 0, 5.75, 0, 0, 0, 0};
+    verifier.verifyForward(input, expected_output);
+
+    /**
+     * output_deriv:                    input_deriv:
+     *
+     *  0.1   0.1   0.1                     0.1   0.1
+     *  0.1   0.4   0.3   -(backward)->     0.1   0.1
+     *  0.5   0.1   0.1
+     *
+     * Only the center output window overlaps the real input, so its gradient
+     * 0.4 is split as 0.4 / 4 = 0.1 per input cell; the rest is discarded.
+     */
+    std::vector<float> output_deriv = {0.1, 0.1, 0.1, 0.1, 0.4, 0.3, 0.5, 0.1, 0.1};
+    std::vector<float> expected_input_deriv = {0.1, 0.1, 0.1, 0.1};
+    verifier.verifyBackward(output_deriv, expected_input_deriv);
+  }
+}
+
+TEST(CKer_Operation, neg_AvgPool)
+{
+  // Invalid expected value
+  {
+    nnfw::cker::PoolParams op_param;
+    {
+      op_param.stride_height = 1;
+      op_param.stride_width = 1;
+      op_param.filter_height = 2;
+      op_param.filter_width = 2;
+      op_param.padding_values.height = 0;
+      op_param.padding_values.width = 0;
+      op_param.float_activation_min = std::numeric_limits<float>::lowest();
+      op_param.float_activation_max = std::numeric_limits<float>::max();
+    }
+    nnfw::cker::Shape in = {1, 2, 2, 1};
+    nnfw::cker::Shape out = {1, 1, 1, 1};
+
+    AvgPoolOpVerifier<float> verifier(op_param, in, out);
+
+    std::vector<float> input = {0, 0, 0, 0};
+    std::vector<float> expected_output = {-1};
+
+    verifier.verifyForward(input, expected_output, false);
+  }
+
+  // Invalid expected gradient value
+  {
+    nnfw::cker::PoolParams op_param;
+    {
+      op_param.stride_height = 2;
+      op_param.stride_width = 2;
+      op_param.filter_height = 2;
+      op_param.filter_width = 2;
+      op_param.padding_values.height = 1;
+      op_param.padding_values.width = 1;
+      op_param.float_activation_min = std::numeric_limits<float>::lowest();
+      op_param.float_activation_max = std::numeric_limits<float>::max();
+    }
+
+    nnfw::cker::Shape in = {1, 2, 2, 1};
+    nnfw::cker::Shape out = {1, 2, 2, 1};
+
+    AvgPoolOpVerifier<float> verifier(op_param, in, out);
+
+    std::vector<float> input = {0, 0, 0, 0};
+    std::vector<float> expected_output = {0, 0, 0, 0};
+    verifier.verifyForward(input, expected_output);
+
+    std::vector<float> output_deriv = {0.1, 0.1, 0.1, 0.2};
+    std::vector<float> expected_input_deriv = {0.1, 0.1, 0.1, 0.1};
+    verifier.verifyBackward(output_deriv, expected_input_deriv, false);
+  }
+}