diff options
Diffstat (limited to 'runtimes/pure_arm_compute/src/internal/Sinks.h')
-rw-r--r-- | runtimes/pure_arm_compute/src/internal/Sinks.h | 35 |
1 files changed, 29 insertions, 6 deletions
diff --git a/runtimes/pure_arm_compute/src/internal/Sinks.h b/runtimes/pure_arm_compute/src/internal/Sinks.h
index e8a7d5966..7317c67c1 100644
--- a/runtimes/pure_arm_compute/src/internal/Sinks.h
+++ b/runtimes/pure_arm_compute/src/internal/Sinks.h
@@ -14,6 +14,12 @@
  * limitations under the License.
  */
 
+/**
+ * @file Sinks.h
+ * @brief This file contains TensorSink class
+ * @ingroup COM_AI_RUNTIME
+ */
+
 #ifndef __INTERNAL_SINKS_H__
 #define __INTERNAL_SINKS_H__
 
@@ -28,29 +34,46 @@
 #include "internal/nnapi/tensor/View.h"
 #include "internal/arm_compute/tensor/View.h"
 
-#include "util/tensor/IndexIterator.h"
+#include "misc/tensor/IndexIterator.h"
 
+/**
+ * @brief Class to store NN model output data for general-shaped tensors.
+ *        This is for pulling data to internal tensor from other tensor.
+ * @tparam T Type of the data elements
+ */
 template <typename T> class TensorSink final : public Sink
 {
 public:
-  TensorSink(const nnfw::util::tensor::Shape &shape, T *base, const size_t size)
+  /**
+   * @brief Construct a TensorSink object
+   *
+   * @param[in] shape general-shaped tensor dimensions
+   * @param[in] base Base pointer of the actual data
+   * @param[in] size Size of the data
+   */
+  TensorSink(const nnfw::misc::tensor::Shape &shape, T *base, const size_t size)
     : _shape{shape}, _base{base}, _size{size}
   {
     // DO NOTHING
   }
 
 public:
+  /**
+   * @brief Pull the data into the internal structure
+   * @param[in] tensor The tensor which contains source data
+   * @return N/A
+   */
   void pull(::arm_compute::ITensor &tensor) const override
   {
     const ::internal::arm_compute::tensor::View<T> from{&tensor};
     ::internal::nnapi::tensor::View<T> into{_shape, _base, _size};
 
-    using ::nnfw::util::tensor::iterate;
-    using ::nnfw::util::tensor::Index;
+    using ::nnfw::misc::tensor::iterate;
+    using ::nnfw::misc::tensor::Index;
 
     const uint32_t rank = _shape.rank();
 
-    ::nnfw::util::tensor::iterate(_shape) << [&](const Index &raw) {
+    ::nnfw::misc::tensor::iterate(_shape) << [&](const Index &raw) {
       Index permuted(raw.rank());
 
       for (uint32_t axis = 0; axis < rank; ++axis)
@@ -64,7 +87,7 @@ public:
   }
 
 private:
-  const nnfw::util::tensor::Shape _shape;
+  const nnfw::misc::tensor::Shape _shape;
 
 private:
   T *const _base;