author     Anthony Barbier <anthony.barbier@arm.com>    2017-10-23 18:55:17 +0100
committer  Anthony Barbier <anthony.barbier@arm.com>    2017-10-23 18:58:11 +0100
commit     8a3da6f91f90c566b844d568f4ec43b946915af8 (patch)
tree       d8265ab6b77cdabaa856b623fc8c1ade64867c03 /utils
parent     2aab3165ae3104c8f3018157b62d4febdc94f2b7 (diff)
download   armcl-8a3da6f91f90c566b844d568f4ec43b946915af8.tar.gz
           armcl-8a3da6f91f90c566b844d568f4ec43b946915af8.tar.bz2
           armcl-8a3da6f91f90c566b844d568f4ec43b946915af8.zip
Update AlexNet example with accessors
Diffstat (limited to 'utils')
-rw-r--r--  utils/GraphUtils.cpp  111
-rw-r--r--  utils/GraphUtils.h     56
-rw-r--r--  utils/Utils.h          82
3 files changed, 238 insertions(+), 11 deletions(-)
diff --git a/utils/GraphUtils.cpp b/utils/GraphUtils.cpp
index bdd831075..bcfc0f799 100644
--- a/utils/GraphUtils.cpp
+++ b/utils/GraphUtils.cpp
@@ -34,8 +34,10 @@
#include "arm_compute/core/PixelValue.h"
#include "libnpy/npy.hpp"
+#include <algorithm>
+#include <iomanip>
+#include <ostream>
#include <random>
-#include <sstream>
using namespace arm_compute::graph_utils;
@@ -48,16 +50,8 @@ bool PPMWriter::access_tensor(ITensor &tensor)
{
std::stringstream ss;
ss << _name << _iterator << ".ppm";
- if(dynamic_cast<Tensor *>(&tensor) != nullptr)
- {
- arm_compute::utils::save_to_ppm(dynamic_cast<Tensor &>(tensor), ss.str());
- }
-#ifdef ARM_COMPUTE_CL
- else if(dynamic_cast<CLTensor *>(&tensor) != nullptr)
- {
- arm_compute::utils::save_to_ppm(dynamic_cast<CLTensor &>(tensor), ss.str());
- }
-#endif /* ARM_COMPUTE_CL */
+
+ arm_compute::utils::save_to_ppm(tensor, ss.str());
_iterator++;
if(_maximum == 0)
@@ -87,6 +81,101 @@ bool DummyAccessor::access_tensor(ITensor &tensor)
return ret;
}
+PPMAccessor::PPMAccessor(const std::string &ppm_path, bool bgr, float mean_r, float mean_g, float mean_b)
+ : _ppm_path(ppm_path), _bgr(bgr), _mean_r(mean_r), _mean_g(mean_g), _mean_b(mean_b)
+{
+}
+
+bool PPMAccessor::access_tensor(ITensor &tensor)
+{
+ utils::PPMLoader ppm;
+ const float mean[3] =
+ {
+ _bgr ? _mean_b : _mean_r,
+ _mean_g,
+ _bgr ? _mean_r : _mean_b
+ };
+
+ // Open PPM file
+ ppm.open(_ppm_path);
+
+ // Fill the tensor with the PPM content (BGR)
+ ppm.fill_planar_tensor(tensor, _bgr);
+
+ // Subtract the mean value from each channel
+ Window window;
+ window.use_tensor_dimensions(tensor.info()->tensor_shape());
+
+ execute_window_loop(window, [&](const Coordinates & id)
+ {
+ const float value = *reinterpret_cast<float *>(tensor.ptr_to_element(id)) - mean[id.z()];
+ *reinterpret_cast<float *>(tensor.ptr_to_element(id)) = value;
+ });
+
+ return true;
+}
+
+TopNPredictionsAccessor::TopNPredictionsAccessor(const std::string &labels_path, size_t top_n, std::ostream &output_stream)
+ : _labels(), _output_stream(output_stream), _top_n(top_n)
+{
+ _labels.clear();
+
+ std::ifstream ifs;
+
+ try
+ {
+ ifs.exceptions(std::ifstream::badbit);
+ ifs.open(labels_path, std::ios::in | std::ios::binary);
+
+ for(std::string line; !std::getline(ifs, line).fail();)
+ {
+ _labels.emplace_back(line);
+ }
+ }
+ catch(const std::ifstream::failure &e)
+ {
+ ARM_COMPUTE_ERROR("Accessing %s: %s", labels_path.c_str(), e.what());
+ }
+}
+
+bool TopNPredictionsAccessor::access_tensor(ITensor &tensor)
+{
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&tensor, 1, DataType::F32);
+ ARM_COMPUTE_ERROR_ON(_labels.size() != tensor.info()->dimension(0));
+
+ // Get the predicted class
+ std::vector<float> classes_prob;
+ std::vector<size_t> index;
+
+ const auto output_net = reinterpret_cast<float *>(tensor.buffer() + tensor.info()->offset_first_element_in_bytes());
+ const size_t num_classes = tensor.info()->dimension(0);
+
+ classes_prob.resize(num_classes);
+ index.resize(num_classes);
+
+ std::copy(output_net, output_net + num_classes, classes_prob.begin());
+
+ // Sort results
+ std::iota(std::begin(index), std::end(index), static_cast<size_t>(0));
+ std::sort(std::begin(index), std::end(index),
+ [&](size_t a, size_t b)
+ {
+ return classes_prob[a] > classes_prob[b];
+ });
+
+ _output_stream << "---------- Top " << _top_n << " predictions ----------" << std::endl
+ << std::endl;
+ for(size_t i = 0; i < _top_n; ++i)
+ {
+ _output_stream << std::fixed << std::setprecision(4)
+ << classes_prob[index.at(i)]
+ << " - [id = " << index.at(i) << "]"
+ << ", " << _labels[index.at(i)] << std::endl;
+ }
+
+ return false;
+}
+
RandomAccessor::RandomAccessor(PixelValue lower, PixelValue upper, std::random_device::result_type seed)
: _lower(lower), _upper(upper), _seed(seed)
{
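Aside: TopNPredictionsAccessor::access_tensor selects the top N classes by sorting a vector of indices rather than the scores themselves, so the class ids survive the sort (note that std::iota comes from <numeric>). A minimal, self-contained sketch of the same idea in plain standard C++, with made-up scores, not part of the commit:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>
    #include <numeric>
    #include <vector>

    int main()
    {
        // Made-up class scores standing in for the network output buffer
        const std::vector<float> classes_prob = { 0.02f, 0.71f, 0.05f, 0.15f, 0.07f };
        const size_t             top_n        = 3;

        // Sort indices by descending probability; the scores stay in place
        std::vector<size_t> index(classes_prob.size());
        std::iota(index.begin(), index.end(), static_cast<size_t>(0));
        std::sort(index.begin(), index.end(),
                  [&](size_t a, size_t b) { return classes_prob[a] > classes_prob[b]; });

        for(size_t i = 0; i < top_n; ++i)
        {
            std::printf("%.4f - [id = %zu]\n", classes_prob[index[i]], index[i]);
        }
        return 0;
    }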
diff --git a/utils/GraphUtils.h b/utils/GraphUtils.h
index 5c370e5eb..39b3f115b 100644
--- a/utils/GraphUtils.h
+++ b/utils/GraphUtils.h
@@ -29,6 +29,8 @@
#include "arm_compute/graph/Types.h"
#include <random>
+#include <string>
+#include <vector>
namespace arm_compute
{
@@ -76,6 +78,60 @@ private:
unsigned int _maximum;
};
+/** PPM accessor class */
+class PPMAccessor final : public graph::ITensorAccessor
+{
+public:
+ /** Constructor
+ *
+ * @param[in] ppm_path Path to PPM file
+ * @param[in] bgr (Optional) Fill the first plane with blue channel (default = true)
+ * @param[in] mean_r (Optional) Red mean value to be subtracted from red channel
+ * @param[in] mean_g (Optional) Green mean value to be subtracted from green channel
+ * @param[in] mean_b (Optional) Blue mean value to be subtracted from blue channel
+ */
+ PPMAccessor(const std::string &ppm_path, bool bgr = true, float mean_r = 0.0f, float mean_g = 0.0f, float mean_b = 0.0f);
+ /** Allow instances of this class to be move constructed */
+ PPMAccessor(PPMAccessor &&) = default;
+
+ // Inherited methods overridden:
+ bool access_tensor(ITensor &tensor) override;
+
+private:
+ const std::string _ppm_path;
+ const bool _bgr;
+ const float _mean_r;
+ const float _mean_g;
+ const float _mean_b;
+};
+
+/** Result accessor class */
+class TopNPredictionsAccessor final : public graph::ITensorAccessor
+{
+public:
+ /** Constructor
+ *
+ * @param[in] labels_path Path to labels text file.
+ * @param[in] top_n (Optional) Number of output classes to print
+ * @param[out] output_stream (Optional) Output stream
+ */
+ TopNPredictionsAccessor(const std::string &labels_path, size_t top_n = 5, std::ostream &output_stream = std::cout);
+ /** Allow instances of this class to be move constructed */
+ TopNPredictionsAccessor(TopNPredictionsAccessor &&) = default;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ TopNPredictionsAccessor(const TopNPredictionsAccessor &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ TopNPredictionsAccessor &operator=(const TopNPredictionsAccessor &) = delete;
+
+ // Inherited methods overridden:
+ bool access_tensor(ITensor &tensor) override;
+
+private:
+ std::vector<std::string> _labels;
+ std::ostream &_output_stream;
+ size_t _top_n;
+};
+
/** Random accessor class */
class RandomAccessor final : public graph::ITensorAccessor
{
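For orientation, a hedged usage sketch of the two new accessors driven directly against plain arm_compute::Tensor objects. Paths, shapes, labels file and mean values are placeholders; in the AlexNet graph example the accessors are instead attached to the graph's input and output nodes:

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/TensorShape.h"
    #include "arm_compute/runtime/Tensor.h"
    #include "utils/GraphUtils.h"

    #include <iostream>
    #include <string>

    using namespace arm_compute;
    using namespace arm_compute::graph_utils;

    int main()
    {
        // Placeholder 227x227 RGB input and 1000-class output, AlexNet-like
        Tensor input{}, output{};
        input.allocator()->init(TensorInfo(TensorShape(227U, 227U, 3U), 1, DataType::F32));
        output.allocator()->init(TensorInfo(TensorShape(1000U), 1, DataType::F32));
        input.allocator()->allocate();
        output.allocator()->allocate();

        // Fill the input from a PPM file (dimensions must match) and subtract per-channel means
        const std::string ppm_path = "/path/to/image_227x227.ppm"; // placeholder
        PPMAccessor       input_accessor(ppm_path, true /* bgr */, 122.68f, 116.67f, 104.01f);
        input_accessor.access_tensor(input);

        // ... run the network so that 'output' holds the 1000 class probabilities ...

        // Print the top 5 predictions (the labels file must contain one line per class)
        const std::string       labels_path = "/path/to/labels.txt"; // placeholder
        TopNPredictionsAccessor output_accessor(labels_path, 5);
        output_accessor.access_tensor(output);

        return 0;
    }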
diff --git a/utils/Utils.h b/utils/Utils.h
index b0e1abeb5..c88de0e16 100644
--- a/utils/Utils.h
+++ b/utils/Utils.h
@@ -263,6 +263,88 @@ public:
}
}
+ /** Fill a 3-plane tensor (one plane per channel) with the content of the currently open PPM file.
+ *
+ * @note If the tensor is a CLTensor, the function maps and unmaps the buffer
+ *
+ * @param[in,out] tensor Tensor with 3 planes to fill (Must be allocated, and of matching dimensions with the opened PPM). Data types supported: U8/F32
+ * @param[in] bgr (Optional) Fill the first plane with blue channel (default = false)
+ */
+ template <typename T>
+ void fill_planar_tensor(T &tensor, bool bgr = false)
+ {
+ ARM_COMPUTE_ERROR_ON(!is_open());
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&tensor, 1, DataType::U8, DataType::F32);
+ ARM_COMPUTE_ERROR_ON(tensor.info()->dimension(0) != _width || tensor.info()->dimension(1) != _height || tensor.info()->dimension(2) != 3);
+
+ try
+ {
+ // Map buffer if creating a CLTensor
+ map(tensor, true);
+
+ // Check if the file is large enough to fill the image
+ const size_t current_position = _fs.tellg();
+ _fs.seekg(0, std::ios_base::end);
+ const size_t end_position = _fs.tellg();
+ _fs.seekg(current_position, std::ios_base::beg);
+
+ ARM_COMPUTE_ERROR_ON_MSG((end_position - current_position) < tensor.info()->tensor_shape().total_size(),
+ "Not enough data in file");
+ ARM_COMPUTE_UNUSED(end_position);
+
+ // Iterate through every pixel of the image
+ arm_compute::Window window;
+ window.set(arm_compute::Window::DimX, arm_compute::Window::Dimension(0, _width, 1));
+ window.set(arm_compute::Window::DimY, arm_compute::Window::Dimension(0, _height, 1));
+ window.set(arm_compute::Window::DimZ, arm_compute::Window::Dimension(0, 1, 1));
+
+ arm_compute::Iterator out(&tensor, window);
+
+ unsigned char red = 0;
+ unsigned char green = 0;
+ unsigned char blue = 0;
+
+ size_t stride_z = tensor.info()->strides_in_bytes()[2];
+
+ arm_compute::execute_window_loop(window, [&](const arm_compute::Coordinates & id)
+ {
+ red = _fs.get();
+ green = _fs.get();
+ blue = _fs.get();
+
+ switch(tensor.info()->data_type())
+ {
+ case arm_compute::DataType::U8:
+ {
+ *(out.ptr() + 0 * stride_z) = bgr ? blue : red;
+ *(out.ptr() + 1 * stride_z) = green;
+ *(out.ptr() + 2 * stride_z) = bgr ? red : blue;
+ break;
+ }
+ case arm_compute::DataType::F32:
+ {
+ *reinterpret_cast<float *>(out.ptr() + 0 * stride_z) = static_cast<float>(bgr ? blue : red);
+ *reinterpret_cast<float *>(out.ptr() + 1 * stride_z) = static_cast<float>(green);
+ *reinterpret_cast<float *>(out.ptr() + 2 * stride_z) = static_cast<float>(bgr ? red : blue);
+ break;
+ }
+ default:
+ {
+ ARM_COMPUTE_ERROR("Unsupported data type");
+ }
+ }
+ },
+ out);
+
+ // Unmap buffer if creating a CLTensor
+ unmap(tensor);
+ }
+ catch(const std::ifstream::failure &e)
+ {
+ ARM_COMPUTE_ERROR("Loading PPM file: %s", e.what());
+ }
+ }
+
private:
std::ifstream _fs;
unsigned int _width, _height;
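Finally, a small hedged sketch of the new PPMLoader::fill_planar_tensor() helper on its own. The path and shape are placeholders and must match the PPM's actual dimensions, otherwise the function raises an error:

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/TensorShape.h"
    #include "arm_compute/runtime/Tensor.h"
    #include "utils/Utils.h"

    using namespace arm_compute;

    int main()
    {
        // Placeholder shape: width and height must match the PPM file
        Tensor src{};
        src.allocator()->init(TensorInfo(TensorShape(227U, 227U, 3U), 1, DataType::U8));
        src.allocator()->allocate();

        utils::PPMLoader ppm;
        ppm.open("/path/to/image_227x227.ppm"); // placeholder path
        // Writes R, G and B into three consecutive planes along Z; pass true for B, G, R order
        ppm.fill_planar_tensor(src, false /* bgr */);

        return 0;
    }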