path: root/utils/Utils.h
author    Anthony Barbier <Anthony.barbier@arm.com>   2017-12-14 23:48:46 +0000
committer Anthony Barbier <anthony.barbier@arm.com>   2018-01-24 10:01:21 +0000
commit    8140e1e155d3430992fa46e04ef8938ff09ffd2d (patch)
tree      9bcf86d01635bfc73e8debd1bda75e6f75b8b406 /utils/Utils.h
parent    8a3da6f91f90c566b844d568f4ec43b946915af8 (diff)
download  armcl-8140e1e155d3430992fa46e04ef8938ff09ffd2d.tar.gz
          armcl-8140e1e155d3430992fa46e04ef8938ff09ffd2d.tar.bz2
          armcl-8140e1e155d3430992fa46e04ef8938ff09ffd2d.zip
arm_compute v17.12
Diffstat (limited to 'utils/Utils.h')
-rw-r--r--  utils/Utils.h  450
1 files changed, 398 insertions, 52 deletions
diff --git a/utils/Utils.h b/utils/Utils.h
index c88de0e16..cfc9c8cb1 100644
--- a/utils/Utils.h
+++ b/utils/Utils.h
@@ -30,17 +30,26 @@
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "arm_compute/runtime/Tensor.h"
+#include "libnpy/npy.hpp"
#include "support/ToolchainSupport.h"
#ifdef ARM_COMPUTE_CL
#include "arm_compute/core/CL/OpenCL.h"
+#include "arm_compute/runtime/CL/CLDistribution1D.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#endif /* ARM_COMPUTE_CL */
+#ifdef ARM_COMPUTE_GC
+#include "arm_compute/runtime/GLES_COMPUTE/GCTensor.h"
+#endif /* ARM_COMPUTE_GC */
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
+#include <random>
+#include <string>
+#include <tuple>
+#include <vector>
namespace arm_compute
{
@@ -80,6 +89,66 @@ void draw_detection_rectangle(arm_compute::ITensor *tensor, const arm_compute::D
*/
std::tuple<unsigned int, unsigned int, int> parse_ppm_header(std::ifstream &fs);
+/** Parse the npy header from an input file stream. At the end of the execution,
+ * the file position pointer will be located at the first data element stored in the npy file
+ *
+ * @param[in] fs Input file stream to parse
+ *
+ * @return The shape, fortran order flag and typestring stored in the header of the NPY file
+ */
+std::tuple<std::vector<unsigned long>, bool, std::string> parse_npy_header(std::ifstream &fs);
+
+/** Obtain numpy type string from DataType.
+ *
+ * @param[in] data_type Data type.
+ *
+ * @return numpy type string.
+ */
+inline std::string get_typestring(DataType data_type)
+{
+ // Check endianness
+ const unsigned int i = 1;
+ const char *c = reinterpret_cast<const char *>(&i);
+ std::string endianness;
+ if(*c == 1)
+ {
+ endianness = std::string("<");
+ }
+ else
+ {
+ endianness = std::string(">");
+ }
+ const std::string no_endianness("|");
+
+ switch(data_type)
+ {
+ case DataType::U8:
+ return no_endianness + "u" + support::cpp11::to_string(sizeof(uint8_t));
+ case DataType::S8:
+ return no_endianness + "i" + support::cpp11::to_string(sizeof(int8_t));
+ case DataType::U16:
+ return endianness + "u" + support::cpp11::to_string(sizeof(uint16_t));
+ case DataType::S16:
+ return endianness + "i" + support::cpp11::to_string(sizeof(int16_t));
+ case DataType::U32:
+ return endianness + "u" + support::cpp11::to_string(sizeof(uint32_t));
+ case DataType::S32:
+ return endianness + "i" + support::cpp11::to_string(sizeof(int32_t));
+ case DataType::U64:
+ return endianness + "u" + support::cpp11::to_string(sizeof(uint64_t));
+ case DataType::S64:
+ return endianness + "i" + support::cpp11::to_string(sizeof(int64_t));
+ case DataType::F32:
+ return endianness + "f" + support::cpp11::to_string(sizeof(float));
+ case DataType::F64:
+ return endianness + "f" + support::cpp11::to_string(sizeof(double));
+ case DataType::SIZET:
+ return endianness + "u" + support::cpp11::to_string(sizeof(size_t));
+ default:
+ ARM_COMPUTE_ERROR("NOT SUPPORTED!");
+ }
+}
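For illustration, on a little-endian host this helper produces the usual numpy typestrings, for example:

    get_typestring(DataType::F32); // "<f4": little-endian, 4-byte float
    get_typestring(DataType::U8);  // "|u1": single-byte types carry no byte-order marker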
+
/** Maps a tensor if needed
*
* @param[in] tensor Tensor to be mapped
@@ -121,8 +190,48 @@ inline void unmap(CLTensor &tensor)
{
tensor.unmap();
}
+
+/** Maps a distribution if needed
+ *
+ * @param[in] distribution Distribution to be mapped
+ * @param[in] blocking Specifies if the map is blocking or not
+ */
+inline void map(CLDistribution1D &distribution, bool blocking)
+{
+ distribution.map(blocking);
+}
+
+/** Unmaps a distribution if needed
+ *
+ * @param distribution Distribution to be unmapped
+ */
+inline void unmap(CLDistribution1D &distribution)
+{
+ distribution.unmap();
+}
#endif /* ARM_COMPUTE_CL */
+#ifdef ARM_COMPUTE_GC
+/** Maps a tensor if needed
+ *
+ * @param[in] tensor Tensor to be mapped
+ * @param[in] blocking Specifies if the map is blocking or not
+ */
+inline void map(GCTensor &tensor, bool blocking)
+{
+ tensor.map(blocking);
+}
+
+/** Unmaps a tensor if needed
+ *
+ * @param tensor Tensor to be unmapped
+ */
+inline void unmap(GCTensor &tensor)
+{
+ tensor.unmap();
+}
+#endif /* ARM_COMPUTE_GC */
+
/** Class to load the content of a PPM file into an Image
*/
class PPMLoader
@@ -191,7 +300,7 @@ public:
ARM_COMPUTE_ERROR_ON_FORMAT_NOT_IN(&image, arm_compute::Format::U8, arm_compute::Format::RGB888);
try
{
- // Map buffer if creating a CLTensor
+ // Map buffer if creating a CLTensor/GCTensor
map(image, true);
// Check if the file is large enough to fill the image
@@ -254,7 +363,7 @@ public:
ARM_COMPUTE_ERROR("Unsupported format");
}
- // Unmap buffer if creating a CLTensor
+ // Unmap buffer if creating a CLTensor/GCTensor
unmap(image);
}
catch(const std::ifstream::failure &e)
@@ -345,11 +454,178 @@ public:
}
}
+ /** Return the width of the currently open PPM file.
+ */
+ unsigned int width() const
+ {
+ return _width;
+ }
+
+ /** Return the height of the currently open PPM file.
+ */
+ unsigned int height() const
+ {
+ return _height;
+ }
+
private:
std::ifstream _fs;
unsigned int _width, _height;
};
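A minimal sketch of the new width()/height() accessors, assuming using namespace arm_compute; in user code and a hypothetical input file: after open(), they can be used to size a destination tensor.

    utils::PPMLoader ppm;
    ppm.open("input.ppm"); // hypothetical path
    Tensor dst;
    dst.allocator()->init(TensorInfo(ppm.width(), ppm.height(), Format::U8));
    dst.allocator()->allocate();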
+class NPYLoader
+{
+public:
+ NPYLoader()
+ : _fs(), _shape(), _fortran_order(false), _typestring()
+ {
+ }
+
+ /** Open a NPY file and read its metadata
+ *
+ * @param[in] npy_filename File to open
+ */
+ void open(const std::string &npy_filename)
+ {
+ ARM_COMPUTE_ERROR_ON(is_open());
+ try
+ {
+ _fs.exceptions(std::ifstream::failbit | std::ifstream::badbit);
+ _fs.open(npy_filename, std::ios::in | std::ios::binary);
+
+ std::tie(_shape, _fortran_order, _typestring) = parse_npy_header(_fs);
+ }
+ catch(const std::ifstream::failure &e)
+ {
+ ARM_COMPUTE_ERROR("Accessing %s: %s", npy_filename.c_str(), e.what());
+ }
+ }
+ /** Return true if a NPY file is currently open */
+ bool is_open()
+ {
+ return _fs.is_open();
+ }
+
+ /** Return true if a NPY file is in fortran order */
+ bool is_fortran()
+ {
+ return _fortran_order;
+ }
+
+ /** Initialise the tensor's metadata with the dimensions of the NPY file currently open
+ *
+ * @param[out] tensor Tensor to initialise
+ * @param[in] dt Data type to use for the tensor
+ */
+ template <typename T>
+ void init_tensor(T &tensor, arm_compute::DataType dt)
+ {
+ ARM_COMPUTE_ERROR_ON(!is_open());
+ ARM_COMPUTE_ERROR_ON(dt != arm_compute::DataType::F32);
+
+ // Use the size of the input NPY tensor
+ TensorShape shape;
+ shape.set_num_dimensions(_shape.size());
+ for(size_t i = 0; i < _shape.size(); ++i)
+ {
+ shape.set(i, _shape.at(i));
+ }
+
+ arm_compute::TensorInfo tensor_info(shape, 1, dt);
+ tensor.allocator()->init(tensor_info);
+ }
+
+ /** Fill a tensor with the content of the currently open NPY file.
+ *
+ * @note If the tensor is a CLTensor, the function maps and unmaps the tensor
+ *
+ * @param[in,out] tensor Tensor to fill (Must be allocated, and of matching dimensions with the opened NPY).
+ */
+ template <typename T>
+ void fill_tensor(T &tensor)
+ {
+ ARM_COMPUTE_ERROR_ON(!is_open());
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_NOT_IN(&tensor, arm_compute::DataType::F32);
+ try
+ {
+ // Map buffer if creating a CLTensor
+ map(tensor, true);
+
+ // Check if the file is large enough to fill the tensor
+ const size_t current_position = _fs.tellg();
+ _fs.seekg(0, std::ios_base::end);
+ const size_t end_position = _fs.tellg();
+ _fs.seekg(current_position, std::ios_base::beg);
+
+ ARM_COMPUTE_ERROR_ON_MSG((end_position - current_position) < tensor.info()->tensor_shape().total_size() * tensor.info()->element_size(),
+ "Not enough data in file");
+ ARM_COMPUTE_UNUSED(end_position);
+
+ // Check if the typestring matches the given one
+ std::string expect_typestr = get_typestring(tensor.info()->data_type());
+ ARM_COMPUTE_ERROR_ON_MSG(_typestring != expect_typestr, "Typestrings mismatch");
+
+ // Validate tensor shape
+ ARM_COMPUTE_ERROR_ON_MSG(_shape.size() != tensor.info()->tensor_shape().num_dimensions(), "Tensor ranks mismatch");
+ if(_fortran_order)
+ {
+ for(size_t i = 0; i < _shape.size(); ++i)
+ {
+ ARM_COMPUTE_ERROR_ON_MSG(tensor.info()->tensor_shape()[i] != _shape[i], "Tensor dimensions mismatch");
+ }
+ }
+ else
+ {
+ for(size_t i = 0; i < _shape.size(); ++i)
+ {
+ ARM_COMPUTE_ERROR_ON_MSG(tensor.info()->tensor_shape()[i] != _shape[_shape.size() - i - 1], "Tensor dimensions mismatch");
+ }
+ }
+
+ switch(tensor.info()->data_type())
+ {
+ case arm_compute::DataType::F32:
+ {
+ // Read data
+ if(tensor.info()->padding().empty())
+ {
+ // If the tensor has no padding, read its data directly from the stream.
+ _fs.read(reinterpret_cast<char *>(tensor.buffer()), tensor.info()->total_size());
+ }
+ else
+ {
+ // If the tensor has padding, access its elements through the execution window.
+ Window window;
+ window.use_tensor_dimensions(tensor.info()->tensor_shape());
+
+ execute_window_loop(window, [&](const Coordinates & id)
+ {
+ _fs.read(reinterpret_cast<char *>(tensor.ptr_to_element(id)), tensor.info()->element_size());
+ });
+ }
+
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("Unsupported data type");
+ }
+
+ // Unmap buffer if creating a CLTensor
+ unmap(tensor);
+ }
+ catch(const std::ifstream::failure &e)
+ {
+ ARM_COMPUTE_ERROR("Loading NPY file: %s", e.what());
+ }
+ }
+
+private:
+ std::ifstream _fs;
+ std::vector<unsigned long> _shape;
+ bool _fortran_order;
+ std::string _typestring;
+};
+
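A minimal usage sketch of the new NPYLoader, assuming using namespace arm_compute; in user code and a hypothetical file name; fill_tensor() maps and unmaps CL/GC tensors internally.

    utils::NPYLoader npy;
    npy.open("input.npy");                // hypothetical path
    Tensor src;
    npy.init_tensor(src, DataType::F32);  // shape comes from the NPY header
    src.allocator()->allocate();
    npy.fill_tensor(src);                 // checks typestring and shape, then copies the data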
/** Template helper function to save a tensor image to a PPM file.
*
* @note Only U8 and RGB888 formats supported.
@@ -378,7 +654,7 @@ void save_to_ppm(T &tensor, const std::string &ppm_filename)
fs << "P6\n"
<< width << " " << height << " 255\n";
- // Map buffer if creating a CLTensor
+ // Map buffer if creating a CLTensor/GCTensor
map(tensor, true);
switch(tensor.info()->format())
@@ -421,7 +697,7 @@ void save_to_ppm(T &tensor, const std::string &ppm_filename)
ARM_COMPUTE_ERROR("Unsupported format");
}
- // Unmap buffer if creating a CLTensor
+ // Unmap buffer if creating a CLTensor/GCTensor
unmap(tensor);
}
catch(const std::ofstream::failure &e)
@@ -430,6 +706,83 @@ void save_to_ppm(T &tensor, const std::string &ppm_filename)
}
}
+/** Template helper function to save a tensor to a NPY file.
+ *
+ * @note Only F32 data type supported.
+ * @note Only works with 2D tensors.
+ * @note If the input tensor is a CLTensor, the function maps and unmaps the tensor
+ *
+ * @param[in] tensor The tensor to save as NPY file
+ * @param[in] npy_filename Filename of the file to create.
+ * @param[in] fortran_order If true, save matrix in fortran order.
+ */
+template <typename T>
+void save_to_npy(T &tensor, const std::string &npy_filename, bool fortran_order)
+{
+ ARM_COMPUTE_ERROR_ON_DATA_TYPE_NOT_IN(&tensor, arm_compute::DataType::F32);
+ ARM_COMPUTE_ERROR_ON(tensor.info()->num_dimensions() > 2);
+
+ std::ofstream fs;
+
+ try
+ {
+ fs.exceptions(std::ofstream::failbit | std::ofstream::badbit | std::ofstream::eofbit);
+ fs.open(npy_filename, std::ios::out | std::ios::binary);
+
+ const unsigned int width = tensor.info()->tensor_shape()[0];
+ const unsigned int height = tensor.info()->tensor_shape()[1];
+ std::vector<npy::ndarray_len_t> shape(2);
+
+ if(!fortran_order)
+ {
+ shape[0] = height, shape[1] = width;
+ }
+ else
+ {
+ shape[0] = width, shape[1] = height;
+ }
+
+ // Map buffer if creating a CLTensor
+ map(tensor, true);
+
+ switch(tensor.info()->data_type())
+ {
+ case arm_compute::DataType::F32:
+ {
+ std::vector<float> tmp; /* Used only to get the typestring */
+ npy::Typestring typestring_o{ tmp };
+ std::string typestring = typestring_o.str();
+
+ std::ofstream stream(npy_filename, std::ofstream::binary);
+ npy::write_header(stream, typestring, fortran_order, shape);
+
+ arm_compute::Window window;
+ window.set(arm_compute::Window::DimX, arm_compute::Window::Dimension(0, width, 1));
+ window.set(arm_compute::Window::DimY, arm_compute::Window::Dimension(0, height, 1));
+
+ arm_compute::Iterator in(&tensor, window);
+
+ arm_compute::execute_window_loop(window, [&](const arm_compute::Coordinates & id)
+ {
+ stream.write(reinterpret_cast<const char *>(in.ptr()), sizeof(float));
+ },
+ in);
+
+ break;
+ }
+ default:
+ ARM_COMPUTE_ERROR("Unsupported format");
+ }
+
+ // Unmap buffer if creating a CLTensor
+ unmap(tensor);
+ }
+ catch(const std::ofstream::failure &e)
+ {
+ ARM_COMPUTE_ERROR("Writing %s: (%s)", npy_filename.c_str(), e.what());
+ }
+}
+
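A matching save sketch (output file name hypothetical); only 2D F32 tensors are supported, and CLTensor/GCTensor arguments are mapped and unmapped internally.

    utils::save_to_npy(src, "output.npy", false /* write in C (row-major) order */);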
/** Load the tensor with pre-trained data from a binary file
*
* @param[in] tensor The tensor to be filled. Data type supported: F32.
@@ -453,7 +806,7 @@ void load_trained_data(T &tensor, const std::string &filename)
throw std::runtime_error("Could not load binary data: " + filename);
}
- // Map buffer if creating a CLTensor
+ // Map buffer if creating a CLTensor/GCTensor
map(tensor, true);
Window window;
@@ -473,10 +826,8 @@ void load_trained_data(T &tensor, const std::string &filename)
},
in);
-#ifdef ARM_COMPUTE_CL
- // Unmap buffer if creating a CLTensor
+ // Unmap buffer if creating a CLTensor/GCTensor
unmap(tensor);
-#endif /* ARM_COMPUTE_CL */
}
catch(const std::ofstream::failure &e)
{
@@ -484,56 +835,51 @@ void load_trained_data(T &tensor, const std::string &filename)
}
}
-/** Obtain numpy type string from DataType.
- *
- * @param[in] data_type Data type.
- *
- * @return numpy type string.
- */
-inline std::string get_typestring(DataType data_type)
+template <typename T>
+void fill_random_tensor(T &tensor, float lower_bound, float upper_bound)
{
- // Check endianness
- const unsigned int i = 1;
- const char *c = reinterpret_cast<const char *>(&i);
- std::string endianness;
- if(*c == 1)
- {
- endianness = std::string("<");
- }
- else
- {
- endianness = std::string(">");
- }
- const std::string no_endianness("|");
+ std::random_device rd;
+ std::mt19937 gen(rd());
- switch(data_type)
+ TensorShape shape(tensor.info()->dimension(0), tensor.info()->dimension(1));
+
+ Window window;
+ window.set(Window::DimX, Window::Dimension(0, shape.x(), 1));
+ window.set(Window::DimY, Window::Dimension(0, shape.y(), 1));
+
+ map(tensor, true);
+
+ Iterator it(&tensor, window);
+
+ switch(tensor.info()->data_type())
{
- case DataType::U8:
- return no_endianness + "u" + support::cpp11::to_string(sizeof(uint8_t));
- case DataType::S8:
- return no_endianness + "i" + support::cpp11::to_string(sizeof(int8_t));
- case DataType::U16:
- return endianness + "u" + support::cpp11::to_string(sizeof(uint16_t));
- case DataType::S16:
- return endianness + "i" + support::cpp11::to_string(sizeof(int16_t));
- case DataType::U32:
- return endianness + "u" + support::cpp11::to_string(sizeof(uint32_t));
- case DataType::S32:
- return endianness + "i" + support::cpp11::to_string(sizeof(int32_t));
- case DataType::U64:
- return endianness + "u" + support::cpp11::to_string(sizeof(uint64_t));
- case DataType::S64:
- return endianness + "i" + support::cpp11::to_string(sizeof(int64_t));
- case DataType::F32:
- return endianness + "f" + support::cpp11::to_string(sizeof(float));
- case DataType::F64:
- return endianness + "f" + support::cpp11::to_string(sizeof(double));
- case DataType::SIZET:
- return endianness + "u" + support::cpp11::to_string(sizeof(size_t));
+ case arm_compute::DataType::F32:
+ {
+ std::uniform_real_distribution<float> dist(lower_bound, upper_bound);
+
+ execute_window_loop(window, [&](const Coordinates & id)
+ {
+ *reinterpret_cast<float *>(it.ptr()) = dist(gen);
+ },
+ it);
+
+ break;
+ }
default:
- ARM_COMPUTE_ERROR("NOT SUPPORTED!");
+ {
+ ARM_COMPUTE_ERROR("Unsupported format");
+ }
}
+
+ unmap(tensor);
}
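A minimal sketch for fill_random_tensor on an allocated 2D F32 tensor, with arbitrarily chosen bounds:

    Tensor src0;
    src0.allocator()->init(TensorInfo(TensorShape(64U, 32U), 1, DataType::F32));
    src0.allocator()->allocate();
    utils::fill_random_tensor(src0, -1.f, 1.f); // uniform values in [-1, 1]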
+
+template <typename T>
+void init_sgemm_output(T &dst, T &src0, T &src1, arm_compute::DataType dt)
+{
+ dst.allocator()->init(TensorInfo(TensorShape(src1.info()->dimension(0), src0.info()->dimension(1)), 1, dt));
+}
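init_sgemm_output sizes the GEMM result as src1's width (columns of B) by src0's height (rows of A); a short sketch continuing from two allocated inputs named src0 and src1 (names assumed):

    Tensor dst;
    utils::init_sgemm_output(dst, src0, src1, DataType::F32);
    dst.allocator()->allocate();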
+
} // namespace utils
} // namespace arm_compute
#endif /* __UTILS_UTILS_H__*/