Diffstat (limited to 'inference-engine/samples/lenet_network_graph_builder')
-rw-r--r--  inference-engine/samples/lenet_network_graph_builder/CMakeLists.txt                    37
-rw-r--r--  inference-engine/samples/lenet_network_graph_builder/LeNet.bin                         bin 0 -> 1724320 bytes
-rw-r--r--  inference-engine/samples/lenet_network_graph_builder/README.md                         54
-rw-r--r--  inference-engine/samples/lenet_network_graph_builder/lenet_network_graph_builder.hpp   90
-rw-r--r--  inference-engine/samples/lenet_network_graph_builder/main.cpp                          332
5 files changed, 513 insertions, 0 deletions
diff --git a/inference-engine/samples/lenet_network_graph_builder/CMakeLists.txt b/inference-engine/samples/lenet_network_graph_builder/CMakeLists.txt
new file mode 100644
index 000000000..aab478866
--- /dev/null
+++ b/inference-engine/samples/lenet_network_graph_builder/CMakeLists.txt
@@ -0,0 +1,37 @@
+# Copyright (C) 2018 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
+cmake_minimum_required(VERSION 2.8)
+
+set (TARGET_NAME "lenet_network_graph_builder")
+
+file (GLOB MAIN_SRC
+ ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp
+ )
+
+file (GLOB MAIN_HEADERS
+ ${CMAKE_CURRENT_SOURCE_DIR}/*.h
+ )
+
+# Create named folders for the sources within the .vcproj
+# Empty name lists them directly under the .vcproj
+source_group("src" FILES ${MAIN_SRC})
+source_group("include" FILES ${MAIN_HEADERS})
+
+
+link_directories(${LIB_FOLDER})
+
+# Create executable file from sources.
+add_executable(${TARGET_NAME} ${MAIN_SRC} ${MAIN_HEADERS})
+
+add_dependencies(${TARGET_NAME} gflags)
+
+set_target_properties(${TARGET_NAME} PROPERTIES "CMAKE_CXX_FLAGS" "${CMAKE_CXX_FLAGS} -fPIE"
+ COMPILE_PDB_NAME ${TARGET_NAME})
+
+target_link_libraries(${TARGET_NAME} ${InferenceEngine_LIBRARIES} gflags format_reader)
+
+if(UNIX)
+ target_link_libraries( ${TARGET_NAME} ${LIB_DL} pthread)
+endif()
\ No newline at end of file
diff --git a/inference-engine/samples/lenet_network_graph_builder/LeNet.bin b/inference-engine/samples/lenet_network_graph_builder/LeNet.bin
new file mode 100644
index 000000000..7ce66d0ba
--- /dev/null
+++ b/inference-engine/samples/lenet_network_graph_builder/LeNet.bin
Binary files differ
diff --git a/inference-engine/samples/lenet_network_graph_builder/README.md b/inference-engine/samples/lenet_network_graph_builder/README.md
new file mode 100644
index 000000000..d7fdfb792
--- /dev/null
+++ b/inference-engine/samples/lenet_network_graph_builder/README.md
@@ -0,0 +1,54 @@
+# LeNet Number Classification Network Using Graph Builder API
+
+This sample demonstrates how to execute inference with the Inference Engine Graph Builder API, using the LeNet classification network as an example.
+No XML file is required to build the network: the Graph Builder API allows constructing a network "on the fly" from source code. The sample uses single-channel ubyte images as input.
+<br>
+
+## Running
+
+Running the application with the <code>-h</code> option yields the following usage message:
+```sh
+./lenet_network_graph_builder -h
+InferenceEngine:
+ API version ............ <version>
+ Build .................. <number>
+
+lenet_network_graph_builder [OPTION]
+Options:
+
+ -h Print a usage message.
+ -m "<path>" Path to a .bin file with weights for trained model
+ -i "<path>" Required. Path to image or folder with images
+ -d "<device>" Specify the target device to infer on this. Sample will look for a suitable plugin for device specified(default value is CPU)
+ -pp "<path>" Path to a plugin folder
+ -pc Enables per-layer performance report
+ -nt "<integer>" Number of top results (default 10)
+ -ni "<integer>" Number of iterations (default 1)
+
+```
+
+Running the application with an empty list of options yields the usage message given above.
+
+For example, to run inference on a ubyte image on a GPU, use the following command:
+```sh
+./lenet_network_graph_builder -i <path_to_image> -m <path_to_weights_file> -d GPU
+```
+
+### Outputs
+
+By default, the application outputs the top-10 inference results for each infer request.
+In addition, it reports the throughput value measured in frames per second.
+
+### How it works
+
+Upon start-up, the sample reads the command line parameters, builds a network with the Graph Builder API, and fills it with the weights from the file passed via `-m`.
+Then the application loads the built network and an image into the Inference Engine plugin.
+
+When inference is done, the application outputs the inference results to the standard output stream. A minimal sketch of the network construction step is shown below.
+
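+The sketch below is a condensed illustration of the Graph Builder calls used in `main.cpp`; the full sample adds the complete LeNet topology (convolutions, poolings, fully connected layers, and SoftMax) together with real weights and biases blobs.
+
+```cpp
+// Minimal sketch of the Graph Builder pattern (see main.cpp for the complete LeNet definition)
+InferenceEngine::Builder::Network builder("LeNet");
+
+// Network input: NCHW port {batch, channels, height, width}
+size_t lastId = builder.addLayer(InferenceEngine::Builder::InputLayer("data").setPort(InferenceEngine::Port({1, 1, 28, 28})));
+
+// Each new layer is connected to the previously created layer by its id
+lastId = builder.addLayer({{lastId}}, InferenceEngine::Builder::ReLULayer("relu").setNegativeSlope(0.0f));
+builder.addLayer({InferenceEngine::PortInfo(lastId)}, InferenceEngine::Builder::OutputLayer("out"));
+
+// Convert the built graph to a CNNNetwork that can be loaded into a plugin
+InferenceEngine::CNNNetwork network{InferenceEngine::Builder::convertToICNNNetwork(builder.build())};
+```
+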
+## See Also
+* [Using Inference Engine Samples](./docs/IE_DG/Samples_Overview.md)
diff --git a/inference-engine/samples/lenet_network_graph_builder/lenet_network_graph_builder.hpp b/inference-engine/samples/lenet_network_graph_builder/lenet_network_graph_builder.hpp
new file mode 100644
index 000000000..7cb59e2bd
--- /dev/null
+++ b/inference-engine/samples/lenet_network_graph_builder/lenet_network_graph_builder.hpp
@@ -0,0 +1,90 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <string>
+#include <vector>
+#include <gflags/gflags.h>
+#include <iostream>
+
+#ifdef _WIN32
+#include <os/windows/w_dirent.h>
+#else
+#include <dirent.h>
+#endif
+
+#define DEFAULT_PATH_P "./lib"
+
+/// @brief message for help argument
+static const char help_message[] = "Print a usage message";
+
+/// @brief message for images argument
+static const char input_message[] = "Required. Path to image or folder with images";
+
+/// @brief message for model argument
+static const char model_message[] = "Path to a .bin file with weights for trained model";
+
+/// @brief message for assigning cnn calculation to device
+static const char target_device_message[] = "Specify the target device to infer on. " \
+                                            "Sample will look for a suitable plugin for the specified device " \
+                                            "(default value is CPU)";
+
+/// @brief message for plugin_path argument
+static const char plugin_path_message[] = "Path to a plugin folder";
+
+/// @brief message for performance counters
+static const char performance_counter_message[] = "Enables per-layer performance report";
+
+/// @brief message for top results number
+static const char ntop_message[] = "Number of top results (default 10)";
+
+/// @brief message for iterations count
+static const char iterations_count_message[] = "Number of iterations (default 1)";
+
+/// \brief Define flag for showing help message <br>
+DEFINE_bool(h, false, help_message);
+
+/// \brief Define parameter for setting the path to the model weights file (.bin) <br>
+DEFINE_string(m, "", model_message);
+
+/// \brief Define parameter for setting the path to an image or a folder with images <br>
+/// It is a required parameter
+DEFINE_string(i, "", input_message);
+
+/// \brief Define the target device to infer on <br>
+DEFINE_string(d, "CPU", target_device_message);
+
+/// \brief Define parameter for setting the path to plugins <br>
+/// Default is ./lib
+DEFINE_string(pp, "", plugin_path_message);
+
+/// @brief Enable per-layer performance report
+DEFINE_bool(pc, false, performance_counter_message);
+
+/// @brief Top results number (default 10) <br>
+DEFINE_int32(nt, 10, ntop_message);
+
+/// @brief Iterations count (default 1)
+DEFINE_int32(ni, 1, iterations_count_message);
+
+/**
+ * \brief This function shows a help message
+ */
+static void showUsage() {
+ std::cout << std::endl;
+ std::cout << "lenet_network_graph_builder [OPTION]" << std::endl;
+ std::cout << "Options:" << std::endl;
+ std::cout << std::endl;
+ std::cout << " -h " << help_message << std::endl;
+ std::cout << " -m \"<path>\" " << model_message << std::endl;
+ std::cout << " -i \"<path>\" " << input_message << std::endl;
+ std::cout << " -d \"<device>\" " << target_device_message << std::endl;
+ std::cout << " -pp \"<path>\" " << plugin_path_message << std::endl;
+ std::cout << " -pc " << performance_counter_message << std::endl;
+ std::cout << " -nt \"<integer>\" " << ntop_message << std::endl;
+ std::cout << " -ni \"<integer>\" " << iterations_count_message << std::endl;
+}
+
diff --git a/inference-engine/samples/lenet_network_graph_builder/main.cpp b/inference-engine/samples/lenet_network_graph_builder/main.cpp
new file mode 100644
index 000000000..cd9031aa0
--- /dev/null
+++ b/inference-engine/samples/lenet_network_graph_builder/main.cpp
@@ -0,0 +1,332 @@
+// Copyright (C) 2018 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <fstream>
+#include <vector>
+#include <string>
+#include <memory>
+
+#include <inference_engine.hpp>
+#include <ie_builders.hpp>
+#include <ie_utils.hpp>
+#include <format_reader_ptr.h>
+
+#include <samples/common.hpp>
+#include <samples/slog.hpp>
+#include <samples/args_helper.hpp>
+
+#include <gflags/gflags.h>
+#include "lenet_network_graph_builder.hpp"
+
+using namespace InferenceEngine;
+
+bool ParseAndCheckCommandLine(int argc, char *argv[]) {
+ slog::info << "Parsing input parameters" << slog::endl;
+
+ gflags::ParseCommandLineNonHelpFlags(&argc, &argv, true);
+ if (FLAGS_h) {
+ showUsage();
+ return false;
+ }
+
+ if (FLAGS_ni <= 0) {
+ throw std::logic_error("Incorrect value for ni argument. It should be more than 0");
+ }
+
+ if (FLAGS_nt <= 0 || FLAGS_nt > 10) {
+ throw std::logic_error("Incorrect value for nt argument. It should be more than 0 and less than 10");
+ }
+
+ return true;
+}
+
+void readFile(const std::string &file_name, void *buffer, size_t maxSize) {
+ std::ifstream inputFile;
+
+ inputFile.open(file_name, std::ios::binary | std::ios::in);
+ if (!inputFile.is_open()) {
+ throw std::logic_error("cannot open file weight file");
+ }
+ if (!inputFile.read(reinterpret_cast<char *>(buffer), maxSize)) {
+ inputFile.close();
+ throw std::logic_error("cannot read bytes from weight file");
+ }
+
+ inputFile.close();
+}
+
+TBlob<uint8_t>::CPtr ReadWeights(std::string filepath) {
+ std::ifstream weightFile(filepath, std::ifstream::ate | std::ifstream::binary);
+ int64_t fileSize = weightFile.tellg();
+
+ if (fileSize < 0) {
+ throw std::logic_error("Incorrect weight file");
+ }
+
+ size_t ulFileSize = static_cast<size_t>(fileSize);
+
+ TBlob<uint8_t>::Ptr weightsPtr(new TBlob<uint8_t>(Precision::FP32, C, {ulFileSize}));
+ weightsPtr->allocate();
+ readFile(filepath, weightsPtr->buffer(), ulFileSize);
+
+ return weightsPtr;
+}
+
+/**
+ * @brief The entry point for the Inference Engine LeNet network graph builder sample
+ * @file lenet_network_graph_builder/main.cpp
+ * @example lenet_network_graph_builder/main.cpp
+ */
+int main(int argc, char *argv[]) {
+ try {
+ slog::info << "InferenceEngine: " << GetInferenceEngineVersion() << slog::endl;
+
+ if (!ParseAndCheckCommandLine(argc, argv)) {
+ return 0;
+ }
+
+ /** This vector stores paths to the processed images **/
+ std::vector<std::string> images;
+ parseInputFilesArguments(images);
+ if (images.empty()) {
+ throw std::logic_error("No suitable images were found");
+ }
+
+ // --------------------------- 1. Load Plugin for inference engine -------------------------------------
+ slog::info << "Loading plugin" << slog::endl;
+ InferencePlugin plugin = PluginDispatcher({FLAGS_pp, "../../../lib/intel64", ""}).getPluginByDevice(FLAGS_d);
+ printPluginVersion(plugin, std::cout);
+
+ /** Per layer metrics **/
+ if (FLAGS_pc) {
+ plugin.SetConfig({ { PluginConfigParams::KEY_PERF_COUNT, PluginConfigParams::YES } });
+ }
+ // -----------------------------------------------------------------------------------------------------
+
+ //--------------------------- 2. Create network using graph builder ------------------------------------
+ TBlob<uint8_t>::CPtr weightsPtr = ReadWeights(FLAGS_m);
+
+ Builder::Network builder("LeNet");
+ size_t layerId = builder.addLayer(Builder::InputLayer("data").setPort(Port({1, 1, 28, 28})));
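+        /** The LeNet.bin file stores all trainable parameters contiguously as FP32 values, in the order
+         *  the layers are created below; the offsets passed to make_shared_blob are therefore counted in
+         *  float elements from the beginning of the file:
+         *    conv1 weights 20*1*5*5  = 500    at offset 0,       conv1 biases 20  at offset 500
+         *    conv2 weights 50*20*5*5 = 25000  at offset 520,     conv2 biases 50  at offset 25520
+         *    ip1   weights 800*500   = 400000 at offset 25570,   ip1   biases 500 at offset 425570
+         *    ip2   weights 500*10    = 5000   at offset 426070,  ip2   biases 10  at offset 431070
+         **/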
+ auto ptrWeights = make_shared_blob(TensorDesc(Precision::FP32, {500}, Layout::C),
+ weightsPtr->cbuffer().as<float *>());
+ auto ptrBiases = make_shared_blob(TensorDesc(Precision::FP32, {20}, Layout::C),
+ weightsPtr->cbuffer().as<float *>() + 500);
+ layerId = builder.addLayer({{layerId}}, Builder::ConvolutionLayer("conv1").setKernel({5, 5}).setDilation({1, 1})
+ .setGroup(1).setStrides({1, 1}).setOutDepth(20).setPaddingsBegin({0, 0}).setPaddingsEnd({0, 0})
+ .setWeights(ptrWeights).setBiases(ptrBiases));
+ layerId = builder.addLayer({{layerId}}, Builder::PoolingLayer("pool1").setExcludePad(true).setKernel({2, 2})
+ .setPaddingsBegin({0, 0}).setPaddingsEnd({0, 0})
+ .setPoolingType(Builder::PoolingLayer::PoolingType::MAX)
+ .setRoundingType(Builder::PoolingLayer::RoundingType::CEIL).setStrides({2, 2}));
+ ptrWeights = make_shared_blob(TensorDesc(Precision::FP32, {25000}, Layout::C),
+ weightsPtr->cbuffer().as<float *>() + 520);
+ ptrBiases = make_shared_blob(TensorDesc(Precision::FP32, {50}, Layout::C),
+ weightsPtr->cbuffer().as<float *>() + 25520);
+ layerId = builder.addLayer({{layerId}}, Builder::ConvolutionLayer("conv2").setDilation({1, 1}).setGroup(1)
+ .setKernel({5, 5}).setOutDepth(50).setPaddingsBegin({0, 0}).setPaddingsEnd({0, 0})
+ .setStrides({1, 1}).setWeights(ptrWeights).setBiases(ptrBiases));
+ layerId = builder.addLayer({{layerId}}, Builder::PoolingLayer("pool2").setExcludePad(true).setKernel({2, 2})
+ .setPaddingsBegin({0, 0}).setPaddingsEnd({0, 0}).setPoolingType(Builder::PoolingLayer::PoolingType::MAX)
+ .setRoundingType(Builder::PoolingLayer::RoundingType::CEIL).setStrides({2, 2}));
+ ptrWeights = make_shared_blob(TensorDesc(Precision::FP32, {400000}, Layout::C),
+ weightsPtr->cbuffer().as<float *>() + 102280 / 4);
+ ptrBiases = make_shared_blob(TensorDesc(Precision::FP32, {500}, Layout::C),
+ weightsPtr->cbuffer().as<float *>() + 1702280 / 4);
+ layerId = builder.addLayer({{layerId}}, Builder::FullyConnectedLayer("ip1").setOutputNum(500)
+ .setWeights(ptrWeights).setBiases(ptrBiases));
+ layerId = builder.addLayer({{layerId}}, Builder::ReLULayer("relu1").setNegativeSlope(0.0f));
+ ptrWeights = make_shared_blob(TensorDesc(Precision::FP32, {5000}, Layout::C),
+ weightsPtr->cbuffer().as<float *>() + 1704280 / 4);
+ ptrBiases = make_shared_blob(TensorDesc(Precision::FP32, {10}, Layout::C),
+ weightsPtr->cbuffer().as<float *>() + 1724280 / 4);
+ layerId = builder.addLayer({{layerId}}, Builder::FullyConnectedLayer("ip2").setOutputNum(10)
+ .setWeights(ptrWeights).setBiases(ptrBiases));
+ layerId = builder.addLayer({{layerId}}, Builder::SoftMaxLayer("prob").setAxis(1));
+ size_t outputId = builder.addLayer({PortInfo(layerId)}, Builder::OutputLayer("sf_out"));
+
+ CNNNetwork network{Builder::convertToICNNNetwork(builder.build())};
+ // -----------------------------------------------------------------------------------------------------
+
+ // --------------------------- 3. Configure input & output ---------------------------------------------
+ // --------------------------- Prepare input blobs -----------------------------------------------------
+ slog::info << "Preparing input blobs" << slog::endl;
+
+ InputsDataMap inputInfo = network.getInputsInfo();
+ if (inputInfo.size() != 1) {
+ throw std::logic_error("Sample supports topologies only with 1 input");
+ }
+
+ auto inputInfoItem = *inputInfo.begin();
+
+ /** Specifying the precision and layout of input data provided by the user.
+ * This should be called before load of the network to the plugin **/
+ inputInfoItem.second->setPrecision(Precision::FP32);
+ inputInfoItem.second->setLayout(Layout::NCHW);
+
+ std::vector<std::shared_ptr<unsigned char>> imagesData;
+ for (auto & i : images) {
+ FormatReader::ReaderPtr reader(i.c_str());
+ if (reader.get() == nullptr) {
+ slog::warn << "Image " + i + " cannot be read!" << slog::endl;
+ continue;
+ }
+ /** Store image data **/
+ std::shared_ptr<unsigned char> data(
+ reader->getData(inputInfoItem.second->getTensorDesc().getDims()[3],
+ inputInfoItem.second->getTensorDesc().getDims()[2]));
+ if (data.get() != nullptr) {
+ imagesData.push_back(data);
+ }
+ }
+
+ if (imagesData.empty()) {
+ throw std::logic_error("Valid input images were not found!");
+ }
+
+ /** Setting batch size using image count **/
+ network.setBatchSize(imagesData.size());
+ size_t batchSize = network.getBatchSize();
+ slog::info << "Batch size is " << std::to_string(batchSize) << slog::endl;
+
+ // --------------------------- Prepare output blobs -----------------------------------------------------
+ slog::info << "Checking that the outputs are as the demo expects" << slog::endl;
+ OutputsDataMap outputInfo(network.getOutputsInfo());
+ std::string firstOutputName;
+
+ for (auto & item : outputInfo) {
+ if (firstOutputName.empty()) {
+ firstOutputName = item.first;
+ }
+ DataPtr outputData = item.second;
+ if (!outputData) {
+ throw std::logic_error("output data pointer is not valid");
+ }
+
+ item.second->setPrecision(Precision::FP32);
+ }
+
+ if (outputInfo.size() != 1) {
+ throw std::logic_error("This demo accepts networks having only one output");
+ }
+
+ DataPtr& output = outputInfo.begin()->second;
+ auto outputName = outputInfo.begin()->first;
+
+ const SizeVector outputDims = output->getTensorDesc().getDims();
+        if (outputDims.size() != 2) {
+            throw std::logic_error("Incorrect output dimensions for LeNet");
+        }
+
+        const int classCount = outputDims[1];
+        if (classCount > 10) {
+            throw std::logic_error("Incorrect number of output classes for LeNet network");
+        }
+ output->setPrecision(Precision::FP32);
+ output->setLayout(Layout::NC);
+
+ // -----------------------------------------------------------------------------------------------------
+
+ // --------------------------- 4. Loading model to the plugin ------------------------------------------
+ slog::info << "Loading model to the plugin" << slog::endl;
+ ExecutableNetwork exeNetwork = plugin.LoadNetwork(network, {});
+ // -----------------------------------------------------------------------------------------------------
+
+ // --------------------------- 5. Create infer request -------------------------------------------------
+ InferRequest infer_request = exeNetwork.CreateInferRequest();
+ // -----------------------------------------------------------------------------------------------------
+
+ // --------------------------- 6. Prepare input --------------------------------------------------------
+ /** Iterate over all the input blobs **/
+ for (const auto & item : inputInfo) {
+ /** Creating input blob **/
+ Blob::Ptr input = infer_request.GetBlob(item.first);
+
+            /** Filling input tensor with images, converting interleaved (HWC) data to planar (NCHW) layout **/
+ size_t num_channels = input->getTensorDesc().getDims()[1];
+ size_t image_size = input->getTensorDesc().getDims()[2] * input->getTensorDesc().getDims()[3];
+
+ auto data = input->buffer().as<PrecisionTrait<Precision::FP32>::value_type*>();
+
+ /** Iterate over all input images **/
+ for (size_t image_id = 0; image_id < imagesData.size(); ++image_id) {
+ /** Iterate over all pixel in image (b,g,r) **/
+ for (size_t pid = 0; pid < image_size; pid++) {
+ /** Iterate over all channels **/
+ for (size_t ch = 0; ch < num_channels; ++ch) {
+                        /** [image stride + channel stride + pixel id], offsets in elements **/
+ data[image_id * image_size * num_channels + ch * image_size + pid ] = imagesData.at(image_id).get()[pid*num_channels + ch];
+ }
+ }
+ }
+ }
+ inputInfo = {};
+ // -----------------------------------------------------------------------------------------------------
+
+ // --------------------------- 7. Do inference ---------------------------------------------------------
+ typedef std::chrono::high_resolution_clock Time;
+ typedef std::chrono::duration<double, std::ratio<1, 1000>> ms;
+ typedef std::chrono::duration<float> fsec;
+
+ double total = 0.0;
+ /** Start inference & calc performance **/
+ for (int iter = 0; iter < FLAGS_ni; ++iter) {
+ auto t0 = Time::now();
+ infer_request.Infer();
+ auto t1 = Time::now();
+ fsec fs = t1 - t0;
+ ms d = std::chrono::duration_cast<ms>(fs);
+ total += d.count();
+ }
+ // -----------------------------------------------------------------------------------------------------
+
+ // --------------------------- 8. Process output -------------------------------------------------------
+ slog::info << "Processing output blobs" << slog::endl;
+
+ const Blob::Ptr outputBlob = infer_request.GetBlob(firstOutputName);
+ auto outputData = outputBlob->buffer().as<PrecisionTrait<Precision::FP32>::value_type*>();
+
+ /** Validating -nt value **/
+ const int resultsCnt = outputBlob->size() / batchSize;
+ if (FLAGS_nt > resultsCnt || FLAGS_nt < 1) {
+ slog::warn << "-nt " << FLAGS_nt << " is not available for this network (-nt should be less than " \
+                      << resultsCnt+1 << " and more than 0).\nThe maximal value will be used: " << resultsCnt;
+ FLAGS_nt = resultsCnt;
+ }
+
+        /** This vector stores, for each image in the batch, the class indices of its top N results **/
+ std::vector<unsigned> results;
+ TopResults(FLAGS_nt, *outputBlob, results);
+
+ std::cout << std::endl << "Top " << FLAGS_nt << " results:" << std::endl << std::endl;
+
+ /** Print the result iterating over each batch **/
+ for (int image_id = 0; image_id < batchSize; ++image_id) {
+ std::cout << "Image " << images[image_id] << std::endl << std::endl;
+ for (size_t id = image_id * FLAGS_nt, cnt = 0; cnt < FLAGS_nt; ++cnt, ++id) {
+ std::cout.precision(7);
+ /** Getting probability for resulting class **/
+ const auto result = outputData[results[id] + image_id*(outputBlob->size() / batchSize)];
+ std::cout << std::left << std::fixed << "Number: " << results[id] << "; Probability: " << result << std::endl;
+ }
+ std::cout << std::endl;
+ }
+ // -----------------------------------------------------------------------------------------------------
+ std::cout << std::endl << "total inference time: " << total << std::endl;
+ std::cout << "Average running time of one iteration: " << total / static_cast<double>(FLAGS_ni) << " ms" << std::endl;
+ std::cout << std::endl << "Throughput: " << 1000 * static_cast<double>(FLAGS_ni) * batchSize / total << " FPS" << std::endl;
+ std::cout << std::endl;
+ // -----------------------------------------------------------------------------------------------------
+
+ /** Show performance results **/
+ if (FLAGS_pc) {
+ printPerformanceCounts(infer_request, std::cout);
+ }
+ } catch (const std::exception &ex) {
+ slog::err << ex.what() << slog::endl;
+ return 3;
+ }
+ return 0;
+}
\ No newline at end of file