/*
// Copyright (c) 2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
*/

#include <gflags/gflags.h>
#include <algorithm>
#include <functional>
#include <iostream>
#include <fstream>
#include <random>
#include <string>
#include <memory>
#include <vector>
#include <limits>
#include <chrono>

#include <format_reader_ptr.h>
#include <inference_engine.hpp>
#include <ext_list.hpp>

#include <samples/common.hpp>
#include <samples/slog.hpp>
#include <samples/args_helper.hpp>

#include "object_detection_sample_ssd.h"

using namespace InferenceEngine;

bool ParseAndCheckCommandLine(int argc, char *argv[]) {
    // ---------------------------Parsing and validation of input args--------------------------------------
    slog::info << "Parsing input parameters" << slog::endl;

    gflags::ParseCommandLineNonHelpFlags(&argc, &argv, true);
    if (FLAGS_h) {
        showUsage();
        return false;
    }

    if (FLAGS_ni < 1) {
        throw std::logic_error("Parameter -ni should be greater than 0 (default: 1)");
    }

    if (FLAGS_i.empty()) {
        throw std::logic_error("Parameter -i is not set");
    }

    if (FLAGS_m.empty()) {
        throw std::logic_error("Parameter -m is not set");
    }

    return true;
}

/**
 * \brief The entry point for the Inference Engine object_detection sample application
 * \file object_detection_sample_ssd/main.cpp
 * \example object_detection_sample_ssd/main.cpp
 */
int main(int argc, char *argv[]) {
    try {
        /** This sample covers a certain topology and cannot be generalized to any object detection network **/
        slog::info << "InferenceEngine: " << GetInferenceEngineVersion() << "\n";

        // --------------------------- 1. Parsing and validation of input args ---------------------------------
        if (!ParseAndCheckCommandLine(argc, argv)) {
            return 0;
        }
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- 2. Read input -----------------------------------------------------------
        /** This vector stores paths to the processed images **/
        std::vector<std::string> images;
        parseImagesArguments(images);
        if (images.empty()) throw std::logic_error("No suitable images were found");
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- 3. Load Plugin for inference engine -------------------------------------
        slog::info << "Loading plugin" << slog::endl;
        InferencePlugin plugin = PluginDispatcher({ FLAGS_pp, "../../../lib/intel64", "" }).getPluginByDevice(FLAGS_d);

        /* If CPU device, load the default library with extensions that comes with the product */
        if (FLAGS_d.find("CPU") != std::string::npos) {
            /**
             * The cpu_extensions library is compiled from the "extension" folder containing
             * custom MKLDNNPlugin layer implementations. These layers are not supported
             * by mkldnn, but they can be useful for inferring custom topologies.
             **/
            plugin.AddExtension(std::make_shared<Extensions::Cpu::CpuExtensions>());
        }
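        /** Note: in this release, SSD-specific layers such as PriorBox and DetectionOutput
          * are implemented in cpu_extensions, so SSD topologies typically fail to load on
          * CPU without it. Custom implementations can also be supplied explicitly below:
          * -l takes a CPU extension shared library, -c an .xml of GPU kernel descriptions. **/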
        if (!FLAGS_l.empty()) {
            // CPU (MKLDNN) extensions are loaded as a shared library and passed as a pointer to the base extension
            IExtensionPtr extension_ptr = make_so_pointer<IExtension>(FLAGS_l);
            plugin.AddExtension(extension_ptr);
            slog::info << "CPU Extension loaded: " << FLAGS_l << slog::endl;
        }

        if (!FLAGS_c.empty()) {
            // clDNN Extensions are loaded from an .xml description and OpenCL kernel files
            plugin.SetConfig({ { PluginConfigParams::KEY_CONFIG_FILE, FLAGS_c } });
            slog::info << "GPU Extension loaded: " << FLAGS_c << slog::endl;
        }

        /** Setting plugin parameter for collecting per-layer metrics **/
        if (FLAGS_pc) {
            plugin.SetConfig({ { PluginConfigParams::KEY_PERF_COUNT, PluginConfigParams::YES } });
        }

        /** Printing plugin version **/
        printPluginVersion(plugin, std::cout);
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- 4. Read IR Generated by ModelOptimizer (.xml and .bin files) ------------
        std::string binFileName = fileNameNoExt(FLAGS_m) + ".bin";
        slog::info << "Loading network files:"
            "\n\t" << FLAGS_m <<
            "\n\t" << binFileName <<
            slog::endl;

        CNNNetReader networkReader;
        /** Read network model **/
        networkReader.ReadNetwork(FLAGS_m);
        /** Extract model name and load weights **/
        networkReader.ReadWeights(binFileName);
        CNNNetwork network = networkReader.getNetwork();
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- 5. Prepare input blobs --------------------------------------------------
        slog::info << "Preparing input blobs" << slog::endl;

        /** Taking information about all topology inputs **/
        InputsDataMap inputsInfo(network.getInputsInfo());

        /** An SSD network has one input and one output **/
        if (inputsInfo.size() != 1 && inputsInfo.size() != 2) throw std::logic_error("Sample supports topologies only with 1 or 2 inputs");

        /**
         * Some networks have an SSD-like output format (ending with a DetectionOutput layer) but
         * have 2 inputs like Faster-RCNN: one for the image and one for "image info".
         *
         * Although object_detection_sample_ssd's main task is to support clean SSD, it can score
         * networks with two inputs as well. For such networks imInfoInputName will contain the "second" input name.
         */
        std::string imageInputName, imInfoInputName;

        InputInfo::Ptr inputInfo = inputsInfo.begin()->second;

        SizeVector inputImageDims;  /** Stores input image dims **/

        /** Iterating over all input blobs **/
        for (auto & item : inputsInfo) {
            /** Working with the first input tensor that stores the image **/
            if (item.second->getInputData()->getTensorDesc().getDims().size() == 4) {
                imageInputName = item.first;

                slog::info << "Batch size is " << std::to_string(networkReader.getNetwork().getBatchSize()) << slog::endl;

                /** Creating first input blob **/
                Precision inputPrecision = Precision::U8;
                item.second->setPrecision(inputPrecision);
            } else if (item.second->getInputData()->getTensorDesc().getDims().size() == 2) {
                imInfoInputName = item.first;

                Precision inputPrecision = Precision::FP32;
                item.second->setPrecision(inputPrecision);
                if ((item.second->getTensorDesc().getDims()[1] != 3 &&
                     item.second->getTensorDesc().getDims()[1] != 6) ||
                     item.second->getTensorDesc().getDims()[0] != 1) {
                    throw std::logic_error("Invalid input info: should be a 1 x 3 or 1 x 6 tensor");
                }
            }
        }
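        /** Note: the image input stays U8 here; the plugin converts it to the network's
          * native precision during preprocessing. For Faster-RCNN-style models the
          * "image info" input typically carries [height, width, scale], which is why a
          * 1 x 3 (or extended 1 x 6) FP32 tensor is expected above. **/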
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- 6. Prepare output blobs -------------------------------------------------
        slog::info << "Preparing output blobs" << slog::endl;

        OutputsDataMap outputsInfo(network.getOutputsInfo());

        std::string outputName;
        DataPtr outputInfo;
        for (const auto& out : outputsInfo) {
            if (out.second->creatorLayer.lock()->type == "DetectionOutput") {
                outputName = out.first;
                outputInfo = out.second;
            }
        }

        if (outputInfo == nullptr) {
            throw std::logic_error("Can't find a DetectionOutput layer in the topology");
        }

        const SizeVector outputDims = outputInfo->getTensorDesc().getDims();

        if (outputDims.size() != 4) {
            throw std::logic_error("Incorrect output dimensions for SSD model");
        }

        const int maxProposalCount = outputDims[2];
        const int objectSize = outputDims[3];

        if (objectSize != 7) {
            throw std::logic_error("Output item should have 7 as a last dimension");
        }

        /** Set the precision of output data provided by the user; this should be called before loading the network to the plugin **/
        outputInfo->setPrecision(Precision::FP32);
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- 7. Loading model to the plugin ------------------------------------------
        slog::info << "Loading model to the plugin" << slog::endl;

        ExecutableNetwork executable_network = plugin.LoadNetwork(network, {});
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- 8. Create infer request -------------------------------------------------
        InferRequest infer_request = executable_network.CreateInferRequest();
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- 9. Prepare input --------------------------------------------------------
        /** Collect images data ptrs **/
        std::vector<std::shared_ptr<unsigned char>> imagesData, originalImagesData;
        std::vector<size_t> imageWidths, imageHeights;

        for (auto & i : images) {
            FormatReader::ReaderPtr reader(i.c_str());
            if (reader.get() == nullptr) {
                slog::warn << "Image " + i + " cannot be read!" << slog::endl;
                continue;
            }
            /** Store image data **/
            std::shared_ptr<unsigned char> originalData(reader->getData());
            std::shared_ptr<unsigned char> data(reader->getData(inputInfo->getTensorDesc().getDims()[3], inputInfo->getTensorDesc().getDims()[2]));
            if (data.get() != nullptr) {
                originalImagesData.push_back(originalData);
                imagesData.push_back(data);
                imageWidths.push_back(reader->width());
                imageHeights.push_back(reader->height());
            }
        }
        if (imagesData.empty()) throw std::logic_error("Valid input images were not found!");

        size_t batchSize = network.getBatchSize();
        slog::info << "Batch size is " << std::to_string(batchSize) << slog::endl;
        if (batchSize != imagesData.size()) {
            slog::warn << "Number of images " + std::to_string(imagesData.size()) +
                " doesn't match batch size " + std::to_string(batchSize) << slog::endl;
            slog::warn << std::to_string(std::min(imagesData.size(), batchSize)) +
                " images will be processed" << slog::endl;
            batchSize = std::min(batchSize, imagesData.size());
        }

        /** Creating input blob **/
        Blob::Ptr imageInput = infer_request.GetBlob(imageInputName);

        /** Filling input tensor with images. First b channel, then g and r channels **/
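        /** The reader returns interleaved HWC data (pixel 0: b,g,r; pixel 1: b,g,r; ...),
          * while the network expects planar NCHW, so the copy below de-interleaves:
          * dst[image * C*H*W + ch * H*W + pid] = src[pid * C + ch] **/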
        size_t num_channels = imageInput->getTensorDesc().getDims()[1];
        size_t image_size = imageInput->getTensorDesc().getDims()[3] * imageInput->getTensorDesc().getDims()[2];

        unsigned char *data = static_cast<unsigned char *>(imageInput->buffer());

        /** Iterate over all input images **/
        for (size_t image_id = 0; image_id < std::min(imagesData.size(), batchSize); ++image_id) {
            /** Iterate over all pixels in the image (b,g,r) **/
            for (size_t pid = 0; pid < image_size; pid++) {
                /** Iterate over all channels **/
                for (size_t ch = 0; ch < num_channels; ++ch) {
                    /** [images stride + channels stride + pixel id] all in bytes **/
                    data[image_id * image_size * num_channels + ch * image_size + pid] = imagesData.at(image_id).get()[pid*num_channels + ch];
                }
            }
        }

        if (imInfoInputName != "") {
            Blob::Ptr input2 = infer_request.GetBlob(imInfoInputName);
            auto imInfoDim = inputsInfo.find(imInfoInputName)->second->getTensorDesc().getDims()[1];

            /** Fill input tensor with values **/
            float *p = input2->buffer().as<PrecisionTrait<Precision::FP32>::value_type*>();

            for (size_t image_id = 0; image_id < std::min(imagesData.size(), batchSize); ++image_id) {
                p[image_id * imInfoDim + 0] = static_cast<float>(inputsInfo[imageInputName]->getTensorDesc().getDims()[2]);
                p[image_id * imInfoDim + 1] = static_cast<float>(inputsInfo[imageInputName]->getTensorDesc().getDims()[3]);
                for (size_t k = 2; k < imInfoDim; k++) {
                    p[image_id * imInfoDim + k] = 1.0f;  // all scale factors are set to 1.0
                }
            }
        }
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- 10. Do inference ---------------------------------------------------------
        slog::info << "Start inference (" << FLAGS_ni << " iterations)" << slog::endl;

        typedef std::chrono::high_resolution_clock Time;
        typedef std::chrono::duration<double, std::ratio<1, 1000>> ms;
        typedef std::chrono::duration<float> fsec;

        double total = 0.0;
        /** Start inference & calculate performance **/
        for (int iter = 0; iter < FLAGS_ni; ++iter) {
            auto t0 = Time::now();
            infer_request.Infer();
            auto t1 = Time::now();
            fsec fs = t1 - t0;
            ms d = std::chrono::duration_cast<ms>(fs);
            total += d.count();
        }
        // -----------------------------------------------------------------------------------------------------

        // --------------------------- 11. Process output -------------------------------------------------------
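        /** The DetectionOutput blob has shape [1, 1, N, 7]: each of the N proposals is a
          * 7-float record [image_id, label, confidence, xmin, ymin, xmax, ymax], with box
          * coordinates normalized to [0, 1] relative to the input image size. **/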
        slog::info << "Processing output blobs" << slog::endl;

        const Blob::Ptr output_blob = infer_request.GetBlob(outputName);
        const float* detection = static_cast<PrecisionTrait<Precision::FP32>::value_type*>(output_blob->buffer());

        std::vector<std::vector<int> > boxes(batchSize);
        std::vector<std::vector<int> > classes(batchSize);

        /* Each detection has an image_id that denotes the processed image */
        for (int curProposal = 0; curProposal < maxProposalCount; curProposal++) {
            float image_id = detection[curProposal * objectSize + 0];
            float label = detection[curProposal * objectSize + 1];
            float confidence = detection[curProposal * objectSize + 2];
            /* CPU and GPU plugins have a difference in the DetectionOutput layer, so we need both checks */
            if (image_id < 0 || confidence == 0) {
                continue;
            }

            float xmin = detection[curProposal * objectSize + 3] * imageWidths[image_id];
            float ymin = detection[curProposal * objectSize + 4] * imageHeights[image_id];
            float xmax = detection[curProposal * objectSize + 5] * imageWidths[image_id];
            float ymax = detection[curProposal * objectSize + 6] * imageHeights[image_id];

            std::cout << "[" << curProposal << "," << label << "] element, prob = " << confidence <<
                "    (" << xmin << "," << ymin << ")-(" << xmax << "," << ymax << ")" << " batch id : " << image_id;

            if (confidence > 0.5) {
                /** Drawing only objects with >50% probability **/
                classes[image_id].push_back(static_cast<int>(label));
                boxes[image_id].push_back(static_cast<int>(xmin));
                boxes[image_id].push_back(static_cast<int>(ymin));
                boxes[image_id].push_back(static_cast<int>(xmax - xmin));
                boxes[image_id].push_back(static_cast<int>(ymax - ymin));
                std::cout << " WILL BE PRINTED!";
            }
            std::cout << std::endl;
        }

        for (size_t batch_id = 0; batch_id < batchSize; ++batch_id) {
            addRectangles(originalImagesData[batch_id].get(), imageHeights[batch_id], imageWidths[batch_id], boxes[batch_id], classes[batch_id]);
            const std::string image_path = "out_" + std::to_string(batch_id) + ".bmp";
            if (writeOutputBmp(image_path, originalImagesData[batch_id].get(), imageHeights[batch_id], imageWidths[batch_id])) {
                slog::info << "Image " + image_path + " created!" << slog::endl;
            } else {
                throw std::logic_error(std::string("Can't create a file: ") + image_path);
            }
        }
        // -----------------------------------------------------------------------------------------------------

        std::cout << std::endl << "total inference time: " << total << " ms" << std::endl;
        std::cout << "Average running time of one iteration: " << total / static_cast<double>(FLAGS_ni) << " ms" << std::endl;
        std::cout << std::endl << "Throughput: " << 1000 * static_cast<double>(FLAGS_ni) * batchSize / total << " FPS" << std::endl;
        std::cout << std::endl;

        /** Show performance results **/
        if (FLAGS_pc) {
            printPerformanceCounts(infer_request, std::cout);
        }
    }
    catch (const std::exception& error) {
        slog::err << error.what() << slog::endl;
        return 1;
    }
    catch (...) {
        slog::err << "Unknown/internal exception happened." << slog::endl;
        return 1;
    }

    slog::info << "Execution successful" << slog::endl;
    return 0;
}
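/** Example invocation (paths are illustrative):
  *   ./object_detection_sample_ssd -m <path_to_model>/ssd.xml -i <path_to_image>/car.bmp -d CPU -ni 10
  * Annotated results are written to out_0.bmp, out_1.bmp, ... in the working directory. **/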