Diffstat (limited to 'inference-engine/ie_bridges')
-rw-r--r--  inference-engine/ie_bridges/python/CMakeLists.txt  85
-rw-r--r--  inference-engine/ie_bridges/python/README.md  57
-rw-r--r--  inference-engine/ie_bridges/python/cmake/CopyIeLibs.cmake  10
-rw-r--r--  inference-engine/ie_bridges/python/cmake/FindCython.cmake  25
-rw-r--r--  inference-engine/ie_bridges/python/cmake/ReplicatePythonSourceTree.cmake  7
-rw-r--r--  inference-engine/ie_bridges/python/cmake/UseCython.cmake  17
-rw-r--r--  inference-engine/ie_bridges/python/docs/api_overview.md  108
-rw-r--r--  inference-engine/ie_bridges/python/inference_engine/CMakeLists.txt  69
-rw-r--r--  inference-engine/ie_bridges/python/inference_engine/__init__.py  3
-rw-r--r--  inference-engine/ie_bridges/python/inference_engine/ie_api_impl.hpp  129
-rw-r--r--  inference-engine/ie_bridges/python/sample/benchmark_app/README.md  81
-rw-r--r--  inference-engine/ie_bridges/python/sample/benchmark_app/benchmark.py  204
-rw-r--r--  inference-engine/ie_bridges/python/sample/benchmark_app/utils/benchmark_utils.py  122
-rw-r--r--  inference-engine/ie_bridges/python/sample/benchmark_app/utils/constants.py  63
-rw-r--r--  inference-engine/ie_bridges/python/sample/classification_sample.py  6
-rw-r--r--  inference-engine/ie_bridges/python/sample/classification_sample_async.py  8
-rw-r--r--  inference-engine/ie_bridges/python/sample/greengrass_samples/greengrass_classification_sample.py  26
-rw-r--r--  inference-engine/ie_bridges/python/sample/greengrass_samples/greengrass_object_detection_sample_ssd.py  47
-rw-r--r--  inference-engine/ie_bridges/python/sample/jupyter_notebooks/classification_demo/classification_demo.ipynb  463
-rw-r--r--  inference-engine/ie_bridges/python/sample/jupyter_notebooks/classification_demo/image_net_synset.txt  1000
-rw-r--r--  inference-engine/ie_bridges/python/sample/segmentation_sample.py  154
-rw-r--r--  inference-engine/ie_bridges/python/sample/style_transfer_sample.py  7
-rw-r--r--  inference-engine/ie_bridges/python/setup.py  10
-rw-r--r--  inference-engine/ie_bridges/python/src/openvino/__init__.py  0
-rw-r--r--  inference-engine/ie_bridges/python/src/openvino/inference_engine/CMakeLists.txt  36
-rw-r--r--  inference-engine/ie_bridges/python/src/openvino/inference_engine/__init__.py  3
-rw-r--r--  inference-engine/ie_bridges/python/src/openvino/inference_engine/dnn_builder/CMakeLists.txt  37
-rw-r--r--  inference-engine/ie_bridges/python/src/openvino/inference_engine/dnn_builder/__init__.py  2
-rw-r--r--  inference-engine/ie_bridges/python/src/openvino/inference_engine/dnn_builder/dnn_builder.pxd  26
-rw-r--r--  inference-engine/ie_bridges/python/src/openvino/inference_engine/dnn_builder/dnn_builder.pyx  423
-rw-r--r--  inference-engine/ie_bridges/python/src/openvino/inference_engine/dnn_builder/dnn_builder_impl.cpp  330
-rw-r--r--  inference-engine/ie_bridges/python/src/openvino/inference_engine/dnn_builder/dnn_builder_impl.hpp  161
-rw-r--r--  inference-engine/ie_bridges/python/src/openvino/inference_engine/dnn_builder/dnn_builder_impl_defs.pxd  97
-rw-r--r--  inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pxd (renamed from inference-engine/ie_bridges/python/inference_engine/ie_api.pxd)  21
-rw-r--r--  inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx (renamed from inference-engine/ie_bridges/python/inference_engine/ie_api.pyx)  173
-rw-r--r--  inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp (renamed from inference-engine/ie_bridges/python/inference_engine/ie_api_impl.cpp)  335
-rw-r--r--  inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp  174
-rw-r--r--  inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl_defs.pxd (renamed from inference-engine/ie_bridges/python/inference_engine/ie_api_impl_defs.pxd)  44
38 files changed, 3800 insertions, 763 deletions
diff --git a/inference-engine/ie_bridges/python/CMakeLists.txt b/inference-engine/ie_bridges/python/CMakeLists.txt
index 0fed2293b..2ce462bd6 100644
--- a/inference-engine/ie_bridges/python/CMakeLists.txt
+++ b/inference-engine/ie_bridges/python/CMakeLists.txt
@@ -1,42 +1,49 @@
-# Copyright (C) 2018 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-#
# Defines the CMake commands/policies
-cmake_minimum_required( VERSION 2.8.5 )
+cmake_minimum_required (VERSION 3.3)
# Set the project name
-project( INFERENCE_ENGINE_DRIVER )
-
-option(COPY_IE_LIBS "Copy Inference Engine libs to package directory" ${WIN32})
-
-set (IE_DEFAULT_PATH computer_vision_sdk/deployment_tools/inference_engine/share)
-
-find_package(InferenceEngine REQUIRED PATHS /opt/intel/${IE_DEFAULT_PATH} $ENV{HOME}/intel/${IE_DEFAULT_PATH})
-
-# Make the scripts available in the 'cmake' directory available for the
-# 'include()' command, 'find_package()' command.
-set( CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_CURRENT_LIST_DIR}/cmake )
-
-# Include the CMake script UseCython.cmake. This defines add_cython_module().
-# Instruction for use can be found at the top of cmake/UseCython.cmake.
-include( UseCython )
-
-# With CMake, a clean separation can be made between the source tree and the
-# build tree. When all source is compiled, as with pure C/C++, the source is
-# no-longer needed in the build tree. However, with pure *.py source, the
-# source is processed directly. To handle this, we reproduce the availability
-# of the source files in the build tree.
-add_custom_target( ReplicatePythonSourceTree ALL ${CMAKE_COMMAND} -P
- ${CMAKE_CURRENT_SOURCE_DIR}/cmake/ReplicatePythonSourceTree.cmake
- ${CMAKE_CURRENT_BINARY_DIR}
- WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} )
-
-add_custom_target( CopyIeLibs ${CMAKE_COMMAND} -P
- ${CMAKE_CURRENT_SOURCE_DIR}/cmake/CopyIeLibs.cmake
- ${IE_ROOT_DIR}/bin/${_ARCH}/Release ${_IE_ROOT_LIBRARY}
- WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/ie_driver )
-
-include_directories( IE::inference_engine )
-
-# Process the CMakeLists.txt in the 'src' and 'bin' directory.
-add_subdirectory( inference_engine )
+project (ie_python_api)
+set (CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_CURRENT_LIST_DIR}/cmake)
+
+if (CMAKE_SYSTEM_PROCESSOR STREQUAL "armv7l")
+ set (ARCH armv7l)
+elseif ("${CMAKE_SIZEOF_VOID_P}" EQUAL "8")
+ set (ARCH intel64)
+else()
+ set (ARCH ia32)
+endif()
+
+
+# in case of an independent Python API build (outside of the Inference Engine root CMake)
+if (NOT(IE_MAIN_SOURCE_DIR))
+ if("${CMAKE_BUILD_TYPE}" STREQUAL "")
+ message(STATUS "CMAKE_BUILD_TYPE not defined, 'Release' will be used")
+ set(CMAKE_BUILD_TYPE "Release")
+ endif()
+ message(STATUS "BUILD_CONFIGURATION: ${CMAKE_BUILD_TYPE}")
+
+ set (CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/bin/${ARCH})
+ if(NOT(WIN32))
+ set (CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${CMAKE_BUILD_TYPE})
+ endif()
+endif()
+
+include (UseCython)
+
+if (PYTHONINTERP_FOUND)
+ set (PYTHON_VERSION python${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR})
+else()
+ message(FATAL_ERROR "Python Interpretator was not found!")
+endif()
+
+if(WIN32)
+ set (PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/$<CONFIG>/python_api/${PYTHON_VERSION}/openvino)
+else()
+ set (PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/python_api/${PYTHON_VERSION}/openvino)
+endif()
+
+find_package (InferenceEngine REQUIRED)
+
+set (PYTHON_BRIDGE_SRC_ROOT ${CMAKE_CURRENT_SOURCE_DIR})
+add_subdirectory (src/openvino/inference_engine)
+add_subdirectory (src/openvino/inference_engine/dnn_builder) \ No newline at end of file
diff --git a/inference-engine/ie_bridges/python/README.md b/inference-engine/ie_bridges/python/README.md
index b9704fa25..6dbe6a0c0 100644
--- a/inference-engine/ie_bridges/python/README.md
+++ b/inference-engine/ie_bridges/python/README.md
@@ -7,40 +7,53 @@
## Prerequisites
-Install the following Python modules:
-- opencv-python
-- numpy
-- cython
+2. Install Inference Engine Python API dependencies:
+```bash
+pip3 install -r requirements.txt
+```
+
+## Building on Linux
+
+Build the Inference Engine Python API as part of the Inference Engine build.
+Run the Inference Engine build with the following flags:
+
+```shellscript
+ cd <IE_ROOT>
+ mkdir -p build
+ cd build
+ cmake -DENABLE_PYTHON=ON -DPYTHON_EXECUTABLE=`which python3.6` \
+ -DPYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython3.6m.so \
+ -DPYTHON_INCLUDE_DIR=/usr/include/python3.6 ..
+ make -j16
+```
## Building on Windows
+
+Run the Inference Engine build with the following flags:
+
```shellscript
+ cd <IE_ROOT>
mkdir build
cd build
set PATH=C:\Program Files\Python36\Scripts;%PATH%
- cmake -G "Visual Studio 14 2015 Win64" -DInferenceEngine_DIR=..\..\..\build ^
+ cmake -G "Visual Studio 15 2017 Win64" -T "Intel C++ Compiler 18.0" ^
+ -DENABLE_PYTHON=ON ^
-DPYTHON_EXECUTABLE="C:\Program Files\Python36\python.exe" ^
-DPYTHON_INCLUDE_DIR="C:\Program Files\Python36\include" ^
-DPYTHON_LIBRARY="C:\Program Files\Python36\libs\python36.lib" ..
```
-Then build generated solution INFERENCE_ENGINE_DRIVER.sln using Microsoft\* Visual Studio.
+Then build the generated solution INFERENCE_ENGINE_DRIVER.sln using Microsoft\* Visual Studio, or run `cmake --build . --config Release` to build from the command line.
-## Building on Linux
-```shellscript
- mkdir -p build
- cd build
- cmake -DInferenceEngine_DIR=`realpath ../../../build` -DPYTHON_EXECUTABLE=`which python3.6` \
- -DPYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython3.6m.so \
- -DPYTHON_INCLUDE_DIR=/usr/include/python3.6 ..
- make -j16
-```
-
-Note: `-DInferenceEngine_DIR` parameter is needed to specify the folder with generated make files or Visual Studio solution used to build Inference Engine (see readme file in the inference-engine root folder).
+## Running samples
-Before running the Python samples, please manually replicate OpenVINO folders structure with Python modules:
-- create an empty folder `openvino/inference_engine`
-- move built `ie_api.so` and `__init__.py` files from the `<build_folder>/inference_engine` to `openvino/inference_engine` folder
-- create an empty `__init__.py` file in the `openvino` folder
-- add the root folder where `openvino` folder is located to the PYTHONPATH environment variable.
+Before running the Python samples:
+- add the folder containing the built `openvino` Python module (located at `inference-engine/bin/intel64/Release/lib/python_api/python3.6`) to the PYTHONPATH environment variable.
- add the folder with Inference Engine libraries to LD_LIBRARY_PATH variable on Linux (or PATH on Windows).
+
+Example command line to run the classification sample:
+
+```bash
+python3 sample/classification_sample.py -m <path/to/xml> -i <path/to/input/image> -d CPU
+```
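+
+Once the environment variables are set, you can quickly verify the setup by importing the module before running a sample (a minimal check; any Python 3 interpreter matching the build configuration should work):
+
+```py
+>>> from openvino.inference_engine import IENetwork, IEPlugin
+```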
diff --git a/inference-engine/ie_bridges/python/cmake/CopyIeLibs.cmake b/inference-engine/ie_bridges/python/cmake/CopyIeLibs.cmake
deleted file mode 100644
index 2e96fcfaf..000000000
--- a/inference-engine/ie_bridges/python/cmake/CopyIeLibs.cmake
+++ /dev/null
@@ -1,10 +0,0 @@
-set(IE_WIN_LIBS ${CMAKE_ARGV3})
-set(IE_LIBS ${CMAKE_ARGV4})
-
-if (WIN32)
- file( GLOB IE_LIBS "${IE_WIN_LIBS}/*.dll")
- file( COPY ${IE_LIBS} DESTINATION ${CMAKE_CURRENT_SOURCE_DIR})
-else()
- file( GLOB IE_LIBS "${IE_LIBS}/*.so")
- file( COPY ${IE_LIBS} DESTINATION ${CMAKE_CURRENT_SOURCE_DIR})
-endif()
diff --git a/inference-engine/ie_bridges/python/cmake/FindCython.cmake b/inference-engine/ie_bridges/python/cmake/FindCython.cmake
index 30e1f035c..3070950fd 100644
--- a/inference-engine/ie_bridges/python/cmake/FindCython.cmake
+++ b/inference-engine/ie_bridges/python/cmake/FindCython.cmake
@@ -1,10 +1,19 @@
-# Find the Cython compiler.
+# Copyright (c) 2016 Intel Corporation
#
-# This code sets the following variables:
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
#
-# CYTHON_EXECUTABLE
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# See also UseCython.cmake
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Following changes were done on top of original file:
+# Add CYTHON_EXECUTABLE searching hints at lines 50 and 51
#=============================================================================
# Copyright 2011 Kitware, Inc.
@@ -21,7 +30,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#=============================================================================
-
+# Find the Cython compiler.
+#
+# This code sets the following variables:
+#
+# CYTHON_EXECUTABLE
+#
+# See also UseCython.cmake
# Use the Cython executable that lives next to the Python executable
# if it is a local installation.
find_package( PythonInterp )
diff --git a/inference-engine/ie_bridges/python/cmake/ReplicatePythonSourceTree.cmake b/inference-engine/ie_bridges/python/cmake/ReplicatePythonSourceTree.cmake
deleted file mode 100644
index 4316d6e3d..000000000
--- a/inference-engine/ie_bridges/python/cmake/ReplicatePythonSourceTree.cmake
+++ /dev/null
@@ -1,7 +0,0 @@
-# Note: when executed in the build dir, then CMAKE_CURRENT_SOURCE_DIR is the
-# build dir.
-
-file( COPY setup.py inference_engine tests DESTINATION "${CMAKE_ARGV3}"
- FILES_MATCHING PATTERN "*.py" )
-
-file( COPY requirements.txt DESTINATION "${CMAKE_ARGV3}" )
diff --git a/inference-engine/ie_bridges/python/cmake/UseCython.cmake b/inference-engine/ie_bridges/python/cmake/UseCython.cmake
index ee631b724..1b9a0a2b6 100644
--- a/inference-engine/ie_bridges/python/cmake/UseCython.cmake
+++ b/inference-engine/ie_bridges/python/cmake/UseCython.cmake
@@ -46,6 +46,23 @@
#
# See also FindCython.cmake
+# Copyright (c) 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Following changes were done on top of the original file:
+# added PRIVATE linking mode for target_link_libraries call at lines 298 and 336
+
#=============================================================================
# Copyright 2011 Kitware, Inc.
#
diff --git a/inference-engine/ie_bridges/python/docs/api_overview.md b/inference-engine/ie_bridges/python/docs/api_overview.md
index 9d8779130..3a182ecf1 100644
--- a/inference-engine/ie_bridges/python/docs/api_overview.md
+++ b/inference-engine/ie_bridges/python/docs/api_overview.md
@@ -35,12 +35,15 @@ This class stores main information about the layer and allow to modify some laye
* `name` - Name of the layer
* `type`- Layer type
* `precision` - Layer base operating precision. Provides getter and setter interfaces.
+* `layout` - Returns the layout of the layer's shape.
+* `shape` - Returns the shape of the layer as a list.
+* `parents` - Returns a list of names of layers preceding this layer.
+* `children` - Returns a list of names of layers following this layer.
* `affinity` - Layer affinity set by user or a default affinity set by the `IEPlugin.set_initial_affinity()` method.
The affinity attribute provides getter and setter interfaces, so the layer affinity can be modified directly.
- For example:
-
+ For example:
```py
->>> net = IENetwork.from_ir(model=path_to_xml_file, weights=path_to_bin_file)
+>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> plugin = IEPlugin(device="HETERO:FPGA,CPU")
>>> plugin.set_config({"TARGET_FALLBACK": "HETERO:FPGA,CPU"})
>>> plugin.set_initial_affinity(net)
@@ -82,7 +85,12 @@ layers affinity and output layers.
### Class Constructor
-There is no explicit class constructor. Use `from_ir` class method to read the Intermediate Representation (IR) and initialize a correct instance of the `IENetwork` class.
+* `__init__(model: str, weights: str)`
+
+ * Parameters:
+
+ * model - Path to `.xml` file of the IR
+ * weights - Path to `.bin` file of the IR
### Class attributes:
@@ -91,7 +99,7 @@ There is no explicit class constructor. Use `from_ir` class method to read the I
For example, to get a shape of the input layer:
```py
->>> net = IENetwork.from_ir(model=path_to_xml_file, weights=path_to_bin_file)
+>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> net.inputs
{'data': <inference_engine.ie_api.InputInfo object at 0x7efe042dedd8>}
>>> net.inputs['data'].shape
@@ -102,7 +110,7 @@ There is no explicit class constructor. Use `from_ir` class method to read the I
For example, to get a shape of the output layer:
```py
->>> net = IENetwork.from_ir(model=path_to_xml_file, weights=path_to_bin_file)
+>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> net.inputs
{'prob': <inference_engine.ie_api.OutputInfo object at 0x7efe03ab95d0>}
>>> net.outputs['prob'].shape
@@ -113,7 +121,7 @@ There is no explicit class constructor. Use `from_ir` class method to read the I
network batch size. For example:
```py
->>> net = IENetwork.from_ir(model=path_to_xml_file, weights=path_to_bin_file)
+>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> net.batch_size
1
>>> net.batch_size = 4
@@ -124,20 +132,37 @@ There is no explicit class constructor. Use `from_ir` class method to read the I
```
* `layers` - Return dictionary that maps network layer names to <a name="ienetlayer-class"></a>`IENetLayer`
- objects containing layer properties. For example, to list all network layers:
+ objects containing layer properties in topological order. For example, to list all network layers:
```py
- >>> net = IENetwork.from_ir(model=path_to_xml_file, weights=path_to_bin_file)
+ >>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> net.layers
{'conv0': <inference_engine.ie_api.IENetLayer object at 0x7f3a4c102370>
...
}
```
+ * `stats` - Returns a `LayersStatsMap` object containing a dictionary that maps network layer names to calibration statistics
+ represented by <a name="layerstats-class"></a> `LayerStats` objects.
+ The `LayersStatsMap` class inherits from the built-in Python `dict` and overrides the default `update()` method, allowing
+ layer calibration statistics to be set or modified.
+```py
+>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
+>>> net.stats.update({
+ "conv1_2d" : LayserStats(min=(-25, -1, 0), max=(63, 124, 70)),
+ "conv2_2d" : LayserStats(min=(-5, -1, 0, 1, -7, 2), max=(63, 124, 70, 174, 99, 106)),
+ })
+```
+For more details about low-precision inference, refer to the "Low-Precision 8-bit Integer Inference"
+section of the Inference Engine Developer Guide.
+
+
### Class Methods
* `from_ir(model: str, weights: str)`
+**Note:** This method is deprecated. Use the `IENetwork()` class constructor to create a valid instance of `IENetwork`.
+
* Description:
The class method serves to read the model from the `.xml` and `.bin` files of the IR.
@@ -154,7 +179,7 @@ There is no explicit class constructor. Use `from_ir` class method to read the I
* Usage example:
```py
->>> net = IENetwork.from_ir(model=path_to_xml_file, weights=path_to_bin_file)
+>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> net
<inference_engine.ie_api.IENetwork object at 0x7fd7dbce54b0>
```
@@ -179,7 +204,7 @@ There is no explicit class constructor. Use `from_ir` class method to read the I
* Usage example:
```py
->>> net = IENetwork.from_ir(model=path_to_xml_file, weights=path_to_bin_file)
+>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> net.add_outputs(['conv5_1/dwise', 'conv2_1/expand'])
>>> net.outputs
['prob', 'conv5_1/dwise', 'conv2_1/expand']
@@ -213,12 +238,44 @@ outputs.
* Usage example:
```py
->>> net = IENetwork.from_ir(model=path_to_xml_file, weights=path_to_bin_file)
+>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> input_layer = next(iter(net.inputs))
>>> n, c, h, w = net.inputs[input_layer]
>>> net.reshape({input_layer: (n, c, h*2, w*2)})
```
+* `serialize(path_to_xml, path_to_bin)`:
+
+ * Description:
+
+ The method serializes the network and stores it in files.
+
+ * Parameters:
+
+ * `path_to_xml` - Path to the file where the serialized model will be stored.
+ * `path_to_bin` - Path to the file where the serialized weights will be stored.
+
+ * Return value:
+
+ None
+
+ * Usage example:
+
+```py
+>>> net = IENetwork(model=path_to_model, weights=path_to_weights)
+>>> net.serialize(path_to_xml, path_to_bin)
+```
+## <a name="layerstats-class"></a>LayerStats
+Layer calibration statistics container.
+### Class Constructor
+
+* `__init__(min: tuple = (), max: tuple = ())`
+
+ * Parameters:
+
+ * min - Tuple with per-channel minimum layer activation values
+ * max - Tuple with per-channel maximum layer activation values
+
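+A short, illustrative sketch of attaching `LayerStats` objects to a network (the layer name and value tuples below are hypothetical):
+
+```py
+>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
+>>> net.stats.update({"conv1_2d": LayerStats(min=(-25, -1, 0), max=(63, 124, 70))})
+```
+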
## <a name="inputinfo-class"></a>InputInfo
This class contains the information about the network input layers
@@ -283,7 +340,7 @@ This class is the main plugin interface and serves to initialize and configure t
* Parameters:
- * `network` - A valid IENetwork instance created by `IENetwork.from_ir()` method
+ * `network` - A valid `IENetwork` instance
* `num_requests` - A positive integer value of infer requests to be created. Number of infer requests may be limited
by device capabilities.
* `config` - A dictionary of plugin configuration keys and their values
@@ -295,7 +352,7 @@ This class is the main plugin interface and serves to initialize and configure t
* Usage example:
```py
->>> net = IENetwork.from_ir(model=path_to_xml_file, weights=path_to_bin_file)
+>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> plugin = IEPlugin(device="CPU")
>>> exec_net = plugin.load(network=net, num_requests=2)
>>> exec_net
@@ -396,7 +453,7 @@ There is no explicit class constructor. To make a valid instance of `ExecutableN
* Usage example:
```py
->>> net = IENetwork.from_ir(model=path_to_xml_file, weights=path_to_bin_file)
+>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> plugin = IEPlugin(device="CPU")
>>> exec_net = plugin.load(network=net, num_requests=3)
>>> exec_net.requests
@@ -424,7 +481,7 @@ There is no explicit class constructor. To make a valid instance of `ExecutableN
* Usage example:
```py
->>> net = IENetwork.from_ir(model=path_to_xml_file, weights=path_to_bin_file)
+>>> net = IENetwork(model=path_to_xml_file, weights=path_to_bin_file)
>>> plugin = IEPlugin(device="CPU")
>>> exec_net = plugin.load(network=net, num_requests=2)
>>> res = exec_net.infer({'data': img})
@@ -609,3 +666,22 @@ array([4.85416055e-01, 1.70385033e-01, 1.21873841e-01, 1.18894853e-01,
...
}
```
+
+* `set_batch(size)`
+ * Description:
+ Sets a new batch size for this infer request when dynamic batching is enabled in the executable network that created it.
+
+ **Note:** Support of dynamic batch size depends on the target plugin.
+
+ * Parameters:
+ * `size` - New batch size to be used by all the following inference calls for this request.
+
+ * Usage example:
+```py
+>>> plugin.set_config({"DYN_BATCH_ENABLED": "YES"})
+>>> exec_net = plugin.load(network=net)
+>>> exec_net.requests[0].set_batch(inputs_count)
+```
+Please refer to `dynamic_batch_demo.py` to see the full usage example.
+
+
diff --git a/inference-engine/ie_bridges/python/inference_engine/CMakeLists.txt b/inference-engine/ie_bridges/python/inference_engine/CMakeLists.txt
deleted file mode 100644
index 2835e0409..000000000
--- a/inference-engine/ie_bridges/python/inference_engine/CMakeLists.txt
+++ /dev/null
@@ -1,69 +0,0 @@
-# Copyright (C) 2018 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-#
-# If the pyx file is a C++ file, we should specify that here.
-
-set(CMAKE_INCLUDE_CURRENT_DIR ON)
-
-if (COPY_IE_LIBS)
- if (UNIX)
- SET(CMAKE_SKIP_BUILD_RPATH FALSE)
- SET(CMAKE_BUILD_WITH_INSTALL_RPATH TRUE)
- SET(CMAKE_INSTALL_RPATH "$ORIGIN")
- SET(CMAKE_INSTALL_RPATH_USE_LINK_PATH FALSE)
- endif (UNIX)
-endif()
-
-set_source_files_properties(
- ie_api_impl_defs.pxd
- ie_api_impl.hpp
- ie_api_impl.cpp
- ie_api.pyx
- ie_api.pxd
-
- PROPERTIES CYTHON_IS_CXX TRUE
-)
-
-cython_add_module(
- ie_api
-
- ie_api_impl_defs.pxd
- ie_api_impl.hpp
- ie_api_impl.cpp
- ie_api.pyx
-)
-
-target_link_libraries(ie_api PRIVATE IE::inference_engine)
-set_target_properties(ie_api PROPERTIES CXX_STANDARD 11 LINKER_LANGUAGE CXX)
-
-#if (NOT UNIX AND ${PYTHON_VERSION_STRING} MATCHES "^1.4")
-# set(python_subdir "python2.7")
-#else()
-# set(python_subdir "python${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR}")
-#endif()
-#
-#
-# Copy required build artifacts to structure which will be used in final package
-#add_custom_command(TARGET ie_api POST_BUILD
-#
-# COMMAND ${CMAKE_COMMAND} -E make_directory
-# ${CMAKE_SOURCE_DIR}/bin/${python_subdir}/openvino/inference_engine/
-#
-# COMMAND ${CMAKE_COMMAND} -E touch
-# ${CMAKE_SOURCE_DIR}/bin/${python_subdir}/openvino/__init__.py)
-#
-#if (${WIN32})
-#add_custom_command(TARGET ie_api POST_BUILD
-# COMMAND ${CMAKE_COMMAND} -E copy
-# ${CMAKE_CURRENT_BINARY_DIR}/Release/ie_api.pyd ${CMAKE_SOURCE_DIR}/bin/${python_subdir}/openvino/inference_engine/
-#
-# COMMAND ${CMAKE_COMMAND} -E copy
-# ${CMAKE_CURRENT_BINARY_DIR}/__init__.py ${CMAKE_SOURCE_DIR}/bin/${python_subdir}/openvino/inference_engine/)
-#else()
-#add_custom_command(TARGET ie_api POST_BUILD
-# COMMAND ${CMAKE_COMMAND} -E copy
-# ${CMAKE_CURRENT_BINARY_DIR}/ie_api.so ${CMAKE_SOURCE_DIR}/bin/${python_subdir}/openvino/inference_engine/
-#
-# COMMAND ${CMAKE_COMMAND} -E copy
-# ${CMAKE_CURRENT_BINARY_DIR}/__init__.py ${CMAKE_SOURCE_DIR}/bin/${python_subdir}/openvino/inference_engine/)
-#endif()
diff --git a/inference-engine/ie_bridges/python/inference_engine/__init__.py b/inference-engine/ie_bridges/python/inference_engine/__init__.py
deleted file mode 100644
index 07e2717dc..000000000
--- a/inference-engine/ie_bridges/python/inference_engine/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from .ie_api import *
-__version__ = get_version()
-__all__ = ['IENetwork', "IEPlugin", "IENetReader"]
diff --git a/inference-engine/ie_bridges/python/inference_engine/ie_api_impl.hpp b/inference-engine/ie_bridges/python/inference_engine/ie_api_impl.hpp
deleted file mode 100644
index f2265243e..000000000
--- a/inference-engine/ie_bridges/python/inference_engine/ie_api_impl.hpp
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright (C) 2018 Intel Corporation
-//
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#ifndef INFERENCE_ENGINE_DRIVER_IE_API_IMPL_HPP
-#define INFERENCE_ENGINE_DRIVER_IE_API_IMPL_HPP
-
-#include <string>
-#include <inference_engine.hpp>
-#include <iterator>
-#include <iostream>
-#include <algorithm>
-#include <sstream>
-#include "ie_extension.h"
-
-
-namespace InferenceEnginePython {
-struct IENetLayer {
- InferenceEngine::CNNLayerPtr layer_ptr;
- std::string name;
- std::string type;
- std::string precision;
- std::string affinity;
- std::map<std::string, std::string> params;
- void setAffinity(const std::string & target_affinity);
- void setParams(const std::map<std::string, std::string> & params_map);
- std::map<std::string, InferenceEngine::Blob::Ptr> getWeights();
- void setPrecision(std::string precision);
-};
-struct InputInfo{
- InferenceEngine::InputInfo actual;
- std::vector<size_t> dims;
- std::string precision;
- std::string layout;
- void setPrecision(std::string precision);
- void setLayout(std::string layout);
-};
-struct OutputInfo{
- InferenceEngine::DataPtr actual;
- std::vector<size_t> dims;
- std::string precision;
- std::string layout;
- void setPrecision(std::string precision);
-};
-struct ProfileInfo {
- std::string status;
- std::string exec_type;
- std::string layer_type;
- long long real_time;
- long long cpu_time;
- unsigned execution_index;
-};
-struct IENetwork {
- InferenceEngine::CNNNetwork actual;
- std::string name;
- std::size_t batch_size;
- void setBatch(const size_t size);
- void addOutputs(const std::vector<std::string> &out_layers, const std::string &precision);
- std::map<std::string, InferenceEnginePython::IENetLayer> getLayers();
- std::map<std::string, InferenceEnginePython::InputInfo> getInputs();
- std::map<std::string, InferenceEnginePython::OutputInfo> getOutputs();
- void reshape(const std::map<std::string, std::vector<size_t>> & input_shapes);
-};
-
-struct IENetReader {
- static IENetwork read(std::string const &model, std::string const &weights);
- std::vector<std::pair<std::string, std::string>> getLayers();
-};
-
-struct InferRequestWrap {
- InferenceEngine::IInferRequest::Ptr request_ptr;
- InferenceEngine::BlobMap inputs;
- InferenceEngine::BlobMap outputs;
-
- void infer();
- void infer_async();
- int wait(int64_t timeout);
- InferenceEngine::Blob::Ptr &getInputBlob(const std::string &blob_name);
- InferenceEngine::Blob::Ptr &getOutputBlob(const std::string &blob_name);
- std::vector<std::string> getInputsList();
- std::vector<std::string> getOutputsList();
- std::map<std::string, InferenceEnginePython::ProfileInfo> getPerformanceCounts();
-};
-
-
-struct IEExecNetwork {
- InferenceEngine::IExecutableNetwork::Ptr actual;
- std::vector<InferRequestWrap> infer_requests;
- IEExecNetwork(const std::string &name, size_t num_requests);
-
- std::string name;
- int next_req_index = 0;
- bool async;
- void infer();
-};
-
-
-struct IEPlugin {
- std::unique_ptr<InferenceEnginePython::IEExecNetwork> load(InferenceEnginePython::IENetwork &net,
- int num_requests,
- const std::map<std::string,std::string> &config);
- std::string device_name;
- std::string version;
- void setConfig(const std::map<std::string, std::string> &);
- void addCpuExtension(const std::string &extension_path);
- void setInitialAffinity(InferenceEnginePython::IENetwork &net);
- IEPlugin(const std::string &device, const std::vector<std::string> &plugin_dirs);
- IEPlugin() = default;
- std::set<std::string> queryNetwork(InferenceEnginePython::IENetwork &net);
- InferenceEngine::InferenceEnginePluginPtr actual;
-
-};
-
-template<class T>
-T* get_buffer(InferenceEngine::Blob& blob) {
- return blob.buffer().as<T *>();
-}
-
-template<class T, class... Args>
-std::unique_ptr<T> make_unique(Args&&... args)
-{
- return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
-}
-
-std::string get_version();
-}; // InferenceEnginePython
-
-#endif //INFERENCE_ENGINE_DRIVER_IE_API_IMPL_HPP
diff --git a/inference-engine/ie_bridges/python/sample/benchmark_app/README.md b/inference-engine/ie_bridges/python/sample/benchmark_app/README.md
new file mode 100644
index 000000000..7a9a52604
--- /dev/null
+++ b/inference-engine/ie_bridges/python/sample/benchmark_app/README.md
@@ -0,0 +1,81 @@
+# Benchmark Application Demo
+
+This topic demonstrates how to run the Benchmark Application demo, which performs inference using convolutional networks.
+
+## How It Works
+
+> **NOTE:** To achieve benchmark results similar to the official published results, set CPU frequency to 2.9GHz and GPU frequency to 1GHz.
+
+Upon start-up, the application reads command-line parameters and loads a network and images into the Inference Engine plugin. The number of infer requests and the execution approach depend on the mode defined with the `-api` command-line parameter.
+
+
+### Synchronous API
+For synchronous mode, the primary metric is latency. The application creates one infer request and executes the `Infer` method. The number of executions is defined by one of two values:
+* Number of iterations defined with the `-niter` command-line argument
+* Predefined duration if `-niter` is skipped. Predefined duration value depends on device.
+
+During the execution, the application collects two types of metrics:
+* Latency for each infer request executed with `Infer` method
+* Duration of all executions
+
+The reported latency is calculated as the median of all collected latencies. The reported throughput is derived from the latency and additionally depends on the batch size.
+
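+A minimal sketch of how the reported metrics relate (illustrative values; the variable names mirror the sample code below):
+
+```py
+from statistics import median
+
+# illustrative data: per-iteration latencies in seconds and the network batch size
+times = [0.0151, 0.0149, 0.0160, 0.0155]
+batch_size = 2
+
+latency = median(times)          # reported latency, in seconds
+fps = batch_size / latency       # reported throughput, in frames per second
+print("Latency is {:.4f} msec".format(latency * 1e3))
+print("Throughput is {:.4f} FPS".format(fps))
+```
+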
+### Asynchronous API
+For asynchronous mode, the primary metric is throughput in frames per second (FPS). The application creates a certain number of infer requests and executes the `StartAsync` method. The number of infer requests is specified with the `-nireq` command-line parameter. The number of executions is defined by one of two values:
+* Number of iterations defined with the `-niter` command-line argument
+* Predefined duration if `-niter` is skipped. Predefined duration value depends on device.
+
+The infer requests are executed asynchronously. The `Wait` method is used to wait for a previous execution to complete. The application measures all infer request executions and reports the throughput metric based on the batch size and the total execution duration.
+
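+A minimal sketch of the reported throughput computation in this mode (illustrative values; `step` counts completed `StartAsync` submissions, as in the sample code below):
+
+```py
+# illustrative data: completed async submissions, network batch size, wall-clock duration in seconds
+step = 1000
+batch_size = 2
+total_duration = 1.4
+
+fps = batch_size * step / total_duration   # reported throughput, in frames per second
+print("Throughput is {:.4f} FPS".format(fps))
+```
+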
+## Running
+
+Running the application with the `-h` or `--help` option yields the following usage message:
+```
+python3 benchmark_app.py -h
+
+benchmark_app [OPTION]
+Options:
+
+ -h, --help Print a usage message
+ -i, --path_to_images "<path>" Required. Path to a folder with images or to image files.
+ -m, --path_to_model "<path>" Required. Path to an .xml file with a trained model.
+ -pp "<path>" Path to a plugin folder.
+ -api, --api_type "<sync/async>" Required. Enable using sync/async API.
+ -d, --target_device "<device>" Specify a target device to infer on: CPU, GPU, FPGA or MYRIAD. Use "-d HETERO:<comma separated devices list>" format to specify HETERO plugin. The application looks for a suitable plugin for the specified device.
+ -niter, --number_iterations "<integer>" Optional. Number of iterations. If not specified, the number of iterations is calculated depending on a device.
+ -nireq, --number_infer_requests "<integer>" Optional. Number of infer requests (default value is 2).
+ -l, --path_to_extension "<absolute_path>" Required for CPU custom layers. Absolute path to a shared library with the kernels implementations.
+ Or
+ -c, --path_to_cldnn_config "<absolute_path>" Required for GPU custom kernels. Absolute path to an .xml file with the kernels description.
+ -b, --batch_size "<integer>" Optional. Batch size value. If not specified, the batch size value is determined from IR.
+ -nthreads, --number_threads "<integer>" Number of threads to use for inference on the CPU (including Hetero cases).
+ -pin {YES,NO}, --infer_threads_pinning {YES,NO} Optional. Enable ("YES" is default value) or disable ("NO") CPU threads pinning for CPU-involved inference.
+```
+
+Running the application with an empty list of options yields the usage message given above and an error message.
+
+To run the demo, you can use one-layer public models or one-layer pre-trained and optimized models delivered with the package that support images as input.
+
+For example, to do inference on an image using a trained network with multiple outputs on CPU, run the following command:
+
+```
+python3 benchmark_app.py -i <path_to_image>/inputImage.bmp -m <path_to_model>/multiple-output.xml -d CPU
+```
+
+> **NOTE**: Public models should be first converted to the Inference Engine format (\*.xml + \*.bin) using the [Model Optimizer tool](./docs/MO_DG/Deep_Learning_Model_Optimizer_DevGuide.md).
+
+## Demo Output
+
+Application output depends on the API used. For the synchronous API, the application outputs latency and throughput:
+```
+[ INFO ] Start inference synchronously (10 s duration)
+[BENCHMARK RESULT] Latency is 15.5520 msec
+[BENCHMARK RESULT] Throughput is 1286.0082 FPS
+```
+
+For the asynchronous API, the application outputs only throughput:
+```
+[ INFO ] Start inference asynchronously (10 s duration, 8 inference requests in parallel)
+[BENCHMARK RESULT] Throughput is 1444.2591 FPS
+```
+
+## See Also
+* [Using Inference Engine Samples](./docs/IE_DG/Samples_Overview.md)
diff --git a/inference-engine/ie_bridges/python/sample/benchmark_app/benchmark.py b/inference-engine/ie_bridges/python/sample/benchmark_app/benchmark.py
new file mode 100644
index 000000000..761b63e63
--- /dev/null
+++ b/inference-engine/ie_bridges/python/sample/benchmark_app/benchmark.py
@@ -0,0 +1,204 @@
+#!/usr/bin/env python
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+from statistics import median
+from openvino.inference_engine import IENetwork, IEPlugin
+
+from utils.benchmark_utils import *
+
+def main(args=None):
+ try:
+ if args is None:
+ args = parse_args()
+
+ validate_args(args)
+
+ # --------------------------------- 1. Load Plugin for inference engine ---------------------------------
+ logging.info("Loading plugin")
+ plugin = IEPlugin(args.target_device)
+
+ config = dict()
+ if CPU_DEVICE_NAME in args.target_device:
+ if args.path_to_extension:
+ plugin.add_cpu_extension(args.path_to_extension)
+ # limit threading for CPU portion of inference
+ if args.number_threads is not None:
+ config.update({'CPU_THREADS_NUM': str(args.number_threads)})
+ # pin threads for CPU portion of inference
+ config.update({'CPU_BIND_THREAD': args.infer_threads_pinning})
+ # for pure CPU execution, more throughput-oriented execution via streams
+ if args.api_type == 'async' and CPU_DEVICE_NAME in args.target_device:
+ config.update({'CPU_THROUGHPUT_STREAMS': str(args.number_infer_requests)})
+ elif GPU_DEVICE_NAME in args.target_device:
+ if args.path_to_cldnn_config:
+ config.update({'CONFIG_FILE': args.path_to_cldnn_config})
+ logger.info("GPU extensions is loaded {}".format(args.path_to_cldnn_config))
+ elif MYRIAD_DEVICE_NAME in args.target_device:
+ config.update({'LOG_LEVEL': 'LOG_INFO'})
+ config.update({'VPU_LOG_LEVEL': 'LOG_INFO'})
+
+ plugin.set_config(config)
+
+ logger.info("Device is {}".format(plugin.device))
+ logger.info("Plugin version is {}".format(plugin.version))
+
+ # --------------------- 2. Read IR Generated by ModelOptimizer (.xml and .bin files) ---------------------
+ logger.info("Loading network files")
+
+ xml_filename = os.path.abspath(args.path_to_model)
+ head, tail = os.path.splitext(xml_filename)
+ bin_filename = os.path.abspath(head + BIN_EXTENSION)
+
+ ie_network = IENetwork(xml_filename, bin_filename)
+
+ input_info = ie_network.inputs
+
+ if len(input_info) == 0:
+ raise AttributeError('No inputs info is provided')
+ elif len(input_info) != 1:
+ raise AttributeError("only one input layer network is supported")
+
+ # -------------------------------------- 3. Change network batch_size -------------------------------------
+ batch_size = ie_network.batch_size
+ key = list(input_info.keys()).pop()
+ precision = input_info[key].precision
+
+ if args.batch_size and args.batch_size != ie_network.batch_size:
+ # deepcopy input_info
+ shape = input_info[key].shape
+ # We support models having only one input layers
+ if input_info[key].layout != LAYOUT_TYPE:
+ raise Exception('Unsupported model for batch size changing in automatic mode')
+ shape[BATCH_SIZE_ELEM] = args.batch_size
+ ie_network.reshape({key: shape})
+
+ input_info = ie_network.inputs
+
+ batch_size = args.batch_size
+
+
+ logger_message = "Network batch size was changed to: " if args.batch_size is not None else "Network batch size: "
+ logger_message += " {}, precision: {}".format(batch_size, precision)
+ logger.info(logger_message)
+
+ # ------------------------------------- 4. Loading model to the plugin -------------------------------------
+ logger.info("Loading model to the plugin")
+ exe_network = plugin.load(ie_network, args.number_infer_requests)
+
+ # ------------------------------------ 5. Performance measurements stuff -----------------------------------
+ inputs = get_images(os.path.abspath(args.path_to_images), batch_size)
+
+ if batch_size < len(inputs):
+ logger.warn("Network batch size {} is less then images count {}"
+ ", some input files will be ignored".format(batch_size, len(inputs)))
+
+ input_images = {key: fill_blob_with_image(inputs, input_info[key].shape)}
+
+ times = list()
+ duration = 0
+
+ if args.number_iterations is None:
+ duration = get_duration_in_secs(args.target_device)
+
+ if args.api_type == 'sync':
+
+ # warming up - out of scope
+ exe_network.infer(input_images)
+
+ if args.number_iterations is not None:
+ logger.info(
+ "Start inference synchronously ({}) sync inference executions".format(args.number_iterations))
+ for iteration in range(args.number_iterations):
+ sync_infer_request(exe_network, times, input_images)
+
+ else:
+ logger.info("Start inference synchronously ({} s duration)".format(duration))
+ start_time = datetime.now()
+ current_time = start_time
+ while (current_time - start_time).total_seconds() < duration:
+ current_time = sync_infer_request(exe_network, times, input_images)
+
+ times.sort()
+ latency = median(times)
+ fps = batch_size / latency
+
+ print("[BENCHMARK RESULT] Latency is {:.4f} msec".format(latency * 1e3))
+ print("[BENCHMARK RESULT] Throughput is {:.4f} FPS".format(fps))
+ else:
+ infer_requests = exe_network.requests
+
+ if args.number_iterations is not None:
+ logger.info("Start inference asynchronously ({}"
+ " async inference executions, {} "
+ " inference requests in parallel".format(args.number_iterations,
+ args.number_infer_requests))
+ else:
+ logger.info("Start inference asynchronously ({} s duration, "
+ "{} inference requests in parallel)".format(duration, args.number_infer_requests))
+
+ current_inference = 0
+ required_inference_requests_were_executed = False
+ previous_inference = 1 - args.number_infer_requests
+ step = 0
+ steps_count = args.number_infer_requests - 1
+ if args.number_iterations is not None:
+ steps_count += args.number_iterations
+
+ # warming up - out of scope
+ infer_requests[0].async_infer(input_images)
+ infer_requests[0].wait()
+
+ start_time = datetime.now()
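+ # Pipelined execution: each iteration starts request `current_inference` and then waits on
+ # `previous_inference`, which was started (number_infer_requests - 1) iterations earlier;
+ # both indices wrap around the request pool.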
+ while not required_inference_requests_were_executed or step < steps_count or \
+ args.number_iterations is None and (datetime.now() - start_time).total_seconds() < duration:
+ exe_network.start_async(current_inference, input_images)
+
+ if previous_inference >= 0:
+ status = infer_requests[previous_inference].wait()
+ if status != 0:
+ raise Exception("Infer request not completed successfully")
+
+ current_inference += 1
+ if current_inference >= args.number_infer_requests:
+ current_inference = 0
+ required_inference_requests_were_executed = True
+
+ previous_inference += 1
+ if previous_inference >= args.number_infer_requests:
+ previous_inference = 0
+
+ step += 1
+
+ # wait for the latest inference executions to complete
+ for not_completed_index in range(args.number_infer_requests):
+ if infer_requests[not_completed_index].wait(0) != 0:
+ infer_requests[not_completed_index].wait()
+
+ total_duration = (datetime.now() - start_time).total_seconds()
+ fps = batch_size * step / total_duration
+
+ print("[BENCHMARK RESULT] Throughput is {:.4f} FPS".format(fps))
+
+ del exe_network
+ del plugin
+
+ except Exception as e:
+ logging.exception(e)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/inference-engine/ie_bridges/python/sample/benchmark_app/utils/benchmark_utils.py b/inference-engine/ie_bridges/python/sample/benchmark_app/utils/benchmark_utils.py
new file mode 100644
index 000000000..42676141f
--- /dev/null
+++ b/inference-engine/ie_bridges/python/sample/benchmark_app/utils/benchmark_utils.py
@@ -0,0 +1,122 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import logging
+import argparse
+import os
+import cv2
+import numpy as np
+import sys
+
+from glob import glob
+from random import choice
+from datetime import datetime
+from fnmatch import fnmatch
+
+from .constants import *
+
+logging.basicConfig(format="[ %(levelname)s ] %(message)s", level=logging.INFO, stream=sys.stdout)
+logger = logging.getLogger('BenchmarkApp')
+
+
+def validate_args(args):
+ if args.number_iterations is not None and args.number_iterations < 0:
+ raise Exception("Number of iterations should be positive (invalid -niter option value)")
+ if args.number_infer_requests < 0:
+ raise Exception("Number of inference requests should be positive (invalid -nireq option value)")
+ if not fnmatch(args.path_to_model, XML_EXTENSION_PATTERN):
+ raise Exception('Path {} is not an .xml file.'.format(args.path_to_model))
+
+
+def parse_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-i', '--path_to_images', type=str, required=True, help=HELP_MESSAGES['IMAGE_MESSAGE'])
+ parser.add_argument('-m', '--path_to_model', type=str, required=True, help=HELP_MESSAGES['MODEL_MESSAGE'])
+ parser.add_argument('-c', '--path_to_cldnn_config', type=str, required=False,
+ help=HELP_MESSAGES['CUSTOM_GPU_LIBRARY_MESSAGE'])
+ parser.add_argument('-l', '--path_to_extension', type=str, required=False, default=None,
+ help=HELP_MESSAGES['CUSTOM_CPU_LIBRARY_MESSAGE'])
+ parser.add_argument('-api', '--api_type', type=str, required=False, default='async', choices=['sync', 'async'],
+ help=HELP_MESSAGES['API_MESSAGE'])
+ parser.add_argument('-d', '--target_device', type=str, required=False, default="CPU",
+ help=HELP_MESSAGES['TARGET_DEVICE_MESSAGE'])
+ parser.add_argument('-niter', '--number_iterations', type=int, required=False, default=None,
+ help=HELP_MESSAGES['ITERATIONS_COUNT_MESSAGE'])
+ parser.add_argument('-nireq', '--number_infer_requests', type=int, required=False, default=2,
+ help=HELP_MESSAGES['INFER_REQUESTS_COUNT_MESSAGE'])
+ parser.add_argument('-nthreads', '--number_threads', type=int, required=False, default=None,
+ help=HELP_MESSAGES['INFER_NUM_THREADS_MESSAGE'])
+ parser.add_argument('-b', '--batch_size', type=int, required=False, default=None,
+ help=HELP_MESSAGES['BATCH_SIZE_MESSAGE'])
+ parser.add_argument('-pin', '--infer_threads_pinning', type=str, required=False, default='YES',
+ choices=['YES', 'NO'], help=HELP_MESSAGES['INFER_THREADS_PINNING_MESSAGE'])
+ return parser.parse_args()
+
+
+def get_images(path_to_images, batch_size):
+ images = list()
+ if os.path.isfile(path_to_images):
+ while len(images) != batch_size:
+ images.append(path_to_images)
+ else:
+ path = os.path.join(path_to_images, '*')
+ files = glob(path, recursive=True)
+ for file in files:
+ file_extension = file.rsplit('.').pop().upper()
+ if file_extension in IMAGE_EXTENSIONS:
+ images.append(file)
+ if len(images) == 0:
+ raise Exception("No images found in {}".format(path_to_images))
+ if len(images) < batch_size:
+ while len(images) != batch_size:
+ images.append(choice(images))
+ return images
+
+
+def get_duration_in_secs(target_device):
+ duration = 0
+ for device in DEVICE_DURATION_IN_SECS:
+ if device in target_device:
+ duration = max(duration, DEVICE_DURATION_IN_SECS[device])
+
+ if duration == 0:
+ duration = DEVICE_DURATION_IN_SECS[UNKNOWN_DEVICE_TYPE]
+ logger.warn("Default duration {} seconds for unknown device {} is used".format(duration, target_device))
+
+ return duration
+
+
+def fill_blob_with_image(images_path, shape):
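+ # Read each image with OpenCV (HWC, BGR), resize it to the network's spatial size if needed,
+ # and transpose it to CHW before placing it into the batch blob.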
+ images = np.ndarray(shape)
+ for item in range(shape[0]):
+ image = cv2.imread(images_path[item])
+
+ new_im_size = tuple(shape[2:])
+ if image.shape[:-1] != new_im_size:
+ logger.warn("Image {} is resize from ({}) to ({})".format(images_path[item], image.shape[:-1], new_im_size))
+ image = cv2.resize(image, new_im_size)
+
+ image = image.transpose((2, 0, 1))
+ images[item] = image
+ return images
+
+
+def sync_infer_request(exe_network, times, images):
+ iteration_start_time = datetime.now()
+ exe_network.infer(images)
+ current_time = datetime.now()
+ times.append((current_time - iteration_start_time).total_seconds())
+ return current_time
diff --git a/inference-engine/ie_bridges/python/sample/benchmark_app/utils/constants.py b/inference-engine/ie_bridges/python/sample/benchmark_app/utils/constants.py
new file mode 100644
index 000000000..f68919e51
--- /dev/null
+++ b/inference-engine/ie_bridges/python/sample/benchmark_app/utils/constants.py
@@ -0,0 +1,63 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+HELP_MESSAGES = {
+ 'IMAGE_MESSAGE': "Path to a folder with images or to image files.",
+ 'MULTI_INPUT_MESSAGE': "Path to a multi-input file.",
+ 'MODEL_MESSAGE': "Path to an .xml file with a trained model.",
+ 'PLUGIN_PATH_MESSAGE': "Path to a plugin folder.",
+ 'API_MESSAGE': "Enable using sync/async API. Default value is async",
+ 'TARGET_DEVICE_MESSAGE': "Specify a target device to infer on: CPU, GPU, FPGA or MYRIAD. "
+ "Use \"-d HETERO:<comma separated devices list>\" format to specify HETERO plugin. "
+ "The application looks for a suitable plugin for the specified device.",
+ 'ITERATIONS_COUNT_MESSAGE': "Number of iterations. "
+ "If not specified, the number of iterations is calculated depending on a device.",
+ 'INFER_REQUESTS_COUNT_MESSAGE': "Number of infer requests (default value is 2).",
+ 'INFER_NUM_THREADS_MESSAGE': "Number of threads to use for inference on the CPU "
+ "(including Hetero cases).",
+ 'CUSTOM_CPU_LIBRARY_MESSAGE': "Required for CPU custom layers. "
+ "Absolute path to a shared library with the kernels implementations.",
+ 'CUSTOM_GPU_LIBRARY_MESSAGE': "Required for GPU custom kernels. Absolute path to an .xml file with the kernels description.",
+ 'BATCH_SIZE_MESSAGE': "Optional. Batch size value. If not specified, the batch size value is determined from IR",
+ 'INFER_THREADS_PINNING_MESSAGE': "Optional. Enable (\"YES\" is default value) or disable (\"NO\") "
+ "CPU threads pinning for CPU-involved inference."
+}
+
+DEVICE_DURATION_IN_SECS = {
+ "CPU": 60,
+ "GPU": 60,
+ "VPU": 60,
+ "MYRIAD": 60,
+ "FPGA": 120,
+ "HDDL": 60,
+ "UNKNOWN": 120
+}
+
+IMAGE_EXTENSIONS = ['JPEG', 'JPG', 'PNG', 'BMP']
+
+MYRIAD_DEVICE_NAME = "MYRIAD"
+CPU_DEVICE_NAME = "CPU"
+GPU_DEVICE_NAME = "GPU"
+UNKNOWN_DEVICE_TYPE = "UNKNOWN"
+
+BATCH_SIZE_ELEM = 0
+
+LAYOUT_TYPE = 'NCHW'
+
+XML_EXTENSION = ".xml"
+BIN_EXTENSION = ".bin"
+
+XML_EXTENSION_PATTERN = '*' + XML_EXTENSION
diff --git a/inference-engine/ie_bridges/python/sample/classification_sample.py b/inference-engine/ie_bridges/python/sample/classification_sample.py
index 082a84d33..f02459f2e 100644
--- a/inference-engine/ie_bridges/python/sample/classification_sample.py
+++ b/inference-engine/ie_bridges/python/sample/classification_sample.py
@@ -58,7 +58,7 @@ def main():
plugin.add_cpu_extension(args.cpu_extension)
# Read IR
log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
- net = IENetwork.from_ir(model=model_xml, weights=model_bin)
+ net = IENetwork(model=model_xml, weights=model_bin)
if plugin.device == "CPU":
supported_layers = plugin.get_supported_layers(net)
@@ -108,8 +108,8 @@ def main():
log.info("Performance counters:")
print("{:<70} {:<15} {:<15} {:<15} {:<10}".format('name', 'layer_type', 'exet_type', 'status', 'real_time, us'))
for layer, stats in perf_counts.items():
- print ("{:<70} {:<15} {:<15} {:<15} {:<10}".format(layer, stats['layer_type'], stats['exec_type'],
- stats['status'], stats['real_time']))
+ print("{:<70} {:<15} {:<15} {:<15} {:<10}".format(layer, stats['layer_type'], stats['exec_type'],
+ stats['status'], stats['real_time']))
# Processing output blob
log.info("Processing output blob")
diff --git a/inference-engine/ie_bridges/python/sample/classification_sample_async.py b/inference-engine/ie_bridges/python/sample/classification_sample_async.py
index 9cf7d2307..ae8655570 100644
--- a/inference-engine/ie_bridges/python/sample/classification_sample_async.py
+++ b/inference-engine/ie_bridges/python/sample/classification_sample_async.py
@@ -58,7 +58,7 @@ def main():
plugin.add_cpu_extension(args.cpu_extension)
# Read IR
log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
- net = IENetwork.from_ir(model=model_xml, weights=model_bin)
+ net = IENetwork(model=model_xml, weights=model_bin)
if plugin.device == "CPU":
supported_layers = plugin.get_supported_layers(net)
@@ -106,10 +106,10 @@ def main():
if args.perf_counts:
perf_counts = infer_request_handle.get_perf_counts()
log.info("Performance counters:")
- print ("{:<70} {:<15} {:<15} {:<15} {:<10}".format('name', 'layer_type', 'exet_type', 'status', 'real_time, us'))
+ print("{:<70} {:<15} {:<15} {:<15} {:<10}".format('name', 'layer_type', 'exet_type', 'status', 'real_time, us'))
for layer, stats in perf_counts.items():
- print ("{:<70} {:<15} {:<15} {:<15} {:<10}".format(layer, stats['layer_type'], stats['exec_type'],
- stats['status'], stats['real_time']))
+ print("{:<70} {:<15} {:<15} {:<15} {:<10}".format(layer, stats['layer_type'], stats['exec_type'],
+ stats['status'], stats['real_time']))
# Processing output blob
log.info("Processing output blob")
res = infer_request_handle.outputs[out_blob]
diff --git a/inference-engine/ie_bridges/python/sample/greengrass_samples/greengrass_classification_sample.py b/inference-engine/ie_bridges/python/sample/greengrass_samples/greengrass_classification_sample.py
index 91563b3be..193c5a5bb 100644
--- a/inference-engine/ie_bridges/python/sample/greengrass_samples/greengrass_classification_sample.py
+++ b/inference-engine/ie_bridges/python/sample/greengrass_samples/greengrass_classification_sample.py
@@ -1,7 +1,7 @@
"""
BSD 3-clause "New" or "Revised" license
-Copyright (C) 2018 Intel Coporation.
+Copyright (C) 2018 Intel Corporation.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
@@ -38,7 +38,7 @@ import boto3
import timeit
import datetime
import json
-from collections import OrderedDict
+from collections import OrderedDict
from openvino.inference_engine import IENetwork, IEPlugin
@@ -82,6 +82,7 @@ PARAM_LABELMAP_FILE = os.environ.get("PARAM_LABELMAP_FILE")
PARAM_TOPIC_NAME = os.environ.get("PARAM_TOPIC_NAME", "intel/faas/classification")
PARAM_NUM_TOP_RESULTS = int(os.environ.get("PARAM_NUM_TOP_RESULTS", "10"))
+
def report(res_json, frame):
now = datetime.datetime.now()
date_prefix = str(now).replace(" ", "_")
@@ -89,17 +90,18 @@ def report(res_json, frame):
data = json.dumps(res_json)
client.publish(topic=PARAM_TOPIC_NAME, payload=data)
if enable_kinesis_output:
- kinesis_client.put_record(StreamName=kinesis_stream_name, Data=json.dumps(res_json), PartitionKey=kinesis_partition_key)
+ kinesis_client.put_record(StreamName=kinesis_stream_name, Data=json.dumps(res_json),
+ PartitionKey=kinesis_partition_key)
if enable_s3_jpeg_output:
temp_image = os.path.join(PARAM_OUTPUT_DIRECTORY, "inference_result.jpeg")
cv2.imwrite(temp_image, frame)
with open(temp_image) as file:
image_contents = file.read()
- s3_client.put_object(Body=image_contents, Bucket=s3_bucket_name, Key=date_prefix + ".jpeg")
+ s3_client.put_object(Body=image_contents, Bucket=s3_bucket_name, Key=date_prefix + ".jpeg")
if enable_local_jpeg_output:
cv2.imwrite(os.path.join(PARAM_OUTPUT_DIRECTORY, date_prefix + ".jpeg"), frame)
-
+
def greengrass_classification_sample_run():
client.publish(topic=PARAM_TOPIC_NAME, payload="OpenVINO: Initializing...")
model_bin = os.path.splitext(PARAM_MODEL_XML)[0] + ".bin"
@@ -109,7 +111,7 @@ def greengrass_classification_sample_run():
if "CPU" in PARAM_DEVICE:
plugin.add_cpu_extension(PARAM_CPU_EXTENSION_PATH)
# Read IR
- net = IENetwork.from_ir(model=PARAM_MODEL_XML, weights=model_bin)
+ net = IENetwork(model=PARAM_MODEL_XML, weights=model_bin)
assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
assert len(net.outputs) == 1, "Sample supports only single output topologies"
input_blob = next(iter(net.inputs))
@@ -126,9 +128,9 @@ def greengrass_classification_sample_run():
res_json = []
labeldata = None
if PARAM_LABELMAP_FILE is not None:
- with open(PARAM_LABELMAP_FILE) as labelmap_file:
+ with open(PARAM_LABELMAP_FILE) as labelmap_file:
labeldata = json.load(labelmap_file)
-
+
while (cap.isOpened()):
ret, frame = cap.read()
if not ret:
@@ -148,17 +150,17 @@ def greengrass_classification_sample_run():
res_json = OrderedDict()
res_json["Candidates"] = OrderedDict()
frame_timestamp = datetime.datetime.now()
-
+
for i in top_ind:
classlabel = labeldata[str(i)] if labeldata else str(i)
res_json["Candidates"][classlabel] = round(res[out_blob][0, i], 2)
-
+
frame_count += 1
# Measure elapsed seconds since the last report
seconds_elapsed = timeit.default_timer() - start_time
if seconds_elapsed >= reporting_interval:
res_json["timestamp"] = frame_timestamp.isoformat()
- res_json["frame_id"] = int(frameid)
+ res_json["frame_id"] = int(frameid)
res_json["inference_fps"] = frame_count / inf_seconds
start_time = timeit.default_timer()
report(res_json, frame)
@@ -169,8 +171,10 @@ def greengrass_classification_sample_run():
del exec_net
del plugin
+
greengrass_classification_sample_run()
+
def function_handler(event, context):
client.publish(topic=PARAM_TOPIC_NAME, payload='HANDLER_CALLED!')
return
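
Note: for context, the classification sample above builds an ordered JSON payload from the top-N class probabilities inside its reporting loop. A hedged sketch of that parsing step; top_n_report is an illustrative helper, not part of the sample:

    import numpy as np
    from collections import OrderedDict

    def top_n_report(probs, labeldata=None, n=10):
        # probs: 1-D array of class probabilities for a single image
        res_json = OrderedDict()
        res_json["Candidates"] = OrderedDict()
        top_ind = np.argsort(probs)[-n:][::-1]  # indices of the n highest scores
        for i in top_ind:
            classlabel = labeldata[str(i)] if labeldata else str(i)
            res_json["Candidates"][classlabel] = round(float(probs[i]), 2)
        return res_json
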
diff --git a/inference-engine/ie_bridges/python/sample/greengrass_samples/greengrass_object_detection_sample_ssd.py b/inference-engine/ie_bridges/python/sample/greengrass_samples/greengrass_object_detection_sample_ssd.py
index 55c2f0f84..e6898bee3 100644
--- a/inference-engine/ie_bridges/python/sample/greengrass_samples/greengrass_object_detection_sample_ssd.py
+++ b/inference-engine/ie_bridges/python/sample/greengrass_samples/greengrass_object_detection_sample_ssd.py
@@ -1,7 +1,7 @@
"""
BSD 3-clause "New" or "Revised" license
-Copyright (C) 2018 Intel Coporation.
+Copyright (C) 2018 Intel Corporation.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
@@ -38,7 +38,7 @@ import boto3
import timeit
import datetime
import json
-from collections import OrderedDict
+from collections import OrderedDict
from openvino.inference_engine import IENetwork, IEPlugin
@@ -81,6 +81,7 @@ PARAM_CPU_EXTENSION_PATH = os.environ.get("PARAM_CPU_EXTENSION_PATH")
PARAM_LABELMAP_FILE = os.environ.get("PARAM_LABELMAP_FILE")
PARAM_TOPIC_NAME = os.environ.get("PARAM_TOPIC_NAME", "intel/faas/ssd")
+
def report(res_json, frame):
now = datetime.datetime.now()
date_prefix = str(now).replace(" ", "_")
@@ -88,17 +89,18 @@ def report(res_json, frame):
data = json.dumps(res_json)
client.publish(topic=PARAM_TOPIC_NAME, payload=data)
if enable_kinesis_output:
- kinesis_client.put_record(StreamName=kinesis_stream_name, Data=json.dumps(res_json), PartitionKey=kinesis_partition_key)
+ kinesis_client.put_record(StreamName=kinesis_stream_name, Data=json.dumps(res_json),
+ PartitionKey=kinesis_partition_key)
if enable_s3_jpeg_output:
temp_image = os.path.join(PARAM_OUTPUT_DIRECTORY, "inference_result.jpeg")
cv2.imwrite(temp_image, frame)
with open(temp_image) as file:
image_contents = file.read()
- s3_client.put_object(Body=image_contents, Bucket=s3_bucket_name, Key=date_prefix + ".jpeg")
+ s3_client.put_object(Body=image_contents, Bucket=s3_bucket_name, Key=date_prefix + ".jpeg")
if enable_local_jpeg_output:
cv2.imwrite(os.path.join(PARAM_OUTPUT_DIRECTORY, date_prefix + ".jpeg"), frame)
-
+
def greengrass_object_detection_sample_ssd_run():
client.publish(topic=PARAM_TOPIC_NAME, payload="OpenVINO: Initializing...")
model_bin = os.path.splitext(PARAM_MODEL_XML)[0] + ".bin"
@@ -108,7 +110,7 @@ def greengrass_object_detection_sample_ssd_run():
if "CPU" in PARAM_DEVICE:
plugin.add_cpu_extension(PARAM_CPU_EXTENSION_PATH)
# Read IR
- net = IENetwork.from_ir(model=PARAM_MODEL_XML, weights=model_bin)
+ net = IENetwork(model=PARAM_MODEL_XML, weights=model_bin)
assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
assert len(net.outputs) == 1, "Sample supports only single output topologies"
input_blob = next(iter(net.inputs))
@@ -124,9 +126,9 @@ def greengrass_object_detection_sample_ssd_run():
frame_count = 0
labeldata = None
if PARAM_LABELMAP_FILE is not None:
- with open(PARAM_LABELMAP_FILE) as labelmap_file:
+ with open(PARAM_LABELMAP_FILE) as labelmap_file:
labeldata = json.load(labelmap_file)
-
+
while (cap.isOpened()):
ret, frame = cap.read()
if not ret:
@@ -142,26 +144,27 @@ def greengrass_object_detection_sample_ssd_run():
res = exec_net.infer(inputs={input_blob: in_frame})
inf_seconds += timeit.default_timer() - inf_start_time
# Parse detection results of the current request
- res_json = OrderedDict()
- frame_timestamp = datetime.datetime.now()
+ res_json = OrderedDict()
+ frame_timestamp = datetime.datetime.now()
object_id = 0
for obj in res[out_blob][0][0]:
- if obj[2] > 0.5:
- xmin = int(obj[3] * initial_w)
- ymin = int(obj[4] * initial_h)
- xmax = int(obj[5] * initial_w)
- ymax = int(obj[6] * initial_h)
- cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (255, 165, 20), 4)
- obj_id = "Object" + str(object_id)
- classlabel = labeldata[str(int(obj[1]))] if labeldata else ""
- res_json[obj_id] = {"label": int(obj[1]), "class": classlabel, "confidence": round(obj[2], 2), "xmin": round(obj[3], 2), "ymin": round(obj[4], 2), "xmax": round(obj[5], 2), "ymax": round(obj[6], 2)}
- object_id += 1
+ if obj[2] > 0.5:
+ xmin = int(obj[3] * initial_w)
+ ymin = int(obj[4] * initial_h)
+ xmax = int(obj[5] * initial_w)
+ ymax = int(obj[6] * initial_h)
+ cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (255, 165, 20), 4)
+ obj_id = "Object" + str(object_id)
+ classlabel = labeldata[str(int(obj[1]))] if labeldata else ""
+ res_json[obj_id] = {"label": int(obj[1]), "class": classlabel, "confidence": round(obj[2], 2), "xmin": round(
+ obj[3], 2), "ymin": round(obj[4], 2), "xmax": round(obj[5], 2), "ymax": round(obj[6], 2)}
+ object_id += 1
frame_count += 1
# Measure elapsed seconds since the last report
seconds_elapsed = timeit.default_timer() - start_time
if seconds_elapsed >= reporting_interval:
res_json["timestamp"] = frame_timestamp.isoformat()
- res_json["frame_id"] = int(frameid)
+ res_json["frame_id"] = int(frameid)
res_json["inference_fps"] = frame_count / inf_seconds
start_time = timeit.default_timer()
report(res_json, frame)
@@ -172,8 +175,10 @@ def greengrass_object_detection_sample_ssd_run():
del exec_net
del plugin
+
greengrass_object_detection_sample_ssd_run()
+
def function_handler(event, context):
client.publish(topic=PARAM_TOPIC_NAME, payload='HANDLER_CALLED!')
return
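
Note: the object detection sample above assumes the standard SSD DetectionOutput format, where each detection is [image_id, label, confidence, x_min, y_min, x_max, y_max] with coordinates normalized to [0, 1]. A minimal sketch of that parsing step; parse_detections is an illustrative helper, not part of the sample:

    def parse_detections(detections, frame_w, frame_h, threshold=0.5):
        # detections: the res[out_blob][0][0] array produced by the network
        boxes = []
        for obj in detections:
            if obj[2] > threshold:
                xmin = int(obj[3] * frame_w)
                ymin = int(obj[4] * frame_h)
                xmax = int(obj[5] * frame_w)
                ymax = int(obj[6] * frame_h)
                boxes.append((int(obj[1]), float(obj[2]), xmin, ymin, xmax, ymax))
        return boxes
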
diff --git a/inference-engine/ie_bridges/python/sample/jupyter_notebooks/classification_demo/classification_demo.ipynb b/inference-engine/ie_bridges/python/sample/jupyter_notebooks/classification_demo/classification_demo.ipynb
new file mode 100644
index 000000000..632672f96
--- /dev/null
+++ b/inference-engine/ie_bridges/python/sample/jupyter_notebooks/classification_demo/classification_demo.ipynb
@@ -0,0 +1,463 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "This notebook demonstrates the worklflow of a simple image classification task.\n",
+ "We will go through all the pipeline steps: downloading the model, generating the Intermediate Representation (IR) using the Model Optimizer, running inference in Python, and parsing and interpretating the output results.\n",
+ "\n",
+ "To demonstrate the scenario, we will use the pre-trained SquezeNet V1.1 Caffe\\* model. SqueezeNet is a pretty accurate and at the same time lightweight network. For more information about the model, please visit <a href=\"https://github.com/DeepScale/SqueezeNet/\">GitHub</a> page and refer to original <a href=\"https://arxiv.org/abs/1602.07360\">SqueezeNet paper</a>.\n",
+ "\n",
+ "Follow the steps to perform image classification with the SquezeNet V1.1 model:"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**1. Download the model files:** "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%%bash\n",
+ "echo \"Downloading deploy.protxt ...\"\n",
+ "if [ -f deploy.prototxt ]; then \n",
+ " echo \"deploy.protxt file already exists. Downloading skipped\"\n",
+ "else\n",
+ " wget https://raw.githubusercontent.com/DeepScale/SqueezeNet/a47b6f13d30985279789d08053d37013d67d131b/SqueezeNet_v1.1/deploy.prototxt -q\n",
+ " echo \"Finished!\"\n",
+ "fi"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%%bash\n",
+ "! echo \"Downloading squeezenet_v1.1.caffemodel ...\"\n",
+ "if [ -f squeezenet_v1.1.caffemodel ]; then\n",
+ " echo \"squeezenet_v1.1.caffemodel file already exists. Download skipped\"\n",
+ "else\n",
+ " wget https://github.com/DeepScale/SqueezeNet/raw/a47b6f13d30985279789d08053d37013d67d131b/SqueezeNet_v1.1/squeezenet_v1.1.caffemodel -q\n",
+ " echo \"Finished!\"\n",
+ "fi"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**Run the following command to see the model files:**"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "!ls -la"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "* `deploy.prototxt` contains the network toplogy description in text format. \n",
+ "* `squeezenet_v1.1.caffemodel` contains weights for all network layers"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**2. Optimize and convert the model from intial Caffe representation to the IR representation, which is required for scoring the model using Inference Engine. To convert and optimize the model, use the Model Optimizer command line tool.**\n",
+ "\n",
+ "To locate Model Optimizer scripts, specify the path to the Model Optimizer root directory in the `MO_ROOT` variable in the cell bellow and then run it (If you use the installed OpenVINO&trade; package, you can find the Model Optimizer in `<INSTALLATION_ROOT_DIR>/deployment_tools/model_optimizer`)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%%bash\n",
+ "MO_ROOT=/localdisk/repos/model-optimizer-tensorflow/\n",
+ "echo $MO_ROOT\n",
+ "python3 $MO_ROOT/mo.py --input_model squeezenet_v1.1.caffemodel --input_proto deploy.prototxt"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**3. Now, you have the SqueezeNet model converted to the IR, and you can infer it.**\n",
+ "\n",
+ "a. First, import required modules:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from openvino.inference_engine import IENetwork, IEPlugin\n",
+ "import numpy as np\n",
+ "import cv2\n",
+ "import logging as log\n",
+ "from time import time\n",
+ "import sys\n",
+ "import glob\n",
+ "import os\n",
+ "from matplotlib import pyplot as plt\n",
+ "%matplotlib inline"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "b. Initialize required constants:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Configure logging format\n",
+ "log.basicConfig(format=\"[ %(levelname)s ] %(message)s\", level=log.INFO, stream=sys.stdout)\n",
+ "\n",
+ "# Path to IR model files\n",
+ "MODEL_XML = \"./squeezenet_v1.1.xml\"\n",
+ "MODEL_BIN = \"./squeezenet_v1.1.bin\"\n",
+ "\n",
+ "# Target device to run inference\n",
+ "TARGET_DEVICE = \"CPU\"\n",
+ "\n",
+ "# Folder with input images for the model\n",
+ "IMAGES_FOLDER = \"./images\"\n",
+ "\n",
+ "# File containing information about classes names \n",
+ "LABELS_FILE = \"./image_net_synset.txt\"\n",
+ "\n",
+ "# Number of top prediction results to parse\n",
+ "NTOP = 5\n",
+ "\n",
+ "# Required batch size - number of images which will be processed in parallel\n",
+ "BATCH = 4"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "c. Create a plugin instance for the specified target device \n",
+ "d. Read the IR files and create an `IENEtwork` instance"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "plugin = IEPlugin(TARGET_DEVICE)\n",
+ "net = IENetwork(model=MODEL_XML, weights=MODEL_BIN)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "e. Set the network batch size to the constatns specified above. \n",
+ "\n",
+ "Batch size is an \"amount\" of input data that will be infered in parallel. In this cases it is a number of images, which will be classified in parallel. \n",
+ "\n",
+ "You can set the network batch size using one of the following options:\n",
+ "1. On the IR generation stage, run the Model Optimizer with `-b` command line option. For example, to generate the IR with batch size equal to 4, add `-b 4` to Model Optimizer command line options. By default, it takes the batch size from the original network in framework representation (usually, it is equal to 1, but in this case, the original Caffe model is provided with the batch size equal to 10). \n",
+ "2. Use Inference Engine after reading IR. We will use this option.\n",
+ "\n",
+ "To set the batch size with the Inference Engine:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "log.info(\"Current network batch size is {}, will be changed to {}\".format(net.batch_size, BATCH))\n",
+ "net.batch_size = BATCH"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "f. After setting batch size, you can get required information about network input layers.\n",
+ "To preprocess input images, you need to know input layer shape.\n",
+ "\n",
+ "`inputs` property of `IENetwork` returns the dicitonary with input layer names and `InputInfo` objects, which contain information about an input layer including its shape.\n",
+ "\n",
+ "SqueezeNet is a single-input toplogy, so to get the input layer name and its shape, you can get the first item from the `inputs` dictionary:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "input_layer = next(iter(net.inputs))\n",
+ "n,c,h,w = net.inputs[input_layer].shape\n",
+ "layout = net.inputs[input_layer].layout\n",
+ "log.info(\"Network input layer {} has shape {} and layout {}\".format(input_layer, (n,c,h,w), layout))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "So what do the shape and layout mean? \n",
+ "Layout will helps to interprete the shape dimsesnions meaning. \n",
+ "\n",
+ "`NCHW` input layer layout means:\n",
+ "* the fisrt dimension of an input data is a batch of **N** images processed in parallel \n",
+ "* the second dimension is a numnber of **C**hannels expected in the input images\n",
+ "* the third and the forth are a spatial dimensions - **H**eight and **W**idth of an input image\n",
+ "\n",
+ "Our shapes means that the network expects four 3-channel images running in parallel with size 227x227."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "g. Read and preprocess input images.\n",
+ "\n",
+ "For it, go to `IMAGES_FOLDER`, find all `.bmp` files, and take four images for inference:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "search_pattern = os.path.join(IMAGES_FOLDER, \"*.bmp\")\n",
+ "images = glob.glob(search_pattern)[:BATCH]\n",
+ "log.info(\"Input images:\\n {}\".format(\"\\n\".join(images)))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Now you can read and preprocess the image files and create an array with input blob data.\n",
+ "\n",
+ "For preprocessing, you must do the following:\n",
+ "1. Resize the images to fit the HxW input dimenstions.\n",
+ "2. Transpose the HWC layout.\n",
+ "\n",
+ "Transposing is tricky and not really obvious.\n",
+ "As you alredy saw above, the network has the `NCHW` layout, so each input image should be in `CHW` format. But by deafult, OpenCV\\* reads images in the `HWC` format. That is why you have to swap the axes using the `numpy.transpose()` function:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "input_data = np.ndarray(shape=(n, c, h, w))\n",
+ "orig_images = [] # Will be used to show image in notebook\n",
+ "for i, img in enumerate(images):\n",
+ " image = cv2.imread(img)\n",
+ " orig_images.append(image)\n",
+ " if image.shape[:-1] != (h, w):\n",
+ " log.warning(\"Image {} is resized from {} to {}\".format(img, image.shape[:-1], (h, w)))\n",
+ " image = cv2.resize(image, (w, h))\n",
+ " image = image.transpose((2, 0, 1)) # Change data layout from HWC to CHW\n",
+ " input_data[i] = image"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "i. Infer the model model to classify input images:\n",
+ "\n",
+ "1. Load the `IENetwork` object to the plugin to create `ExectuableNEtwork` object. \n",
+ "2. Start inference using the `infer()` function specifying dictionary with input layer name and prepared data as an argument for the function. \n",
+ "3. Measure inference time in miliseconds and calculate throughput metric in frames-per-second (FPS)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "exec_net = plugin.load(net)\n",
+ "t0 = time()\n",
+ "res_map = exec_net.infer({input_layer: input_data})\n",
+ "inf_time = (time() - t0) * 1000 \n",
+ "fps = BATCH * inf_time \n",
+ "log.info(\"Inference time: {} ms.\".format(inf_time))\n",
+ "log.info(\"Throughput: {} fps.\".format(fps))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "**4. After the inference, you need to parse and interpretate the inference results.**\n",
+ "\n",
+ "First, you need to see the shape of the network output layer. It can be done in similar way as for the inputs, but here you need to call `outputs` property of `IENetwork` object:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "output_layer = next(iter(net.outputs))\n",
+ "n,c,h,w = net.outputs[output_layer].shape\n",
+ "layout = net.outputs[output_layer].layout\n",
+ "log.info(\"Network output layer {} has shape {} and layout {}\".format(output_layer, (n,c,h,w), layout))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "It is not a common case for classification netowrks to have output layer with *NCHW* layout. Usually, it is just *NC*. However, in this case, the last two dimensions are just a feature of the network and do not have much sense. Ignore them as you will remove them on the final parsing stage. \n",
+ "\n",
+ "What are the first and second dimensions of the output layer? \n",
+ "* The first dimension is a batch. We precoessed four images, and the prediction result for a particular image is stored in the first dimension of the output array. For example, prediction results for the third image is `res[2]` (since numeration starts from 0).\n",
+ "* The second dimension is an array with normalized probabilities (from 0 to 1) for each class. This network is trained using the <a href=\"http://image-net.org/index\">ImageNet</a> dataset with 1000 classes. Each `n`-th value in the output data for a certain image represent the probability of the image belonging to the `n`-th class. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To parse the output results:\n",
+ "\n",
+ "a. Read the `LABELS_FILE`, which maps the class ID to human-readable class names:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "with open(LABELS_FILE, 'r') as f:\n",
+ " labels_map = [x.split(sep=' ', maxsplit=1)[-1].strip() for x in f]\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "b. Parse the output array with prediction results. The parsing algorith is the following:\n",
+ "0. Squeeze the last two \"extra\" dimensions of the output data.\n",
+ "1. Iterate over all batches.\n",
+ "2. Sort the probabilities vector descendingly to get `NTOP` classes with the highest probabilities (by default, the `numpy.argsort` sorts the data in the ascending order, but using the array slicing `[::-1]`, you can reverse the data order).\n",
+ "3. Map the `NTOP` probabilities to the corresponding labeles in `labeles_map`.\n",
+ "\n",
+ "For the vizualization, you also need to store top-1 class and probability."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "top1_res = [] # will be used for the visualization\n",
+ "res = np.squeeze(res_map[output_layer])\n",
+ "log.info(\"Top {} results: \".format(NTOP))\n",
+ "for i, probs in enumerate(res):\n",
+ " top_ind = np.argsort(probs)[-NTOP:][::-1]\n",
+ " print(\"Image {}\".format(images[i]))\n",
+ " top1_ind = top_ind[0]\n",
+ " top1_res.append((labels_map[top1_ind], probs[top1_ind]))\n",
+ " for id in top_ind:\n",
+ " print(\"label: {} probability: {:.2f}% \".format(labels_map[id], probs[id] * 100))\n",
+ " print(\"\\n\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The code above prints the results as plain text. \n",
+ "You can also use OpenCV\\* to visualize the results using the `orig_images` and `top1_res` variables, which you created during images reading and results parsing:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "plt.clf()\n",
+ "for i, img in enumerate(orig_images):\n",
+ " label_str = \"{}\".format(top1_res[i][0].split(',')[0])\n",
+ " prob_str = \"{:.2f}%\".format(top1_res[i][1])\n",
+ " cv2.putText(img, label_str, (5, 15), cv2.FONT_HERSHEY_COMPLEX, 0.6, (220,100,10), 1)\n",
+ " cv2.putText(img, prob_str, (5, 35), cv2.FONT_HERSHEY_COMPLEX, 0.6, (220,100,10), 1)\n",
+ " plt.figure()\n",
+ " plt.axis(\"off\")\n",
+ " \n",
+ " # We have to convert colors, because matplotlib expects an image in RGB color format \n",
+ " # but by default, the OpenCV read images in BRG format\n",
+ " im_to_show = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n",
+ " plt.imshow(im_to_show)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.6.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
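
Note: the notebook above walks through the full flow step by step. A condensed sketch of the same flow, assuming placeholder file names and measuring inference time in milliseconds:

    from time import time
    import numpy as np
    from openvino.inference_engine import IENetwork, IEPlugin

    plugin = IEPlugin("CPU")
    net = IENetwork(model="squeezenet_v1.1.xml", weights="squeezenet_v1.1.bin")
    net.batch_size = 4
    input_layer = next(iter(net.inputs))
    n, c, h, w = net.inputs[input_layer].shape
    exec_net = plugin.load(net)

    # input_data would be filled with preprocessed (resized, HWC->CHW) images
    input_data = np.zeros((n, c, h, w), dtype=np.float32)
    t0 = time()
    res = exec_net.infer({input_layer: input_data})
    inf_time_ms = (time() - t0) * 1000
    fps = n * 1000 / inf_time_ms  # images per second
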
diff --git a/inference-engine/ie_bridges/python/sample/jupyter_notebooks/classification_demo/image_net_synset.txt b/inference-engine/ie_bridges/python/sample/jupyter_notebooks/classification_demo/image_net_synset.txt
new file mode 100644
index 000000000..a9e8c7f50
--- /dev/null
+++ b/inference-engine/ie_bridges/python/sample/jupyter_notebooks/classification_demo/image_net_synset.txt
@@ -0,0 +1,1000 @@
+n01440764 tench, Tinca tinca
+n01443537 goldfish, Carassius auratus
+n01484850 great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias
+n01491361 tiger shark, Galeocerdo cuvieri
+n01494475 hammerhead, hammerhead shark
+n01496331 electric ray, crampfish, numbfish, torpedo
+n01498041 stingray
+n01514668 cock
+n01514859 hen
+n01518878 ostrich, Struthio camelus
+n01530575 brambling, Fringilla montifringilla
+n01531178 goldfinch, Carduelis carduelis
+n01532829 house finch, linnet, Carpodacus mexicanus
+n01534433 junco, snowbird
+n01537544 indigo bunting, indigo finch, indigo bird, Passerina cyanea
+n01558993 robin, American robin, Turdus migratorius
+n01560419 bulbul
+n01580077 jay
+n01582220 magpie
+n01592084 chickadee
+n01601694 water ouzel, dipper
+n01608432 kite
+n01614925 bald eagle, American eagle, Haliaeetus leucocephalus
+n01616318 vulture
+n01622779 great grey owl, great gray owl, Strix nebulosa
+n01629819 European fire salamander, Salamandra salamandra
+n01630670 common newt, Triturus vulgaris
+n01631663 eft
+n01632458 spotted salamander, Ambystoma maculatum
+n01632777 axolotl, mud puppy, Ambystoma mexicanum
+n01641577 bullfrog, Rana catesbeiana
+n01644373 tree frog, tree-frog
+n01644900 tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui
+n01664065 loggerhead, loggerhead turtle, Caretta caretta
+n01665541 leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea
+n01667114 mud turtle
+n01667778 terrapin
+n01669191 box turtle, box tortoise
+n01675722 banded gecko
+n01677366 common iguana, iguana, Iguana iguana
+n01682714 American chameleon, anole, Anolis carolinensis
+n01685808 whiptail, whiptail lizard
+n01687978 agama
+n01688243 frilled lizard, Chlamydosaurus kingi
+n01689811 alligator lizard
+n01692333 Gila monster, Heloderma suspectum
+n01693334 green lizard, Lacerta viridis
+n01694178 African chameleon, Chamaeleo chamaeleon
+n01695060 Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis
+n01697457 African crocodile, Nile crocodile, Crocodylus niloticus
+n01698640 American alligator, Alligator mississipiensis
+n01704323 triceratops
+n01728572 thunder snake, worm snake, Carphophis amoenus
+n01728920 ringneck snake, ring-necked snake, ring snake
+n01729322 hognose snake, puff adder, sand viper
+n01729977 green snake, grass snake
+n01734418 king snake, kingsnake
+n01735189 garter snake, grass snake
+n01737021 water snake
+n01739381 vine snake
+n01740131 night snake, Hypsiglena torquata
+n01742172 boa constrictor, Constrictor constrictor
+n01744401 rock python, rock snake, Python sebae
+n01748264 Indian cobra, Naja naja
+n01749939 green mamba
+n01751748 sea snake
+n01753488 horned viper, cerastes, sand viper, horned asp, Cerastes cornutus
+n01755581 diamondback, diamondback rattlesnake, Crotalus adamanteus
+n01756291 sidewinder, horned rattlesnake, Crotalus cerastes
+n01768244 trilobite
+n01770081 harvestman, daddy longlegs, Phalangium opilio
+n01770393 scorpion
+n01773157 black and gold garden spider, Argiope aurantia
+n01773549 barn spider, Araneus cavaticus
+n01773797 garden spider, Aranea diademata
+n01774384 black widow, Latrodectus mactans
+n01774750 tarantula
+n01775062 wolf spider, hunting spider
+n01776313 tick
+n01784675 centipede
+n01795545 black grouse
+n01796340 ptarmigan
+n01797886 ruffed grouse, partridge, Bonasa umbellus
+n01798484 prairie chicken, prairie grouse, prairie fowl
+n01806143 peacock
+n01806567 quail
+n01807496 partridge
+n01817953 African grey, African gray, Psittacus erithacus
+n01818515 macaw
+n01819313 sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita
+n01820546 lorikeet
+n01824575 coucal
+n01828970 bee eater
+n01829413 hornbill
+n01833805 hummingbird
+n01843065 jacamar
+n01843383 toucan
+n01847000 drake
+n01855032 red-breasted merganser, Mergus serrator
+n01855672 goose
+n01860187 black swan, Cygnus atratus
+n01871265 tusker
+n01872401 echidna, spiny anteater, anteater
+n01873310 platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus
+n01877812 wallaby, brush kangaroo
+n01882714 koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus
+n01883070 wombat
+n01910747 jellyfish
+n01914609 sea anemone, anemone
+n01917289 brain coral
+n01924916 flatworm, platyhelminth
+n01930112 nematode, nematode worm, roundworm
+n01943899 conch
+n01944390 snail
+n01945685 slug
+n01950731 sea slug, nudibranch
+n01955084 chiton, coat-of-mail shell, sea cradle, polyplacophore
+n01968897 chambered nautilus, pearly nautilus, nautilus
+n01978287 Dungeness crab, Cancer magister
+n01978455 rock crab, Cancer irroratus
+n01980166 fiddler crab
+n01981276 king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica
+n01983481 American lobster, Northern lobster, Maine lobster, Homarus americanus
+n01984695 spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish
+n01985128 crayfish, crawfish, crawdad, crawdaddy
+n01986214 hermit crab
+n01990800 isopod
+n02002556 white stork, Ciconia ciconia
+n02002724 black stork, Ciconia nigra
+n02006656 spoonbill
+n02007558 flamingo
+n02009229 little blue heron, Egretta caerulea
+n02009912 American egret, great white heron, Egretta albus
+n02011460 bittern
+n02012849 crane
+n02013706 limpkin, Aramus pictus
+n02017213 European gallinule, Porphyrio porphyrio
+n02018207 American coot, marsh hen, mud hen, water hen, Fulica americana
+n02018795 bustard
+n02025239 ruddy turnstone, Arenaria interpres
+n02027492 red-backed sandpiper, dunlin, Erolia alpina
+n02028035 redshank, Tringa totanus
+n02033041 dowitcher
+n02037110 oystercatcher, oyster catcher
+n02051845 pelican
+n02056570 king penguin, Aptenodytes patagonica
+n02058221 albatross, mollymawk
+n02066245 grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus
+n02071294 killer whale, killer, orca, grampus, sea wolf, Orcinus orca
+n02074367 dugong, Dugong dugon
+n02077923 sea lion
+n02085620 Chihuahua
+n02085782 Japanese spaniel
+n02085936 Maltese dog, Maltese terrier, Maltese
+n02086079 Pekinese, Pekingese, Peke
+n02086240 Shih-Tzu
+n02086646 Blenheim spaniel
+n02086910 papillon
+n02087046 toy terrier
+n02087394 Rhodesian ridgeback
+n02088094 Afghan hound, Afghan
+n02088238 basset, basset hound
+n02088364 beagle
+n02088466 bloodhound, sleuthhound
+n02088632 bluetick
+n02089078 black-and-tan coonhound
+n02089867 Walker hound, Walker foxhound
+n02089973 English foxhound
+n02090379 redbone
+n02090622 borzoi, Russian wolfhound
+n02090721 Irish wolfhound
+n02091032 Italian greyhound
+n02091134 whippet
+n02091244 Ibizan hound, Ibizan Podenco
+n02091467 Norwegian elkhound, elkhound
+n02091635 otterhound, otter hound
+n02091831 Saluki, gazelle hound
+n02092002 Scottish deerhound, deerhound
+n02092339 Weimaraner
+n02093256 Staffordshire bullterrier, Staffordshire bull terrier
+n02093428 American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier
+n02093647 Bedlington terrier
+n02093754 Border terrier
+n02093859 Kerry blue terrier
+n02093991 Irish terrier
+n02094114 Norfolk terrier
+n02094258 Norwich terrier
+n02094433 Yorkshire terrier
+n02095314 wire-haired fox terrier
+n02095570 Lakeland terrier
+n02095889 Sealyham terrier, Sealyham
+n02096051 Airedale, Airedale terrier
+n02096177 cairn, cairn terrier
+n02096294 Australian terrier
+n02096437 Dandie Dinmont, Dandie Dinmont terrier
+n02096585 Boston bull, Boston terrier
+n02097047 miniature schnauzer
+n02097130 giant schnauzer
+n02097209 standard schnauzer
+n02097298 Scotch terrier, Scottish terrier, Scottie
+n02097474 Tibetan terrier, chrysanthemum dog
+n02097658 silky terrier, Sydney silky
+n02098105 soft-coated wheaten terrier
+n02098286 West Highland white terrier
+n02098413 Lhasa, Lhasa apso
+n02099267 flat-coated retriever
+n02099429 curly-coated retriever
+n02099601 golden retriever
+n02099712 Labrador retriever
+n02099849 Chesapeake Bay retriever
+n02100236 German short-haired pointer
+n02100583 vizsla, Hungarian pointer
+n02100735 English setter
+n02100877 Irish setter, red setter
+n02101006 Gordon setter
+n02101388 Brittany spaniel
+n02101556 clumber, clumber spaniel
+n02102040 English springer, English springer spaniel
+n02102177 Welsh springer spaniel
+n02102318 cocker spaniel, English cocker spaniel, cocker
+n02102480 Sussex spaniel
+n02102973 Irish water spaniel
+n02104029 kuvasz
+n02104365 schipperke
+n02105056 groenendael
+n02105162 malinois
+n02105251 briard
+n02105412 kelpie
+n02105505 komondor
+n02105641 Old English sheepdog, bobtail
+n02105855 Shetland sheepdog, Shetland sheep dog, Shetland
+n02106030 collie
+n02106166 Border collie
+n02106382 Bouvier des Flandres, Bouviers des Flandres
+n02106550 Rottweiler
+n02106662 German shepherd, German shepherd dog, German police dog, alsatian
+n02107142 Doberman, Doberman pinscher
+n02107312 miniature pinscher
+n02107574 Greater Swiss Mountain dog
+n02107683 Bernese mountain dog
+n02107908 Appenzeller
+n02108000 EntleBucher
+n02108089 boxer
+n02108422 bull mastiff
+n02108551 Tibetan mastiff
+n02108915 French bulldog
+n02109047 Great Dane
+n02109525 Saint Bernard, St Bernard
+n02109961 Eskimo dog, husky
+n02110063 malamute, malemute, Alaskan malamute
+n02110185 Siberian husky
+n02110341 dalmatian, coach dog, carriage dog
+n02110627 affenpinscher, monkey pinscher, monkey dog
+n02110806 basenji
+n02110958 pug, pug-dog
+n02111129 Leonberg
+n02111277 Newfoundland, Newfoundland dog
+n02111500 Great Pyrenees
+n02111889 Samoyed, Samoyede
+n02112018 Pomeranian
+n02112137 chow, chow chow
+n02112350 keeshond
+n02112706 Brabancon griffon
+n02113023 Pembroke, Pembroke Welsh corgi
+n02113186 Cardigan, Cardigan Welsh corgi
+n02113624 toy poodle
+n02113712 miniature poodle
+n02113799 standard poodle
+n02113978 Mexican hairless
+n02114367 timber wolf, grey wolf, gray wolf, Canis lupus
+n02114548 white wolf, Arctic wolf, Canis lupus tundrarum
+n02114712 red wolf, maned wolf, Canis rufus, Canis niger
+n02114855 coyote, prairie wolf, brush wolf, Canis latrans
+n02115641 dingo, warrigal, warragal, Canis dingo
+n02115913 dhole, Cuon alpinus
+n02116738 African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus
+n02117135 hyena, hyaena
+n02119022 red fox, Vulpes vulpes
+n02119789 kit fox, Vulpes macrotis
+n02120079 Arctic fox, white fox, Alopex lagopus
+n02120505 grey fox, gray fox, Urocyon cinereoargenteus
+n02123045 tabby, tabby cat
+n02123159 tiger cat
+n02123394 Persian cat
+n02123597 Siamese cat, Siamese
+n02124075 Egyptian cat
+n02125311 cougar, puma, catamount, mountain lion, painter, panther, Felis concolor
+n02127052 lynx, catamount
+n02128385 leopard, Panthera pardus
+n02128757 snow leopard, ounce, Panthera uncia
+n02128925 jaguar, panther, Panthera onca, Felis onca
+n02129165 lion, king of beasts, Panthera leo
+n02129604 tiger, Panthera tigris
+n02130308 cheetah, chetah, Acinonyx jubatus
+n02132136 brown bear, bruin, Ursus arctos
+n02133161 American black bear, black bear, Ursus americanus, Euarctos americanus
+n02134084 ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus
+n02134418 sloth bear, Melursus ursinus, Ursus ursinus
+n02137549 mongoose
+n02138441 meerkat, mierkat
+n02165105 tiger beetle
+n02165456 ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle
+n02167151 ground beetle, carabid beetle
+n02168699 long-horned beetle, longicorn, longicorn beetle
+n02169497 leaf beetle, chrysomelid
+n02172182 dung beetle
+n02174001 rhinoceros beetle
+n02177972 weevil
+n02190166 fly
+n02206856 bee
+n02219486 ant, emmet, pismire
+n02226429 grasshopper, hopper
+n02229544 cricket
+n02231487 walking stick, walkingstick, stick insect
+n02233338 cockroach, roach
+n02236044 mantis, mantid
+n02256656 cicada, cicala
+n02259212 leafhopper
+n02264363 lacewing, lacewing fly
+n02268443 dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk
+n02268853 damselfly
+n02276258 admiral
+n02277742 ringlet, ringlet butterfly
+n02279972 monarch, monarch butterfly, milkweed butterfly, Danaus plexippus
+n02280649 cabbage butterfly
+n02281406 sulphur butterfly, sulfur butterfly
+n02281787 lycaenid, lycaenid butterfly
+n02317335 starfish, sea star
+n02319095 sea urchin
+n02321529 sea cucumber, holothurian
+n02325366 wood rabbit, cottontail, cottontail rabbit
+n02326432 hare
+n02328150 Angora, Angora rabbit
+n02342885 hamster
+n02346627 porcupine, hedgehog
+n02356798 fox squirrel, eastern fox squirrel, Sciurus niger
+n02361337 marmot
+n02363005 beaver
+n02364673 guinea pig, Cavia cobaya
+n02389026 sorrel
+n02391049 zebra
+n02395406 hog, pig, grunter, squealer, Sus scrofa
+n02396427 wild boar, boar, Sus scrofa
+n02397096 warthog
+n02398521 hippopotamus, hippo, river horse, Hippopotamus amphibius
+n02403003 ox
+n02408429 water buffalo, water ox, Asiatic buffalo, Bubalus bubalis
+n02410509 bison
+n02412080 ram, tup
+n02415577 bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis
+n02417914 ibex, Capra ibex
+n02422106 hartebeest
+n02422699 impala, Aepyceros melampus
+n02423022 gazelle
+n02437312 Arabian camel, dromedary, Camelus dromedarius
+n02437616 llama
+n02441942 weasel
+n02442845 mink
+n02443114 polecat, fitch, foulmart, foumart, Mustela putorius
+n02443484 black-footed ferret, ferret, Mustela nigripes
+n02444819 otter
+n02445715 skunk, polecat, wood pussy
+n02447366 badger
+n02454379 armadillo
+n02457408 three-toed sloth, ai, Bradypus tridactylus
+n02480495 orangutan, orang, orangutang, Pongo pygmaeus
+n02480855 gorilla, Gorilla gorilla
+n02481823 chimpanzee, chimp, Pan troglodytes
+n02483362 gibbon, Hylobates lar
+n02483708 siamang, Hylobates syndactylus, Symphalangus syndactylus
+n02484975 guenon, guenon monkey
+n02486261 patas, hussar monkey, Erythrocebus patas
+n02486410 baboon
+n02487347 macaque
+n02488291 langur
+n02488702 colobus, colobus monkey
+n02489166 proboscis monkey, Nasalis larvatus
+n02490219 marmoset
+n02492035 capuchin, ringtail, Cebus capucinus
+n02492660 howler monkey, howler
+n02493509 titi, titi monkey
+n02493793 spider monkey, Ateles geoffroyi
+n02494079 squirrel monkey, Saimiri sciureus
+n02497673 Madagascar cat, ring-tailed lemur, Lemur catta
+n02500267 indri, indris, Indri indri, Indri brevicaudatus
+n02504013 Indian elephant, Elephas maximus
+n02504458 African elephant, Loxodonta africana
+n02509815 lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens
+n02510455 giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca
+n02514041 barracouta, snoek
+n02526121 eel
+n02536864 coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch
+n02606052 rock beauty, Holocanthus tricolor
+n02607072 anemone fish
+n02640242 sturgeon
+n02641379 gar, garfish, garpike, billfish, Lepisosteus osseus
+n02643566 lionfish
+n02655020 puffer, pufferfish, blowfish, globefish
+n02666196 abacus
+n02667093 abaya
+n02669723 academic gown, academic robe, judge's robe
+n02672831 accordion, piano accordion, squeeze box
+n02676566 acoustic guitar
+n02687172 aircraft carrier, carrier, flattop, attack aircraft carrier
+n02690373 airliner
+n02692877 airship, dirigible
+n02699494 altar
+n02701002 ambulance
+n02704792 amphibian, amphibious vehicle
+n02708093 analog clock
+n02727426 apiary, bee house
+n02730930 apron
+n02747177 ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin
+n02749479 assault rifle, assault gun
+n02769748 backpack, back pack, knapsack, packsack, rucksack, haversack
+n02776631 bakery, bakeshop, bakehouse
+n02777292 balance beam, beam
+n02782093 balloon
+n02783161 ballpoint, ballpoint pen, ballpen, Biro
+n02786058 Band Aid
+n02787622 banjo
+n02788148 bannister, banister, balustrade, balusters, handrail
+n02790996 barbell
+n02791124 barber chair
+n02791270 barbershop
+n02793495 barn
+n02794156 barometer
+n02795169 barrel, cask
+n02797295 barrow, garden cart, lawn cart, wheelbarrow
+n02799071 baseball
+n02802426 basketball
+n02804414 bassinet
+n02804610 bassoon
+n02807133 bathing cap, swimming cap
+n02808304 bath towel
+n02808440 bathtub, bathing tub, bath, tub
+n02814533 beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon
+n02814860 beacon, lighthouse, beacon light, pharos
+n02815834 beaker
+n02817516 bearskin, busby, shako
+n02823428 beer bottle
+n02823750 beer glass
+n02825657 bell cote, bell cot
+n02834397 bib
+n02835271 bicycle-built-for-two, tandem bicycle, tandem
+n02837789 bikini, two-piece
+n02840245 binder, ring-binder
+n02841315 binoculars, field glasses, opera glasses
+n02843684 birdhouse
+n02859443 boathouse
+n02860847 bobsled, bobsleigh, bob
+n02865351 bolo tie, bolo, bola tie, bola
+n02869837 bonnet, poke bonnet
+n02870880 bookcase
+n02871525 bookshop, bookstore, bookstall
+n02877765 bottlecap
+n02879718 bow
+n02883205 bow tie, bow-tie, bowtie
+n02892201 brass, memorial tablet, plaque
+n02892767 brassiere, bra, bandeau
+n02894605 breakwater, groin, groyne, mole, bulwark, seawall, jetty
+n02895154 breastplate, aegis, egis
+n02906734 broom
+n02909870 bucket, pail
+n02910353 buckle
+n02916936 bulletproof vest
+n02917067 bullet train, bullet
+n02927161 butcher shop, meat market
+n02930766 cab, hack, taxi, taxicab
+n02939185 caldron, cauldron
+n02948072 candle, taper, wax light
+n02950826 cannon
+n02951358 canoe
+n02951585 can opener, tin opener
+n02963159 cardigan
+n02965783 car mirror
+n02966193 carousel, carrousel, merry-go-round, roundabout, whirligig
+n02966687 carpenter's kit, tool kit
+n02971356 carton
+n02974003 car wheel
+n02977058 cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM
+n02978881 cassette
+n02979186 cassette player
+n02980441 castle
+n02981792 catamaran
+n02988304 CD player
+n02992211 cello, violoncello
+n02992529 cellular telephone, cellular phone, cellphone, cell, mobile phone
+n02999410 chain
+n03000134 chainlink fence
+n03000247 chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour
+n03000684 chain saw, chainsaw
+n03014705 chest
+n03016953 chiffonier, commode
+n03017168 chime, bell, gong
+n03018349 china cabinet, china closet
+n03026506 Christmas stocking
+n03028079 church, church building
+n03032252 cinema, movie theater, movie theatre, movie house, picture palace
+n03041632 cleaver, meat cleaver, chopper
+n03042490 cliff dwelling
+n03045698 cloak
+n03047690 clog, geta, patten, sabot
+n03062245 cocktail shaker
+n03063599 coffee mug
+n03063689 coffeepot
+n03065424 coil, spiral, volute, whorl, helix
+n03075370 combination lock
+n03085013 computer keyboard, keypad
+n03089624 confectionery, confectionary, candy store
+n03095699 container ship, containership, container vessel
+n03100240 convertible
+n03109150 corkscrew, bottle screw
+n03110669 cornet, horn, trumpet, trump
+n03124043 cowboy boot
+n03124170 cowboy hat, ten-gallon hat
+n03125729 cradle
+n03126707 crane
+n03127747 crash helmet
+n03127925 crate
+n03131574 crib, cot
+n03133878 Crock Pot
+n03134739 croquet ball
+n03141823 crutch
+n03146219 cuirass
+n03160309 dam, dike, dyke
+n03179701 desk
+n03180011 desktop computer
+n03187595 dial telephone, dial phone
+n03188531 diaper, nappy, napkin
+n03196217 digital clock
+n03197337 digital watch
+n03201208 dining table, board
+n03207743 dishrag, dishcloth
+n03207941 dishwasher, dish washer, dishwashing machine
+n03208938 disk brake, disc brake
+n03216828 dock, dockage, docking facility
+n03218198 dogsled, dog sled, dog sleigh
+n03220513 dome
+n03223299 doormat, welcome mat
+n03240683 drilling platform, offshore rig
+n03249569 drum, membranophone, tympan
+n03250847 drumstick
+n03255030 dumbbell
+n03259280 Dutch oven
+n03271574 electric fan, blower
+n03272010 electric guitar
+n03272562 electric locomotive
+n03290653 entertainment center
+n03291819 envelope
+n03297495 espresso maker
+n03314780 face powder
+n03325584 feather boa, boa
+n03337140 file, file cabinet, filing cabinet
+n03344393 fireboat
+n03345487 fire engine, fire truck
+n03347037 fire screen, fireguard
+n03355925 flagpole, flagstaff
+n03372029 flute, transverse flute
+n03376595 folding chair
+n03379051 football helmet
+n03384352 forklift
+n03388043 fountain
+n03388183 fountain pen
+n03388549 four-poster
+n03393912 freight car
+n03394916 French horn, horn
+n03400231 frying pan, frypan, skillet
+n03404251 fur coat
+n03417042 garbage truck, dustcart
+n03424325 gasmask, respirator, gas helmet
+n03425413 gas pump, gasoline pump, petrol pump, island dispenser
+n03443371 goblet
+n03444034 go-kart
+n03445777 golf ball
+n03445924 golfcart, golf cart
+n03447447 gondola
+n03447721 gong, tam-tam
+n03450230 gown
+n03452741 grand piano, grand
+n03457902 greenhouse, nursery, glasshouse
+n03459775 grille, radiator grille
+n03461385 grocery store, grocery, food market, market
+n03467068 guillotine
+n03476684 hair slide
+n03476991 hair spray
+n03478589 half track
+n03481172 hammer
+n03482405 hamper
+n03483316 hand blower, blow dryer, blow drier, hair dryer, hair drier
+n03485407 hand-held computer, hand-held microcomputer
+n03485794 handkerchief, hankie, hanky, hankey
+n03492542 hard disc, hard disk, fixed disk
+n03494278 harmonica, mouth organ, harp, mouth harp
+n03495258 harp
+n03496892 harvester, reaper
+n03498962 hatchet
+n03527444 holster
+n03529860 home theater, home theatre
+n03530642 honeycomb
+n03532672 hook, claw
+n03534580 hoopskirt, crinoline
+n03535780 horizontal bar, high bar
+n03538406 horse cart, horse-cart
+n03544143 hourglass
+n03584254 iPod
+n03584829 iron, smoothing iron
+n03590841 jack-o'-lantern
+n03594734 jean, blue jean, denim
+n03594945 jeep, landrover
+n03595614 jersey, T-shirt, tee shirt
+n03598930 jigsaw puzzle
+n03599486 jinrikisha, ricksha, rickshaw
+n03602883 joystick
+n03617480 kimono
+n03623198 knee pad
+n03627232 knot
+n03630383 lab coat, laboratory coat
+n03633091 ladle
+n03637318 lampshade, lamp shade
+n03642806 laptop, laptop computer
+n03649909 lawn mower, mower
+n03657121 lens cap, lens cover
+n03658185 letter opener, paper knife, paperknife
+n03661043 library
+n03662601 lifeboat
+n03666591 lighter, light, igniter, ignitor
+n03670208 limousine, limo
+n03673027 liner, ocean liner
+n03676483 lipstick, lip rouge
+n03680355 Loafer
+n03690938 lotion
+n03691459 loudspeaker, speaker, speaker unit, loudspeaker system, speaker system
+n03692522 loupe, jeweler's loupe
+n03697007 lumbermill, sawmill
+n03706229 magnetic compass
+n03709823 mailbag, postbag
+n03710193 mailbox, letter box
+n03710637 maillot
+n03710721 maillot, tank suit
+n03717622 manhole cover
+n03720891 maraca
+n03721384 marimba, xylophone
+n03724870 mask
+n03729826 matchstick
+n03733131 maypole
+n03733281 maze, labyrinth
+n03733805 measuring cup
+n03742115 medicine chest, medicine cabinet
+n03743016 megalith, megalithic structure
+n03759954 microphone, mike
+n03761084 microwave, microwave oven
+n03763968 military uniform
+n03764736 milk can
+n03769881 minibus
+n03770439 miniskirt, mini
+n03770679 minivan
+n03773504 missile
+n03775071 mitten
+n03775546 mixing bowl
+n03776460 mobile home, manufactured home
+n03777568 Model T
+n03777754 modem
+n03781244 monastery
+n03782006 monitor
+n03785016 moped
+n03786901 mortar
+n03787032 mortarboard
+n03788195 mosque
+n03788365 mosquito net
+n03791053 motor scooter, scooter
+n03792782 mountain bike, all-terrain bike, off-roader
+n03792972 mountain tent
+n03793489 mouse, computer mouse
+n03794056 mousetrap
+n03796401 moving van
+n03803284 muzzle
+n03804744 nail
+n03814639 neck brace
+n03814906 necklace
+n03825788 nipple
+n03832673 notebook, notebook computer
+n03837869 obelisk
+n03838899 oboe, hautboy, hautbois
+n03840681 ocarina, sweet potato
+n03841143 odometer, hodometer, mileometer, milometer
+n03843555 oil filter
+n03854065 organ, pipe organ
+n03857828 oscilloscope, scope, cathode-ray oscilloscope, CRO
+n03866082 overskirt
+n03868242 oxcart
+n03868863 oxygen mask
+n03871628 packet
+n03873416 paddle, boat paddle
+n03874293 paddlewheel, paddle wheel
+n03874599 padlock
+n03876231 paintbrush
+n03877472 pajama, pyjama, pj's, jammies
+n03877845 palace
+n03884397 panpipe, pandean pipe, syrinx
+n03887697 paper towel
+n03888257 parachute, chute
+n03888605 parallel bars, bars
+n03891251 park bench
+n03891332 parking meter
+n03895866 passenger car, coach, carriage
+n03899768 patio, terrace
+n03902125 pay-phone, pay-station
+n03903868 pedestal, plinth, footstall
+n03908618 pencil box, pencil case
+n03908714 pencil sharpener
+n03916031 perfume, essence
+n03920288 Petri dish
+n03924679 photocopier
+n03929660 pick, plectrum, plectron
+n03929855 pickelhaube
+n03930313 picket fence, paling
+n03930630 pickup, pickup truck
+n03933933 pier
+n03935335 piggy bank, penny bank
+n03937543 pill bottle
+n03938244 pillow
+n03942813 ping-pong ball
+n03944341 pinwheel
+n03947888 pirate, pirate ship
+n03950228 pitcher, ewer
+n03954731 plane, carpenter's plane, woodworking plane
+n03956157 planetarium
+n03958227 plastic bag
+n03961711 plate rack
+n03967562 plow, plough
+n03970156 plunger, plumber's helper
+n03976467 Polaroid camera, Polaroid Land camera
+n03976657 pole
+n03977966 police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria
+n03980874 poncho
+n03982430 pool table, billiard table, snooker table
+n03983396 pop bottle, soda bottle
+n03991062 pot, flowerpot
+n03992509 potter's wheel
+n03995372 power drill
+n03998194 prayer rug, prayer mat
+n04004767 printer
+n04005630 prison, prison house
+n04008634 projectile, missile
+n04009552 projector
+n04019541 puck, hockey puck
+n04023962 punching bag, punch bag, punching ball, punchball
+n04026417 purse
+n04033901 quill, quill pen
+n04033995 quilt, comforter, comfort, puff
+n04037443 racer, race car, racing car
+n04039381 racket, racquet
+n04040759 radiator
+n04041544 radio, wireless
+n04044716 radio telescope, radio reflector
+n04049303 rain barrel
+n04065272 recreational vehicle, RV, R.V.
+n04067472 reel
+n04069434 reflex camera
+n04070727 refrigerator, icebox
+n04074963 remote control, remote
+n04081281 restaurant, eating house, eating place, eatery
+n04086273 revolver, six-gun, six-shooter
+n04090263 rifle
+n04099969 rocking chair, rocker
+n04111531 rotisserie
+n04116512 rubber eraser, rubber, pencil eraser
+n04118538 rugby ball
+n04118776 rule, ruler
+n04120489 running shoe
+n04125021 safe
+n04127249 safety pin
+n04131690 saltshaker, salt shaker
+n04133789 sandal
+n04136333 sarong
+n04141076 sax, saxophone
+n04141327 scabbard
+n04141975 scale, weighing machine
+n04146614 school bus
+n04147183 schooner
+n04149813 scoreboard
+n04152593 screen, CRT screen
+n04153751 screw
+n04154565 screwdriver
+n04162706 seat belt, seatbelt
+n04179913 sewing machine
+n04192698 shield, buckler
+n04200800 shoe shop, shoe-shop, shoe store
+n04201297 shoji
+n04204238 shopping basket
+n04204347 shopping cart
+n04208210 shovel
+n04209133 shower cap
+n04209239 shower curtain
+n04228054 ski
+n04229816 ski mask
+n04235860 sleeping bag
+n04238763 slide rule, slipstick
+n04239074 sliding door
+n04243546 slot, one-armed bandit
+n04251144 snorkel
+n04252077 snowmobile
+n04252225 snowplow, snowplough
+n04254120 soap dispenser
+n04254680 soccer ball
+n04254777 sock
+n04258138 solar dish, solar collector, solar furnace
+n04259630 sombrero
+n04263257 soup bowl
+n04264628 space bar
+n04265275 space heater
+n04266014 space shuttle
+n04270147 spatula
+n04273569 speedboat
+n04275548 spider web, spider's web
+n04277352 spindle
+n04285008 sports car, sport car
+n04286575 spotlight, spot
+n04296562 stage
+n04310018 steam locomotive
+n04311004 steel arch bridge
+n04311174 steel drum
+n04317175 stethoscope
+n04325704 stole
+n04326547 stone wall
+n04328186 stopwatch, stop watch
+n04330267 stove
+n04332243 strainer
+n04335435 streetcar, tram, tramcar, trolley, trolley car
+n04336792 stretcher
+n04344873 studio couch, day bed
+n04346328 stupa, tope
+n04347754 submarine, pigboat, sub, U-boat
+n04350905 suit, suit of clothes
+n04355338 sundial
+n04355933 sunglass
+n04356056 sunglasses, dark glasses, shades
+n04357314 sunscreen, sunblock, sun blocker
+n04366367 suspension bridge
+n04367480 swab, swob, mop
+n04370456 sweatshirt
+n04371430 swimming trunks, bathing trunks
+n04371774 swing
+n04372370 switch, electric switch, electrical switch
+n04376876 syringe
+n04380533 table lamp
+n04389033 tank, army tank, armored combat vehicle, armoured combat vehicle
+n04392985 tape player
+n04398044 teapot
+n04399382 teddy, teddy bear
+n04404412 television, television system
+n04409515 tennis ball
+n04417672 thatch, thatched roof
+n04418357 theater curtain, theatre curtain
+n04423845 thimble
+n04428191 thresher, thrasher, threshing machine
+n04429376 throne
+n04435653 tile roof
+n04442312 toaster
+n04443257 tobacco shop, tobacconist shop, tobacconist
+n04447861 toilet seat
+n04456115 torch
+n04458633 totem pole
+n04461696 tow truck, tow car, wrecker
+n04462240 toyshop
+n04465501 tractor
+n04467665 trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi
+n04476259 tray
+n04479046 trench coat
+n04482393 tricycle, trike, velocipede
+n04483307 trimaran
+n04485082 tripod
+n04486054 triumphal arch
+n04487081 trolleybus, trolley coach, trackless trolley
+n04487394 trombone
+n04493381 tub, vat
+n04501370 turnstile
+n04505470 typewriter keyboard
+n04507155 umbrella
+n04509417 unicycle, monocycle
+n04515003 upright, upright piano
+n04517823 vacuum, vacuum cleaner
+n04522168 vase
+n04523525 vault
+n04525038 velvet
+n04525305 vending machine
+n04532106 vestment
+n04532670 viaduct
+n04536866 violin, fiddle
+n04540053 volleyball
+n04542943 waffle iron
+n04548280 wall clock
+n04548362 wallet, billfold, notecase, pocketbook
+n04550184 wardrobe, closet, press
+n04552348 warplane, military plane
+n04553703 washbasin, handbasin, washbowl, lavabo, wash-hand basin
+n04554684 washer, automatic washer, washing machine
+n04557648 water bottle
+n04560804 water jug
+n04562935 water tower
+n04579145 whiskey jug
+n04579432 whistle
+n04584207 wig
+n04589890 window screen
+n04590129 window shade
+n04591157 Windsor tie
+n04591713 wine bottle
+n04592741 wing
+n04596742 wok
+n04597913 wooden spoon
+n04599235 wool, woolen, woollen
+n04604644 worm fence, snake fence, snake-rail fence, Virginia fence
+n04606251 wreck
+n04612504 yawl
+n04613696 yurt
+n06359193 web site, website, internet site, site
+n06596364 comic book
+n06785654 crossword puzzle, crossword
+n06794110 street sign
+n06874185 traffic light, traffic signal, stoplight
+n07248320 book jacket, dust cover, dust jacket, dust wrapper
+n07565083 menu
+n07579787 plate
+n07583066 guacamole
+n07584110 consomme
+n07590611 hot pot, hotpot
+n07613480 trifle
+n07614500 ice cream, icecream
+n07615774 ice lolly, lolly, lollipop, popsicle
+n07684084 French loaf
+n07693725 bagel, beigel
+n07695742 pretzel
+n07697313 cheeseburger
+n07697537 hotdog, hot dog, red hot
+n07711569 mashed potato
+n07714571 head cabbage
+n07714990 broccoli
+n07715103 cauliflower
+n07716358 zucchini, courgette
+n07716906 spaghetti squash
+n07717410 acorn squash
+n07717556 butternut squash
+n07718472 cucumber, cuke
+n07718747 artichoke, globe artichoke
+n07720875 bell pepper
+n07730033 cardoon
+n07734744 mushroom
+n07742313 Granny Smith
+n07745940 strawberry
+n07747607 orange
+n07749582 lemon
+n07753113 fig
+n07753275 pineapple, ananas
+n07753592 banana
+n07754684 jackfruit, jak, jack
+n07760859 custard apple
+n07768694 pomegranate
+n07802026 hay
+n07831146 carbonara
+n07836838 chocolate sauce, chocolate syrup
+n07860988 dough
+n07871810 meat loaf, meatloaf
+n07873807 pizza, pizza pie
+n07875152 potpie
+n07880968 burrito
+n07892512 red wine
+n07920052 espresso
+n07930864 cup
+n07932039 eggnog
+n09193705 alp
+n09229709 bubble
+n09246464 cliff, drop, drop-off
+n09256479 coral reef
+n09288635 geyser
+n09332890 lakeside, lakeshore
+n09399592 promontory, headland, head, foreland
+n09421951 sandbar, sand bar
+n09428293 seashore, coast, seacoast, sea-coast
+n09468604 valley, vale
+n09472597 volcano
+n09835506 ballplayer, baseball player
+n10148035 groom, bridegroom
+n10565667 scuba diver
+n11879895 rapeseed
+n11939491 daisy
+n12057211 yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum
+n12144580 corn
+n12267677 acorn
+n12620546 hip, rose hip, rosehip
+n12768682 buckeye, horse chestnut, conker
+n12985857 coral fungus
+n12998815 agaric
+n13037406 gyromitra
+n13040303 stinkhorn, carrion fungus
+n13044778 earthstar
+n13052670 hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa
+n13054560 bolete
+n13133613 ear, spike, capitulum
+n15075141 toilet tissue, toilet paper, bathroom tissue
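
Note: the synset file above maps a WordNet ID to comma-separated class names, one class per line; the class index predicted by the network is the zero-based line number. A small sketch of the notebook-style label lookup, assuming the file is available locally:

    with open("image_net_synset.txt") as f:
        labels_map = [line.split(sep=' ', maxsplit=1)[-1].strip() for line in f]
    # labels_map[0] -> "tench, Tinca tinca"
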
diff --git a/inference-engine/ie_bridges/python/sample/segmentation_sample.py b/inference-engine/ie_bridges/python/sample/segmentation_sample.py
deleted file mode 100644
index ad6605094..000000000
--- a/inference-engine/ie_bridges/python/sample/segmentation_sample.py
+++ /dev/null
@@ -1,154 +0,0 @@
-#!/usr/bin/env python
-"""
- Copyright (c) 2018 Intel Corporation
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-"""
-from __future__ import print_function
-import sys
-import os
-from argparse import ArgumentParser
-import cv2
-import numpy as np
-import logging as log
-from time import time
-from openvino.inference_engine import IENetwork, IEPlugin
-
-classes_color_map = [
- (150, 150, 150),
- (58, 55, 169),
- (211, 51, 17),
- (157, 80, 44),
- (23, 95, 189),
- (210, 133, 34),
- (76, 226, 202),
- (101, 138, 127),
- (223, 91, 182),
- (80, 128, 113),
- (235, 155, 55),
- (44, 151, 243),
- (159, 80, 170),
- (239, 208, 44),
- (128, 50, 51),
- (82, 141, 193),
- (9, 107, 10),
- (223, 90, 142),
- (50, 248, 83),
- (178, 101, 130),
- (71, 30, 204)
-]
-
-
-def build_argparser():
- parser = ArgumentParser()
- parser.add_argument("-m", "--model", help="Path to an .xml file with a trained model.", required=True, type=str)
- parser.add_argument("-i", "--input", help="Path to a folder with images or path to an image files", required=True,
- type=str, nargs="+")
- parser.add_argument("-l", "--cpu_extension",
- help="MKLDNN (CPU)-targeted custom layers.Absolute path to a shared library with the kernels "
- "impl.", type=str, default=None)
- parser.add_argument("-pp", "--plugin_dir", help="Path to a plugin folder", type=str, default=None)
- parser.add_argument("-d", "--device",
- help="Specify the target device to infer on; CPU, GPU, FPGA or MYRIAD is acceptable. Sample "
- "will look for a suitable plugin for device specified (CPU by default)", default="CPU",
- type=str)
- parser.add_argument("-nt", "--number_top", help="Number of top results", default=10, type=int)
- parser.add_argument("-ni", "--number_iter", help="Number of inference iterations", default=1, type=int)
- parser.add_argument("-pc", "--perf_counts", help="Report performance counters", default=False, action="store_true")
- return parser
-
-
-def main():
- log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
- args = build_argparser().parse_args()
- model_xml = args.model
- model_bin = os.path.splitext(model_xml)[0] + ".bin"
-
- # Plugin initialization for specified device and load extensions library if specified
- plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
- if args.cpu_extension and 'CPU' in args.device:
- plugin.add_cpu_extension(args.cpu_extension)
- # Read IR
- log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
- net = IENetwork.from_ir(model=model_xml, weights=model_bin)
-
- if plugin.device == "CPU":
- supported_layers = plugin.get_supported_layers(net)
- not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
- if len(not_supported_layers) != 0:
- log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
- format(plugin.device, ', '.join(not_supported_layers)))
- log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
- "or --cpu_extension command line argument")
- sys.exit(1)
- assert len(net.inputs.keys()) == 1, "Sample supports only single input topologies"
- assert len(net.outputs) == 1, "Sample supports only single output topologies"
-
- log.info("Preparing input blobs")
- input_blob = next(iter(net.inputs))
- out_blob = next(iter(net.outputs))
- net.batch_size = len(args.input)
-
- # Read and pre-process input images
- n, c, h, w = net.inputs[input_blob].shape
- images = np.ndarray(shape=(n, c, h, w))
- for i in range(n):
- image = cv2.imread(args.input[i])
- if image.shape[:-1] != (h, w):
- log.warning("Image {} is resized from {} to {}".format(args.input[i], image.shape[:-1], (h, w)))
- image = cv2.resize(image, (w, h))
- image = image.transpose((2, 0, 1)) # Change data layout from HWC to CHW
- images[i] = image
- log.info("Batch size is {}".format(n))
-
- # Loading model to the plugin
- log.info("Loading model to the plugin")
- exec_net = plugin.load(network=net)
- del net
-
- # Start sync inference
- log.info("Starting inference ({} iterations)".format(args.number_iter))
- infer_time = []
- for i in range(args.number_iter):
- t0 = time()
- res = exec_net.infer(inputs={input_blob: images})
- infer_time.append((time() - t0) * 1000)
- log.info("Average running time of one iteration: {} ms".format(np.average(np.asarray(infer_time))))
- if args.perf_counts:
- perf_counts = exec_net.requests[0].get_perf_counts()
- log.info("Performance counters:")
- print("{:<70} {:<15} {:<15} {:<15} {:<10}".format('name', 'layer_type', 'exet_type', 'status', 'real_time, us'))
- for layer, stats in perf_counts.items():
- print ("{:<70} {:<15} {:<15} {:<15} {:<10}".format(layer, stats['layer_type'], stats['exec_type'],
- stats['status'], stats['real_time']))
- # Processing output blob
- log.info("Processing output blob")
- res = res[out_blob]
- for batch, data in enumerate(res):
- classes_map = np.zeros(shape=(h, w, c), dtype=np.int)
- for i in range(h):
- for j in range(w):
- if len(data[:, i, j]) == 1:
- pixel_class = int(data[:, i, j])
- else:
- pixel_class = np.argmax(data[:, i, j])
- classes_map[i, j, :] = classes_color_map[min(pixel_class, 20)]
- out_img = os.path.join(os.path.dirname(__file__), "out_{}.bmp".format(batch))
- cv2.imwrite(out_img, classes_map)
- log.info("Result image was saved to {}".format(out_img))
- del exec_net
- del plugin
-
-
-if __name__ == '__main__':
- sys.exit(main() or 0)
diff --git a/inference-engine/ie_bridges/python/sample/style_transfer_sample.py b/inference-engine/ie_bridges/python/sample/style_transfer_sample.py
index fc471a583..76fcadaff 100644
--- a/inference-engine/ie_bridges/python/sample/style_transfer_sample.py
+++ b/inference-engine/ie_bridges/python/sample/style_transfer_sample.py
@@ -51,7 +51,6 @@ def build_argparser():
type=float)
parser.add_argument("-pc", "--perf_counts", help="Report performance counters", default=False, action="store_true")
-
return parser
@@ -67,7 +66,7 @@ def main():
plugin.add_cpu_extension(args.cpu_extension)
# Read IR
log.info("Loading network files:\n\t{}\n\t{}".format(model_xml, model_bin))
- net = IENetwork.from_ir(model=model_xml, weights=model_bin)
+ net = IENetwork(model=model_xml, weights=model_bin)
if plugin.device == "CPU":
supported_layers = plugin.get_supported_layers(net)
@@ -117,8 +116,8 @@ def main():
log.info("Performance counters:")
print("{:<70} {:<15} {:<15} {:<15} {:<10}".format('name', 'layer_type', 'exet_type', 'status', 'real_time, us'))
for layer, stats in perf_counts.items():
- print ("{:<70} {:<15} {:<15} {:<15} {:<10}".format(layer, stats['layer_type'], stats['exec_type'],
- stats['status'], stats['real_time']))
+ print("{:<70} {:<15} {:<15} {:<15} {:<10}".format(layer, stats['layer_type'], stats['exec_type'],
+ stats['status'], stats['real_time']))
# Processing output blob
log.info("Processing output blob")
res = res[out_blob]
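
For illustration, the sample-side impact of the hunks above is limited to how the IR is loaded: the deprecated IENetwork.from_ir() factory gives way to the class constructor. A minimal sketch of the updated loading flow, assuming a CPU plugin and placeholder IR paths:

    import os
    from openvino.inference_engine import IENetwork, IEPlugin

    model_xml = "model.xml"                                  # placeholder path to an IR .xml
    model_bin = os.path.splitext(model_xml)[0] + ".bin"      # matching weights file

    # Old (deprecated): net = IENetwork.from_ir(model=model_xml, weights=model_bin)
    net = IENetwork(model=model_xml, weights=model_bin)

    plugin = IEPlugin(device="CPU")
    exec_net = plugin.load(network=net)
    print(list(net.inputs), list(net.outputs))               # input/output blob names
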
diff --git a/inference-engine/ie_bridges/python/setup.py b/inference-engine/ie_bridges/python/setup.py
index 41f877229..bb9df0ecd 100644
--- a/inference-engine/ie_bridges/python/setup.py
+++ b/inference-engine/ie_bridges/python/setup.py
@@ -51,8 +51,8 @@ def parse_command_line_options(cls):
base_init_options(self)
def run(self):
- global INFERENCE_ENGINE_DIR
- global BUNDLE_INFERENCE_ENGINE
+ global INFERENCE_ENGINE_DIR
+ global BUNDLE_INFERENCE_ENGINE
if self.copy_ie_libs:
BUNDLE_INFERENCE_ENGINE = True
@@ -187,16 +187,14 @@ cmdclass = {
}
setup(
- name="inference_engine",
- version='0.1.1',
+ name="src",
+ version='1.0',
description='Python inference for Inference Engine',
packages=find_packages(exclude=['tests']),
package_data={PACKAGE_NAME: ['*.so', '*.dll', '*dylib*', '*.pyd']},
include_package_data=True,
ext_modules=extensions,
cmdclass=cmdclass,
- author='', author_email='',
- tests_require=['pytest'],
install_requires=list(requirements),
zip_safe=False,
)
diff --git a/inference-engine/ie_bridges/python/src/openvino/__init__.py b/inference-engine/ie_bridges/python/src/openvino/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/inference-engine/ie_bridges/python/src/openvino/__init__.py
diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/CMakeLists.txt b/inference-engine/ie_bridges/python/src/openvino/inference_engine/CMakeLists.txt
new file mode 100644
index 000000000..aa8ac74b1
--- /dev/null
+++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/CMakeLists.txt
@@ -0,0 +1,36 @@
+# If the pyx file is a C++ file, we should specify that here.
+set (CMAKE_INCLUDE_CURRENT_DIR ON)
+set (TARGET_NAME "ie_api")
+
+set (CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}/inference_engine)
+set (CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY})
+
+set_source_files_properties(
+ ie_api_impl_defs.pxd
+ ie_api_impl.hpp
+ ie_api_impl.cpp
+ ie_api.pyx
+ ie_api.pxd
+
+ PROPERTIES CYTHON_IS_CXX TRUE
+)
+
+cython_add_module (
+ ${TARGET_NAME}
+
+ ie_api_impl_defs.pxd
+ ie_api_impl.hpp
+ ie_api_impl.cpp
+ ie_api.pyx
+)
+
+set_target_properties (${TARGET_NAME} PROPERTIES CXX_STANDARD 11 LINKER_LANGUAGE CXX)
+target_link_libraries (${TARGET_NAME} PRIVATE ${InferenceEngine_LIBRARIES})
+
+# perform copy
+ADD_CUSTOM_COMMAND (TARGET ${TARGET_NAME}
+ POST_BUILD
+ COMMAND ${CMAKE_COMMAND} -E copy ${PYTHON_BRIDGE_SRC_ROOT}/src/openvino/inference_engine/__init__.py ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/__init__.py
+ COMMAND ${CMAKE_COMMAND} -E copy ${PYTHON_BRIDGE_SRC_ROOT}/requirements.txt ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/../../requirements.txt
+ COMMAND ${CMAKE_COMMAND} -E copy ${PYTHON_BRIDGE_SRC_ROOT}/src/openvino/__init__.py ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/../__init__.py
+) \ No newline at end of file
diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/__init__.py b/inference-engine/ie_bridges/python/src/openvino/inference_engine/__init__.py
new file mode 100644
index 000000000..ff435b3bd
--- /dev/null
+++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/__init__.py
@@ -0,0 +1,3 @@
+from .ie_api import *
+__version__ = get_version()
+__all__ = ['IENetwork', "IEPlugin", "IENetReader"] \ No newline at end of file
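
The package move also changes the import path: what used to live in a flat inference_engine module now sits under openvino.inference_engine, with the compiled ie_api extension re-exported by this __init__.py. A quick smoke test of the new layout, assuming the build output directory is on PYTHONPATH:

    from openvino.inference_engine import IENetwork, IEPlugin, get_version

    print(get_version())              # version string reported by the underlying C++ core
    assert IENetwork and IEPlugin     # classes re-exported from the compiled ie_api module
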
diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/dnn_builder/CMakeLists.txt b/inference-engine/ie_bridges/python/src/openvino/inference_engine/dnn_builder/CMakeLists.txt
new file mode 100644
index 000000000..1b25c3ebe
--- /dev/null
+++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/dnn_builder/CMakeLists.txt
@@ -0,0 +1,37 @@
+# If the pyx file is a C++ file, we should specify that here.
+set(CMAKE_INCLUDE_CURRENT_DIR ON)
+
+set(TARGET_NAME "dnn_builder")
+
+set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}/inference_engine/${TARGET_NAME})
+set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY})
+
+set_source_files_properties(
+ dnn_builder_defs.pxd
+ dnn_builder_impl.hpp
+ dnn_builder_impl.cpp
+ dnn_builder.pyx
+ dnn_builder.pxd
+
+ PROPERTIES CYTHON_IS_CXX TRUE
+)
+
+cython_add_module(
+ ${TARGET_NAME}
+
+ dnn_builder_impl_defs.pxd
+ dnn_builder_impl.hpp
+ dnn_builder_impl.cpp
+ dnn_builder.pyx
+)
+
+set_target_properties (${TARGET_NAME} PROPERTIES CXX_STANDARD 11 LINKER_LANGUAGE CXX)
+add_dependencies (${TARGET_NAME} ie_api)
+target_include_directories (${TARGET_NAME} PRIVATE ${PYTHON_BRIDGE_SRC_ROOT}/src/openvino/inference_engine )
+target_link_libraries (${TARGET_NAME} PRIVATE ${InferenceEngine_LIBRARIES})
+
+# perform copy
+ADD_CUSTOM_COMMAND (TARGET ${TARGET_NAME}
+ POST_BUILD
+ COMMAND ${CMAKE_COMMAND} -E copy ${PYTHON_BRIDGE_SRC_ROOT}/src/openvino/inference_engine/${TARGET_NAME}/__init__.py ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}
+) \ No newline at end of file
diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/dnn_builder/__init__.py b/inference-engine/ie_bridges/python/src/openvino/inference_engine/dnn_builder/__init__.py
new file mode 100644
index 000000000..79744ab14
--- /dev/null
+++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/dnn_builder/__init__.py
@@ -0,0 +1,2 @@
+from .dnn_builder import *
+__all__ = ["NetworkBuilder", "LayerBuilder"]
diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/dnn_builder/dnn_builder.pxd b/inference-engine/ie_bridges/python/src/openvino/inference_engine/dnn_builder/dnn_builder.pxd
new file mode 100644
index 000000000..9a5621508
--- /dev/null
+++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/dnn_builder/dnn_builder.pxd
@@ -0,0 +1,26 @@
+from .cimport dnn_builder_impl_defs as C
+from libcpp.memory cimport shared_ptr
+
+cdef class NetworkBuilder:
+ cdef C.NetworkBuilder impl
+
+cdef class INetwork:
+ cdef C.INetwork impl
+
+cdef class ILayer:
+ cdef C.ILayer impl
+
+cdef class Port:
+ cdef C.Port impl
+
+cdef class PortInfo:
+ cdef C.PortInfo impl
+
+cdef class Connection:
+ cdef C.Connection impl
+
+cdef class LayerBuilder:
+ cdef C.LayerBuilder impl
+
+cdef class LayerConstantData(dict):
+ cdef shared_ptr[C.LayerBuilder] impl \ No newline at end of file
diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/dnn_builder/dnn_builder.pyx b/inference-engine/ie_bridges/python/src/openvino/inference_engine/dnn_builder/dnn_builder.pyx
new file mode 100644
index 000000000..b0754cb5f
--- /dev/null
+++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/dnn_builder/dnn_builder.pyx
@@ -0,0 +1,423 @@
+# #distutils: language=c++
+#from cython.operator cimport dereference as deref
+from libcpp.vector cimport vector
+from libcpp.map cimport map
+from libcpp.string cimport string
+from ..ie_api cimport IENetwork, BlobBuffer
+from .cimport dnn_builder_impl_defs as C
+from .dnn_builder_impl_defs cimport Blob
+import numpy as np
+
+
+np_precision_map = {
+ "float32": "FP32",
+ "float16": "FP16",
+ "int32": "I32",
+ "int16": "I16",
+ "uint16": "U16",
+ "int8": "I8",
+ "uint8": "U8",
+ }
+cdef class NetworkBuilder:
+ def __cinit__(self, name=None, IENetwork ie_net=None):
+ if name is not None and ie_net is not None:
+ raise AttributeError("Both name and ie_net arguments are defined")
+ elif name is not None:
+ self.impl = C.NetworkBuilder(name.encode())
+ elif ie_net is not None:
+ self.impl = C.NetworkBuilder().from_ie_network(ie_net.impl)
+
+ def build(self):
+ cdef INetwork i_net = INetwork()
+ i_net.impl = self.impl.build()
+ return i_net
+
+ def get_layer(self, id: int):
+ cdef LayerBuilder py_layer = LayerBuilder()
+ py_layer.impl = self.impl.getLayer(id)
+ return py_layer
+
+ @property
+ def layers(self):
+ cdef vector[C.LayerBuilder] c_layers = self.impl.getLayers()
+ cdef LayerBuilder py_layer
+ py_layers = {}
+ for l in c_layers:
+ py_layer = LayerBuilder()
+ py_layer.impl = l
+ py_layers[l.getName().decode()] = py_layer
+ return py_layers
+
+ def remove_layer(self, LayerBuilder layer):
+ self.impl.removeLayer(layer.impl)
+
+ def get_layer_connection(self, LayerBuilder layer):
+ cdef vector[C.Connection] c_connections = self.impl.getLayerConnections(layer.impl)
+ cdef Connection connection
+ connections = []
+ for con in c_connections:
+ connection = Connection()
+ connection.impl = con
+ connections.append(connection)
+ return connections
+
+ def disconnect(self, Connection connection):
+ self.impl.disconnect(connection.impl)
+
+ def connect(self, PortInfo input, PortInfo output):
+ self.impl.connect(input.impl, output.impl)
+
+ def add_layer(self, LayerBuilder layer, input_ports: list = None):
+ cdef vector[C.PortInfo] c_ports
+ cdef PortInfo c_port
+ if not input_ports:
+ return self.impl.addLayer(layer.impl)
+ else:
+ for p in input_ports:
+ c_port = PortInfo(p.layer_id, p.port_id)
+ c_ports.push_back(c_port.impl)
+ return self.impl.addAndConnectLayer(c_ports, layer.impl)
+
+cdef class INetwork:
+ def __iter__(self):
+ cdef ILayer layer
+ layers = []
+ cdef vector[C.ILayer] c_layers = self.impl.layers
+ for l in c_layers:
+ layer = ILayer()
+ layer.impl = l
+ layers.append(layer)
+ return iter(layers)
+
+ @property
+ def layers(self):
+ cdef ILayer layer
+ layers = {}
+ cdef vector[C.ILayer] c_layers = self.impl.layers
+ for l in c_layers:
+ layer = ILayer()
+ layer.impl = l
+ layers[l.name.decode()] = layer
+ return layers
+
+ @property
+ def inputs(self):
+ cdef ILayer layer
+ layers = {}
+ cdef vector[C.ILayer] c_layers = self.impl.inputs
+ for l in c_layers:
+ layer = ILayer()
+ layer.impl = l
+ layers[l.name.decode()] = layer
+ return layers
+
+ @property
+ def outputs(self):
+ cdef ILayer layer
+ layers = {}
+ cdef vector[C.ILayer] c_layers = self.impl.outputs
+ for l in c_layers:
+ layer = ILayer()
+ layer.impl = l
+ layers[l.name.decode()] = layer
+ return layers
+
+ @property
+ def name(self):
+ return self.impl.name.decode()
+
+
+ @property
+ def size(self):
+ return self.impl.size
+
+ def get_layer_connection(self, layer: ILayer):
+ cdef Connection connection
+ connections = []
+ cdef vector[C.Connection] c_connections = self.impl.getLayerConnections(layer.id)
+ for con in c_connections:
+ connection = Connection()
+ connection.impl = con
+ connections.append(connection)
+ return connections
+
+ def to_ie_network(self):
+ cdef IENetwork net = IENetwork()
+ net.impl = self.impl.to_ie_network()
+ return net
+
+cdef class ILayer:
+ @property
+ def name(self):
+ return self.impl.name.decode()
+
+ @property
+ def id(self):
+ return self.impl.id
+
+ @property
+ def type(self):
+ return self.impl.type.decode()
+
+ @property
+ def params(self):
+ return {k.decode(): v.decode() for k, v in self.impl.parameters}
+
+ @property
+ def input_ports(self):
+ cdef Port port
+ cdef vector[C.Port] c_ports = self.impl.in_ports
+ ports = []
+ for p in c_ports:
+ port = Port()
+ port.impl = p
+ ports.append(port)
+ return ports
+
+ @property
+ def output_ports(self):
+ cdef Port port
+ cdef vector[C.Port] c_ports = self.impl.out_ports
+ ports = []
+ for p in c_ports:
+ port = Port()
+ port.impl = p
+ ports.append(port)
+ return ports
+
+ @property
+ def constant_data(self):
+ cdef map[string, Blob.Ptr] c_constant_data
+ c_constant_data = self.impl.constant_data
+ constant_data = {}
+ cdef BlobBuffer weights_buffer
+ for weights in c_constant_data:
+ weights_buffer = BlobBuffer()
+ weights_buffer.reset(weights.second)
+ constant_data[weights.first.decode()] = weights_buffer.to_numpy()
+ return constant_data
+
+
+cdef class Port:
+ def __cinit__(self, shape: list=[]):
+ cdef vector[size_t] c_shape
+ for d in shape:
+ c_shape.push_back(d)
+ self.impl = C.Port(c_shape)
+ @property
+ def shape(self):
+ return self.impl.shape
+
+cdef class PortInfo:
+ def __cinit__(self, layer_id: int = -1, port_id: int = -1):
+ if layer_id != -1 and port_id != -1:
+ self.impl = C.PortInfo(layer_id, port_id)
+ else:
+ self.impl = C.PortInfo()
+ @property
+ def layer_id(self):
+ return self.impl.layer_id
+
+ @property
+ def port_id(self):
+ return self.impl.port_id
+
+ def __eq__(self, other):
+ return self.layer_id == other.layer_id and self.port_id == other.port_id
+
+ def __ne__(self, other):
+ return self.layer_id != other.layer_id and self.port_id != other.port_id
+
+cdef class Connection:
+ def __cinit__(self, PortInfo input = None, PortInfo output = None):
+ if input and output:
+ self.impl = C.Connection(input.impl, output.impl)
+ else:
+ self.impl = C.Connection()
+ @property
+ def _from(self):
+ cdef PortInfo port_info = PortInfo()
+ port_info.impl = self.impl._from
+ return port_info
+
+ @property
+ def to(self):
+ cdef PortInfo port_info = PortInfo()
+ port_info.impl = self.impl.to
+ return port_info
+
+ def __eq__(self, other):
+ return self._from == other._from and self.to == other.to
+
+ def __ne__(self, other):
+ return self._from != other._from and self.to != other.to
+
+
+def check_constant_data(data):
+ for k, v in data.items():
+ if not all([isinstance(x, type(v[0])) for x in v]):
+ raise TypeError("Elements of list for key {} have different data types! "
+ "Please specify list of 'int' or 'float' values.".format(k))
+ if isinstance(v, list):
+ if isinstance(v[0], float):
+ dtype = np.float32
+ elif isinstance(v[0], int):
+ dtype = np.int32
+ else:
+ raise TypeError("Unsupported precision of the data for key {}! Given {} but 'float or 'int' precision expected".
+ format(k, str(v.dtype)))
+ data[k] = np.asanyarray(v, dtype=dtype)
+ elif isinstance(v, np.ndarray):
+ pass
+ else:
+ raise TypeError("Unsupported data type for key '{}'. {} given but 'list' or 'numpy.ndarray' expected".
+ format(k, type(v)))
+ return data
+
+
+# TODO: Fix LayerBuilder object copying - pass by reference
+# cdef class LayerConstantData(dict):
+# def update(self, other=None, **kwargs):
+# if other:
+# other = check_constant_data(other)
+# cdef vector[size_t] dims
+# cdef Blob.Ptr blob_ptr
+# cdef BlobBuffer buffer
+# for k, v in other.items():
+# if k in self.keys() and (v.shape == self[k].shape and v.dtype == self[k].dtype):
+# print("Reuse blob for {}\n".format(k))
+# self[k][:] = v
+# else:
+# for dim in v.shape:
+# dims.push_back(dim)
+# ie_precision = np_precision_map.get(str(v.dtype), None)
+# if not ie_precision:
+# raise BufferError("Unsupported precision of the data for key {}! Given {} but one of the {} precisions expected".
+# format(k, str(v.dtype), ", ".join(np_precision_map.keys())))
+# blob_ptr = deref(self.impl).allocateBlob(dims, ie_precision.encode())
+# buffer = BlobBuffer()
+# buffer.reset(blob_ptr)
+# np_buffer = buffer.to_numpy()
+# np_buffer[:] = v
+# deref(self.impl).addConstantData(k.encode(), blob_ptr)
+
+cdef class LayerBuilder:
+
+ def __cinit__(self, type: str=None, name: str=None):
+ if name and type:
+ self.impl = C.LayerBuilder(name.encode(), type.encode())
+ else:
+ self.impl = C.LayerBuilder()
+
+ @property
+ def id(self):
+ return self.impl.id
+ @property
+ def name(self):
+ return self.impl.getName().decode()
+ @name.setter
+ def name(self, name: str):
+ self.impl.setName(name.encode())
+
+ @property
+ def type(self):
+ return self.impl.getType().decode()
+ @type.setter
+ def type(self, type: str):
+ self.impl.setType(type.encode())
+
+ @property
+ def input_ports(self):
+ cdef Port port
+ cdef vector[C.Port] c_ports = self.impl.getInputPorts()
+ py_ports = []
+ for p in c_ports:
+ port = Port()
+ port.impl = p
+ py_ports.append(port)
+ return py_ports
+
+ @input_ports.setter
+ def input_ports(self, ports: list):
+ cdef vector[C.Port] c_ports
+ cdef Port c_port
+ for p in ports:
+ c_port = Port(p.shape)
+ c_ports.push_back(c_port.impl)
+ self.impl.setInputPorts(c_ports)
+
+ @property
+ def output_ports(self):
+ cdef Port port
+ cdef vector[C.Port] c_ports = self.impl.getOutputPorts()
+ py_ports = []
+ for p in c_ports:
+ port = Port()
+ port.impl = p
+ py_ports.append(port)
+ return py_ports
+
+ @output_ports.setter
+ def output_ports(self, ports: list):
+ cdef vector[C.Port] c_ports
+ cdef Port c_port
+ for p in ports:
+ c_port = Port(p.shape)
+ c_ports.push_back(c_port.impl)
+ self.impl.setOutputPorts(c_ports)
+
+ @property
+ def params(self):
+ return {k.decode(): v.decode() for k, v in self.impl.getParameters()}
+
+ @params.setter
+ def params(self, params_map: dict):
+ cdef map[string, string] c_params_map
+ for k, v in params_map.items():
+ c_params_map[k.encode()] = str(v).encode()
+ self.impl.setParameters(c_params_map)
+
+ def build(self):
+ cdef ILayer layer = ILayer()
+ layer.impl = self.impl.build()
+ return layer
+
+ @property
+ def constant_data(self):
+ cdef map[string, Blob.Ptr] c_constant_data
+ c_constant_data = self.impl.getConstantData()
+ constant_data = {}
+        # TODO: Fix LayerBuilder object copying - pass by reference
+ # constant_data = LayerConstantData()
+ # constant_data.impl = make_shared[C.LayerBuilder](self.impl)
+ cdef BlobBuffer weights_buffer
+ for weights in c_constant_data:
+ weights_buffer = BlobBuffer()
+ weights_buffer.reset(weights.second)
+ constant_data[weights.first.decode()] = weights_buffer.to_numpy()
+ return constant_data
+
+ @constant_data.setter
+ def constant_data(self, data: dict):
+ cdef vector[size_t] dims
+ cdef map[string, Blob.Ptr] c_constant_data
+ cdef Blob.Ptr blob_ptr
+ cdef BlobBuffer buffer
+ data = check_constant_data(data)
+ for k, v in data.items():
+ for dim in v.shape:
+ dims.push_back(dim)
+ ie_precision = np_precision_map.get(str(v.dtype), None)
+ if not ie_precision:
+ raise BufferError("Unsupported precision of the data for key {}! Given {} but one of the {} precisions expected".
+ format(k, str(v.dtype), ", ".join(np_precision_map.keys())))
+ blob_ptr = self.impl.allocateBlob(dims, ie_precision.encode())
+ buffer = BlobBuffer()
+ buffer.reset(blob_ptr)
+ np_buffer = buffer.to_numpy()
+ np_buffer[:] = v
+ c_constant_data[k.encode()] = blob_ptr
+
+ self.impl.setConstantData(c_constant_data)
+
+ # TODO: Implement get\setGraph when will be supported \ No newline at end of file
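
The Cython module above is the entire user-facing surface of the graph builder. A hedged sketch of the intended workflow, assuming a valid IR at the placeholder paths below (in-place layer editing is still limited by the pass-by-value TODOs noted in the code):

    from openvino.inference_engine import IENetwork
    from openvino.inference_engine.dnn_builder import NetworkBuilder

    # Wrap an existing graph in an editable builder.
    net = IENetwork(model="model.xml", weights="model.bin")    # placeholder IR paths
    builder = NetworkBuilder(ie_net=net)

    # Inspect the editable graph: layer names, types and string parameters.
    for name, layer in builder.layers.items():
        print(name, layer.type, layer.params)

    # Freeze it into a read-only INetwork, then convert back for execution.
    i_net = builder.build()
    print(i_net.name, i_net.size)           # size reports the number of layers
    runnable = i_net.to_ie_network()        # plain IENetwork, loadable via IEPlugin.load()
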
diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/dnn_builder/dnn_builder_impl.cpp b/inference-engine/ie_bridges/python/src/openvino/inference_engine/dnn_builder/dnn_builder_impl.cpp
new file mode 100644
index 000000000..fc9ab4edf
--- /dev/null
+++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/dnn_builder/dnn_builder_impl.cpp
@@ -0,0 +1,330 @@
+// Copyright (c) 2018 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "dnn_builder_impl.hpp"
+
+// using namespace InferenceEnginePython;
+// using namespace std;
+
+std::map<std::string, InferenceEngine::Precision> precision_map = {{"FP32", InferenceEngine::Precision::FP32},
+ {"FP16", InferenceEngine::Precision::FP16},
+ {"Q78", InferenceEngine::Precision::Q78},
+ {"I32", InferenceEngine::Precision::I32},
+ {"I16", InferenceEngine::Precision::I16},
+ {"I8", InferenceEngine::Precision::I8},
+ {"U16", InferenceEngine::Precision::U16},
+ {"U8", InferenceEngine::Precision::U8}};
+
+InferenceEnginePython::ILayer buildILayer(InferenceEngine::ILayer::CPtr it) {
+ std::vector<InferenceEnginePython::Port> in_ports;
+ std::vector<InferenceEnginePython::Port> out_ports;
+ for (const auto &port : it->getInputPorts()) {
+ in_ports.push_back(InferenceEnginePython::Port(port.shape()));
+ }
+ for (const auto &port : it->getOutputPorts()) {
+ out_ports.push_back(InferenceEnginePython::Port(port.shape()));
+ }
+
+ std::map<std::string, std::string> params_map;
+ for (const auto &params : it->getParameters()->getParameters()) {
+ params_map.emplace(params.first, params.second);
+ }
+ std::map<std::string, InferenceEngine::Blob::Ptr> data_map;
+ for (const auto &data : it->getParameters()->getConstantData()) {
+ data_map.emplace(data.first, std::const_pointer_cast<InferenceEngine::Blob>(data.second));
+ }
+ return {it,
+ it->getName(),
+ it->getId(),
+ it->getType(),
+ params_map,
+ data_map,
+ in_ports,
+ out_ports,
+ };
+}
+
+// NetworkBuilder
+InferenceEnginePython::NetworkBuilder::NetworkBuilder(const std::string &name) {
+ // TODO( ): std::move or instance in heap? Please check in other places.
+ InferenceEngine::Builder::Network network(name);
+ network_ptr = std::make_shared<InferenceEngine::Builder::Network>(network);
+}
+
+InferenceEnginePython::NetworkBuilder InferenceEnginePython::NetworkBuilder::from_ie_network(
+ const InferenceEnginePython::IENetwork &icnn_net) {
+ InferenceEngine::Builder::Network network((InferenceEngine::ICNNNetwork &) icnn_net.actual);
+ NetworkBuilder net_builder = NetworkBuilder();
+ net_builder.network_ptr = std::make_shared<InferenceEngine::Builder::Network>(network);
+ return net_builder;
+}
+
+InferenceEnginePython::INetwork InferenceEnginePython::NetworkBuilder::build() {
+ InferenceEngine::INetwork::Ptr i_net = network_ptr->build();
+ std::vector<ILayer> layers;
+ for (const auto &it : *i_net) {
+ layers.push_back(buildILayer(it));
+ }
+ std::vector<ILayer> inputs;
+ for (const auto &it : i_net->getInputs()) {
+ inputs.push_back(buildILayer(it));
+ }
+ std::vector<ILayer> outputs;
+ for (const auto &it : i_net->getInputs()) {
+ outputs.push_back(buildILayer(it));
+ }
+ return {i_net, // INetwork ptr
+ i_net->getName(), // name
+ i_net->size(), // Number of layers
+ layers,
+ inputs,
+ outputs
+ };
+}
+
+std::vector<InferenceEnginePython::LayerBuilder> InferenceEnginePython::NetworkBuilder::getLayers() {
+ std::vector<LayerBuilder> layers;
+ for (const auto &it : network_ptr->getLayers()) {
+ LayerBuilder layer;
+ layer.actual = it;
+ layer.id = it.getId();
+ layers.push_back(layer);
+ }
+ return layers;
+}
+
+InferenceEnginePython::LayerBuilder InferenceEnginePython::NetworkBuilder::getLayer(size_t layer_id) {
+ LayerBuilder layer;
+ InferenceEngine::Builder::Layer ie_layer = network_ptr->getLayer(layer_id);
+ layer.actual = ie_layer;
+ layer.id = ie_layer.getId();
+ return layer;
+}
+
+void InferenceEnginePython::NetworkBuilder::removeLayer(const LayerBuilder &layer) {
+ network_ptr->removeLayer(layer.id);
+}
+
+const std::vector<InferenceEnginePython::Connection> InferenceEnginePython::NetworkBuilder::getLayerConnections(
+ const LayerBuilder &layer) {
+ std::vector<InferenceEngine::Connection> ie_connections = network_ptr->getLayerConnections(layer.id);
+ std::vector<Connection> connections;
+ for (auto const &it : ie_connections) {
+ PortInfo input(it.from().layerId(), it.from().portId());
+ PortInfo output(it.to().layerId(), it.to().portId());
+ connections.push_back(Connection(input, output));
+ }
+ return connections;
+}
+
+void InferenceEnginePython::NetworkBuilder::disconnect(const Connection &connection) {
+ network_ptr->disconnect(connection.actual);
+}
+
+void InferenceEnginePython::NetworkBuilder::connect(const PortInfo &input, const PortInfo &output) {
+ network_ptr->connect(input.actual, output.actual);
+}
+
+size_t InferenceEnginePython::NetworkBuilder::addLayer(const LayerBuilder &layer) {
+ return network_ptr->addLayer(layer.actual);
+}
+
+size_t InferenceEnginePython::NetworkBuilder::addAndConnectLayer(const std::vector<PortInfo> &input,
+ const LayerBuilder &layer) {
+ std::vector<InferenceEngine::PortInfo> ie_ports;
+ for (const auto &it : input) {
+ ie_ports.push_back(it.actual);
+ }
+ return network_ptr->addLayer(ie_ports, layer.actual);
+}
+// NetworkBuilder end
+
+// Port
+InferenceEnginePython::Port::Port(const std::vector<size_t> &shapes) {
+ actual = InferenceEngine::Port(shapes);
+ shape = actual.shape();
+}
+
+InferenceEnginePython::PortInfo::PortInfo(size_t layer_id, size_t port_id) : PortInfo() {
+ this->actual = InferenceEngine::PortInfo(layer_id, port_id);
+ this->layer_id = layer_id;
+ this->port_id = port_id;
+}
+// Port end
+
+// INetwork
+std::vector<InferenceEnginePython::Connection> InferenceEnginePython::INetwork::getLayerConnections(size_t layer_id) {
+ std::vector<Connection> connections;
+ for (const auto &it : actual->getLayerConnections(layer_id)) {
+ PortInfo input = PortInfo(it.from().layerId(), it.from().portId());
+ PortInfo output = PortInfo(it.to().layerId(), it.to().portId());
+ connections.push_back(Connection(input, output));
+ }
+ return connections;
+}
+
+InferenceEnginePython::IENetwork InferenceEnginePython::INetwork::to_ie_network() {
+ std::shared_ptr<InferenceEngine::ICNNNetwork> icnn_net = InferenceEngine::Builder::convertToICNNNetwork(actual);
+ InferenceEngine::CNNNetwork cnn_net(icnn_net);
+ IENetwork ie_net = IENetwork();
+ ie_net.actual = cnn_net;
+ ie_net.name = name;
+ ie_net.batch_size = cnn_net.getBatchSize();
+ return ie_net;
+}
+// INetwork end
+
+// Connection
+InferenceEnginePython::Connection::Connection(PortInfo input, PortInfo output) : Connection() {
+ this->actual = InferenceEngine::Connection(InferenceEngine::PortInfo(input.layer_id, input.port_id),
+ InferenceEngine::PortInfo(output.layer_id, output.port_id));
+ this->_from = PortInfo(actual.from().layerId(), actual.from().portId());
+ this->to = PortInfo(actual.to().layerId(), actual.to().portId());
+}
+// Connection end
+
+// LayerBuilder
+InferenceEnginePython::LayerBuilder::LayerBuilder(const std::string &type, const std::string &name) : LayerBuilder() {
+ InferenceEngine::Builder::Layer layer(type, name);
+ this->actual = layer;
+ this->id = layer.getId();
+}
+
+const std::string &InferenceEnginePython::LayerBuilder::getName() {
+ return actual.getName();
+}
+
+const std::string &InferenceEnginePython::LayerBuilder::getType() {
+ return actual.getType();
+}
+
+std::vector<InferenceEnginePython::Port> InferenceEnginePython::LayerBuilder::getInputPorts() {
+ std::vector<Port> ports;
+ for (const auto &it : actual.getInputPorts()) {
+ ports.push_back(Port(it.shape()));
+ }
+ return ports;
+}
+
+std::vector<InferenceEnginePython::Port> InferenceEnginePython::LayerBuilder::getOutputPorts() {
+ std::vector<Port> ports;
+ for (const auto &it : actual.getOutputPorts()) {
+ ports.push_back(Port(it.shape()));
+ }
+ return ports;
+}
+
+std::map<std::string, std::string> InferenceEnginePython::LayerBuilder::getParameters() {
+ std::map<std::string, std::string> params_map;
+ for (const auto &it : actual.getParameters()) {
+ params_map.emplace(it.first, it.second);
+ }
+ return params_map;
+}
+
+void InferenceEnginePython::LayerBuilder::setParameters(std::map<std::string, std::string> params_map) {
+ std::map<std::string, InferenceEngine::Parameter> ie_params_map;
+ for (const auto &it : params_map) {
+ InferenceEngine::Parameter ie_param((it.second));
+ ie_params_map.emplace(it.first, ie_param);
+ }
+ actual = actual.setParameters(ie_params_map);
+}
+
+void InferenceEnginePython::LayerBuilder::setName(const std::string &name) {
+ actual = actual.setName(name);
+}
+
+void InferenceEnginePython::LayerBuilder::setType(const std::string &type) {
+ actual = actual.setType(type);
+}
+
+void InferenceEnginePython::LayerBuilder::setInputPorts(const std::vector<Port> ports) {
+ std::vector<InferenceEngine::Port> ie_ports;
+ for (const auto &it : ports) {
+ ie_ports.push_back(it.actual);
+ }
+ actual = actual.setInputPorts(ie_ports);
+}
+
+void InferenceEnginePython::LayerBuilder::setOutputPorts(const std::vector<Port> ports) {
+ std::vector<InferenceEngine::Port> ie_ports;
+ for (const auto &it : ports) {
+ ie_ports.push_back(it.actual);
+ }
+ actual = actual.setOutputPorts(ie_ports);
+}
+
+InferenceEnginePython::ILayer InferenceEnginePython::LayerBuilder::build() {
+ return buildILayer(actual.build());
+}
+
+std::map<std::string, InferenceEngine::Blob::Ptr> InferenceEnginePython::LayerBuilder::getConstantData() {
+ std::map<std::string, InferenceEngine::Blob::Ptr> data_map;
+ for (const auto &it : actual.getConstantData()) {
+ data_map.emplace(it.first, std::const_pointer_cast<InferenceEngine::Blob>(it.second));
+ }
+ return data_map;
+}
+
+InferenceEngine::Blob::Ptr InferenceEnginePython::LayerBuilder::allocateBlob(std::vector<size_t> dims,
+ const std::string &precision) {
+ InferenceEngine::Layout ie_layout;
+ ie_layout = InferenceEngine::TensorDesc::getLayoutByDims(dims);
+ InferenceEngine::Precision ie_precision = precision_map.at(precision);
+ const InferenceEngine::TensorDesc &tdesc = InferenceEngine::TensorDesc(ie_precision, dims, ie_layout);
+ InferenceEngine::Blob::Ptr blob;
+ switch (ie_precision) {
+ case InferenceEngine::Precision::FP32:
+ blob = InferenceEngine::make_shared_blob<float>(tdesc);
+ break;
+ case InferenceEngine::Precision::FP16:
+ blob = InferenceEngine::make_shared_blob<int>(tdesc);
+ break;
+ case InferenceEngine::Precision::I16:
+ blob = InferenceEngine::make_shared_blob<int>(tdesc);
+ break;
+ case InferenceEngine::Precision::U16:
+ blob = InferenceEngine::make_shared_blob<int>(tdesc);
+ break;
+ case InferenceEngine::Precision::U8:
+ blob = InferenceEngine::make_shared_blob<unsigned char>(tdesc);
+ break;
+ case InferenceEngine::Precision::I8:
+ blob = InferenceEngine::make_shared_blob<signed char>(tdesc);
+ break;
+ case InferenceEngine::Precision::I32:
+ blob = InferenceEngine::make_shared_blob<signed int>(tdesc);
+ break;
+ default:
+ blob = InferenceEngine::make_shared_blob<float>(tdesc);
+ break;
+ }
+
+ blob->allocate();
+ return blob;
+}
+
+void InferenceEnginePython::LayerBuilder::setConstantData(const std::map<std::string,
+ InferenceEngine::Blob::Ptr> &const_data) {
+ actual.setConstantData(const_data);
+}
+// TODO( ): Fix LayerBuilder object copying - pass by reference
+// void LayerBuilder::addConstantData(const std::string & name, InferenceEngine::Blob::Ptr data){
+// InferenceEngine::Blob::CPtr c_data = const_pointer_cast<const InferenceEngine::Blob>(data);
+// actual.addConstantData(name, c_data);
+// }
+
+// LayerBuilder end
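
allocateBlob() above is what ultimately backs the Python-side constant_data setter: the numpy dtype is mapped through np_precision_map, a blob of matching precision and layout is allocated, and the array is copied in. A minimal sketch from the Python side (layer name, shape and values are illustrative; a single entry keeps the sketch simple):

    import numpy as np
    from openvino.inference_engine.dnn_builder import LayerBuilder

    fc = LayerBuilder(type="FullyConnected", name="fc1")         # illustrative layer
    weights = np.random.rand(10, 32).astype(np.float32)          # float32 maps to "FP32"

    # Validated by check_constant_data(), then copied into a freshly allocated blob.
    fc.constant_data = {"weights": weights}
    print(fc.constant_data["weights"].shape)                     # read back through a BlobBuffer view
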
diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/dnn_builder/dnn_builder_impl.hpp b/inference-engine/ie_bridges/python/src/openvino/inference_engine/dnn_builder/dnn_builder_impl.hpp
new file mode 100644
index 000000000..b58994abc
--- /dev/null
+++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/dnn_builder/dnn_builder_impl.hpp
@@ -0,0 +1,161 @@
+// Copyright (c) 2018 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <ie_blob.h>
+
+#include <iterator>
+
+#include <string>
+#include <iostream>
+#include <algorithm>
+#include <vector>
+#include <map>
+
+#include <sstream>
+#include <ie_builders.hpp>
+#include <inference_engine.hpp>
+
+#include <ie_api_impl.hpp>
+
+
+// namespace IE Python
+namespace InferenceEnginePython {
+struct LayerBuilder;
+
+struct Port {
+ Port() = default;
+
+ explicit Port(const std::vector<size_t> &shapes);
+
+ InferenceEngine::Port actual;
+ std::vector<size_t> shape;
+};
+
+struct ILayer {
+ InferenceEngine::ILayer::CPtr layer_ptr;
+ std::string name;
+ size_t id;
+ std::string type;
+ std::map<std::string, std::string> parameters;
+ std::map<std::string, InferenceEngine::Blob::Ptr> constant_data;
+ std::vector<Port> in_ports;
+ std::vector<Port> out_ports;
+};
+
+struct PortInfo {
+ PortInfo(size_t layer_id, size_t port_id);
+
+ PortInfo() : actual(0, 0) {}
+
+ InferenceEngine::PortInfo actual;
+ size_t layer_id;
+ size_t port_id;
+};
+
+struct Connection {
+ Connection() : actual(InferenceEngine::PortInfo(0), InferenceEngine::PortInfo(0)) {}
+
+ Connection(PortInfo input, PortInfo output);
+
+ InferenceEngine::Connection actual;
+ PortInfo _from;
+ PortInfo to;
+};
+
+struct INetwork {
+ InferenceEngine::INetwork::Ptr actual;
+ std::string name;
+ size_t size;
+ std::vector<ILayer> layers;
+ std::vector<ILayer> inputs;
+ std::vector<ILayer> outputs;
+
+ std::vector<Connection> getLayerConnections(size_t layer_id);
+
+ IENetwork to_ie_network();
+};
+
+struct NetworkBuilder {
+ InferenceEngine::Builder::Network::Ptr network_ptr;
+
+ explicit NetworkBuilder(const std::string &name);
+
+ NetworkBuilder() = default;
+
+ NetworkBuilder from_ie_network(const InferenceEnginePython::IENetwork &icnn_net);
+
+ INetwork build();
+
+ std::vector<LayerBuilder> getLayers();
+
+ LayerBuilder getLayer(size_t layer_id);
+
+ void removeLayer(const LayerBuilder &layer);
+
+ size_t addLayer(const LayerBuilder &layer);
+
+ size_t addAndConnectLayer(const std::vector<PortInfo> &input, const LayerBuilder &layer);
+
+ const std::vector<Connection> getLayerConnections(const LayerBuilder &layer);
+
+ void disconnect(const Connection &connection);
+
+ void connect(const PortInfo &input, const PortInfo &output);
+};
+
+struct LayerBuilder {
+ InferenceEngine::Builder::Layer actual;
+ size_t id;
+
+ LayerBuilder(const std::string &type, const std::string &name);
+
+ LayerBuilder() : actual("", "") {}
+
+ LayerBuilder from_ilayer(const ILayer &ilayer);
+
+ const std::string &getName();
+
+ void setName(const std::string &name);
+
+ const std::string &getType();
+
+ void setType(const std::string &type);
+
+ std::vector<Port> getInputPorts();
+
+ void setInputPorts(const std::vector<Port> ports);
+
+ std::vector<Port> getOutputPorts();
+
+ void setOutputPorts(const std::vector<Port> ports);
+
+
+ std::map<std::string, std::string> getParameters();
+
+ void setParameters(std::map<std::string, std::string> params_map);
+
+ ILayer build();
+
+ std::map<std::string, InferenceEngine::Blob::Ptr> getConstantData();
+
+ InferenceEngine::Blob::Ptr allocateBlob(std::vector<size_t> dims, const std::string &precision);
+
+ void setConstantData(const std::map<std::string, InferenceEngine::Blob::Ptr> &const_data);
+
+// TODO( ): Fix LayerBuilder object copying - pass by reference
+// void addConstantData(const std::string & name, InferenceEngine::Blob::Ptr data);
+};
+} // namespace InferenceEnginePython
diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/dnn_builder/dnn_builder_impl_defs.pxd b/inference-engine/ie_bridges/python/src/openvino/inference_engine/dnn_builder/dnn_builder_impl_defs.pxd
new file mode 100644
index 000000000..29795f26a
--- /dev/null
+++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/dnn_builder/dnn_builder_impl_defs.pxd
@@ -0,0 +1,97 @@
+from libcpp.string cimport string
+from libcpp.vector cimport vector
+from libc.stddef cimport size_t
+from libcpp.memory cimport shared_ptr
+from libcpp.map cimport map
+from ..ie_api_impl_defs cimport IENetwork
+
+cdef extern from "<inference_engine.hpp>" namespace "InferenceEngine":
+ ctypedef vector[size_t] SizeVector
+
+ cdef cppclass TensorDesc:
+ SizeVector& getDims()
+ const Precision& getPrecision() const
+
+ cdef cppclass Blob:
+ ctypedef shared_ptr[Blob] Ptr
+ const TensorDesc& getTensorDesc() const
+ size_t element_size() const
+
+ cdef cppclass Precision:
+ const char*name() const
+
+cdef extern from "dnn_builder_impl.hpp" namespace "InferenceEnginePython":
+ cdef cppclass ILayer:
+ const string name
+ size_t id
+ string type
+ map[string, string] parameters
+ vector[Port] in_ports
+ vector[Port] out_ports
+ map[string, Blob.Ptr] constant_data;
+
+
+ cdef cppclass INetwork:
+ string name
+ size_t size
+ vector[ILayer] layers
+ vector[ILayer] inputs
+ vector[ILayer] outputs
+ vector[Port] in_ports;
+ vector[Port] out_ports;
+ vector[Connection] getLayerConnections(size_t layer_id);
+ IENetwork to_ie_network();
+
+ cdef cppclass NetworkBuilder:
+ NetworkBuilder() except +
+ NetworkBuilder(string name) except +
+ NetworkBuilder from_ie_network(IENetwork &icnn_net) except +
+ INetwork build() except +
+ vector[LayerBuilder] getLayers() except +
+ LayerBuilder getLayer(size_t layer_id) except +
+ void removeLayer(const LayerBuilder& layer) except +
+ const vector[Connection] getLayerConnections(const LayerBuilder& layer) except +
+ void disconnect(const Connection& connection) except +
+ void connect(const PortInfo& input, const PortInfo& output) except +
+ size_t addLayer(const LayerBuilder& layer) except +
+ size_t addAndConnectLayer(const vector[PortInfo]& input, const LayerBuilder& layer);
+
+ cdef cppclass Port:
+ Port() except +
+ Port(const vector[size_t] & shapes) except +
+ const vector[size_t] shape
+
+
+ cdef cppclass PortInfo:
+ PortInfo(size_t layer_id, size_t port_id) except +
+ PortInfo() except +
+ size_t layer_id
+ size_t port_id
+
+ cdef cppclass Connection:
+ Connection(PortInfo input, PortInfo output) except +
+ Connection() except +
+ PortInfo _from
+ PortInfo to
+
+ cdef cppclass LayerBuilder:
+ LayerBuilder()
+ LayerBuilder(const string& type, const string& name ) except +
+ size_t id
+ LayerBuilder from_ilayer(const ILayer& ilayer) except +
+ string getName() except +
+ string getType() except +
+ vector[Port] getInputPorts() except +
+ vector[Port] getOutputPorts() except +
+ map[string, string] getParameters() except +
+ void setParameters(map[string, string] params_map) except +
+ void setName(const string & name) except +
+ void setType(const string & type) except +
+ void setInputPorts(const vector[Port] ports) except +
+ void setOutputPorts(const vector[Port] ports) except +
+ ILayer build() except +
+ map[string, Blob.Ptr] getConstantData()
+ void setConstantData(map[string, Blob.Ptr] &const_data)
+        # TODO: Fix LayerBuilder object copying - pass by reference
+ # void addConstantData(const string & name, Blob.Ptr data)
+ Blob.Ptr allocateBlob(vector[size_t] dims, const string & precision)
diff --git a/inference-engine/ie_bridges/python/inference_engine/ie_api.pxd b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pxd
index 8bf75be82..52bb27e1a 100644
--- a/inference-engine/ie_bridges/python/inference_engine/ie_api.pxd
+++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pxd
@@ -1,8 +1,3 @@
-# Copyright (C) 2018 Intel Corporation
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
from .cimport ie_api_impl_defs as C
from .ie_api_impl_defs cimport Blob, TensorDesc
@@ -24,24 +19,22 @@ cdef class BlobBuffer:
cdef class InferRequest:
cdef C.InferRequestWrap *impl
- cpdef BlobBuffer _get_input_buffer(self, const string & blob_name)
- cpdef BlobBuffer _get_output_buffer(self, const string & blob_name)
+ cpdef BlobBuffer _get_blob_buffer(self, const string & blob_name)
cpdef infer(self, inputs = ?)
cpdef async_infer(self, inputs = ?)
cpdef wait(self, timeout = ?)
cpdef get_perf_counts(self)
cdef public:
- _inputs, _outputs
+ _inputs_list, _outputs_list
cdef class IENetwork:
cdef C.IENetwork impl
-
cdef class ExecutableNetwork:
cdef unique_ptr[C.IEExecNetwork] impl
cdef public:
- _requests, async, _request_iterator
+ _requests, inputs, outputs
cdef class IEPlugin:
cdef C.IEPlugin impl
@@ -51,9 +44,6 @@ cdef class IEPlugin:
cpdef void set_initial_affinity(self, IENetwork network) except *
cpdef set get_supported_layers(self, IENetwork net)
-cdef class IENetReader:
- cdef C.IENetReader impl
-
cdef class IENetLayer:
cdef C.IENetLayer impl
@@ -61,4 +51,7 @@ cdef class InputInfo:
cdef C.InputInfo impl
cdef class OutputInfo:
- cdef C.OutputInfo impl \ No newline at end of file
+ cdef C.OutputInfo impl
+
+cdef class LayersStatsMap(dict):
+ cdef C.IENetwork net_impl
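
On the request side, the two per-direction buffer getters collapse into a single _get_blob_buffer(), and ExecutableNetwork now carries plain input/output name lists from which request objects are materialised on access. A hedged sketch of the resulting request-level flow, with placeholder IR paths:

    import numpy as np
    from openvino.inference_engine import IENetwork, IEPlugin

    net = IENetwork(model="model.xml", weights="model.bin")    # placeholder IR paths
    plugin = IEPlugin(device="CPU")
    exec_net = plugin.load(network=net, num_requests=2)

    request = exec_net.requests[0]               # built on access from the C++ infer_requests
    input_name = next(iter(net.inputs))
    blob = request.inputs[input_name]            # numpy view over the request's input blob
    blob[:] = np.zeros(blob.shape, dtype=blob.dtype)

    request.infer()
    for name, data in request.outputs.items():
        print(name, data.shape)
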
diff --git a/inference-engine/ie_bridges/python/inference_engine/ie_api.pyx b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx
index 4d8c0da5e..518125ebe 100644
--- a/inference-engine/ie_bridges/python/inference_engine/ie_api.pyx
+++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api.pyx
@@ -1,20 +1,18 @@
-# Copyright (C) 2018 Intel Corporation
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
#distutils: language=c++
from cython.operator cimport dereference as deref
from .cimport ie_api_impl_defs as C
from .ie_api_impl_defs cimport Blob, TensorDesc, SizeVector, Precision
from libcpp.string cimport string
from libcpp.vector cimport vector
+from libcpp.pair cimport pair
from libcpp.map cimport map
from libcpp.memory cimport unique_ptr
from libc.stdint cimport int64_t
import os
import numpy as np
from copy import deepcopy
+import warnings
+from collections import OrderedDict
cdef extern from "<utility>" namespace "std" nogil:
cdef unique_ptr[C.IEExecNetwork] move(unique_ptr[C.IEExecNetwork])
@@ -35,7 +33,7 @@ cdef dict_to_c_map(py_dict):
supported_precisions = ["FP32", "FP16", "Q78", "I32", "I16", "I8", "U32", "U16"]
supported_layouts = ["NCHW", "NHWC", "OIHW", "C", "CHW", "HW", "NC", "CN", "BLOCKED"]
-known_plugins = ['CPU', 'GPU', 'FPGA', 'MYRIAD', 'HETERO']
+known_plugins = ['CPU', 'GPU', 'FPGA', 'MYRIAD', 'HETERO', 'HDDL']
def get_version():
return C.get_version().decode()
@@ -68,7 +66,23 @@ cdef class IENetLayer:
@property
def params(self):
return {k.decode(): v.decode() for k, v in self.impl.params}
-
+ @property
+ def parents(self):
+ cdef vector[string] c_parents = self.impl.parents
+ parents = []
+ return [parent.decode() for parent in c_parents]
+ @property
+ def children(self):
+ cdef vector[string] c_children = self.impl.children
+ children = []
+ return [child.decode() for child in c_children]
+ @property
+ def shape(self):
+ string_shape = self.impl.shape.decode()
+ return [int(i) for i in string_shape.split(' ')]
+ @property
+ def layout(self):
+ return self.impl.layout.decode()
@affinity.setter
def affinity(self, target_affinity):
self.impl.setAffinity(target_affinity.encode())
@@ -80,7 +94,6 @@ cdef class IENetLayer:
def precision(self, precision: str):
self.impl.setPrecision(precision.upper().encode())
-
cdef class InputInfo:
@property
def precision(self):
@@ -105,7 +118,6 @@ cdef class InputInfo:
"Unsupported layout {}! List of supported layouts: {}".format(layout, supported_layouts))
self.impl.setLayout(layout.encode())
-
cdef class OutputInfo:
@property
def precision(self):
@@ -122,20 +134,18 @@ cdef class OutputInfo:
raise AttributeError(
"Unsupported precision {}! List of supported precisions: {}".format(precision, supported_precisions))
self.impl.setPrecision(precision.encode())
- # @layout.setter
- # def layout(self, layout):
- # self.impl.setLayout(layout.encode())
cdef class ExecutableNetwork:
def __init__(self):
self._requests = []
+ self.inputs = []
+ self.outputs = []
def infer(self, inputs=None):
current_request = self.requests[0]
current_request.infer(inputs)
return deepcopy(current_request.outputs)
-
def start_async(self, request_id, inputs=None):
if request_id not in list(range(len(self.requests))):
raise ValueError("Incorrect request_id specified!")
@@ -145,21 +155,25 @@ cdef class ExecutableNetwork:
@property
def requests(self):
- return self._requests
+ requests = []
+ for i in range(deref(self.impl).infer_requests.size()):
+ infer_request = InferRequest()
+ infer_request.impl = &(deref(self.impl).infer_requests[i])
+ infer_request._inputs_list = self.inputs
+ infer_request._outputs_list = self.outputs
+ requests.append(infer_request)
+ return requests
cdef class InferRequest:
def __init__(self):
- self._inputs = {}
- self._outputs = {}
-
- cpdef BlobBuffer _get_input_buffer(self, const string & blob_name):
- cdef BlobBuffer buffer = BlobBuffer()
- buffer.reset(deref(self.impl).getInputBlob(blob_name))
- return buffer
+ self._inputs_list = []
+ self._outputs_list = []
- cpdef BlobBuffer _get_output_buffer(self, const string & blob_name):
+ cpdef BlobBuffer _get_blob_buffer(self, const string & blob_name):
cdef BlobBuffer buffer = BlobBuffer()
- buffer.reset(deref(self.impl).getOutputBlob(blob_name))
+ cdef Blob.Ptr blob_ptr
+ deref(self.impl).getBlobPtr(blob_name, blob_ptr)
+ buffer.reset(blob_ptr)
return buffer
cpdef infer(self, inputs=None):
@@ -192,17 +206,66 @@ cdef class InferRequest:
@property
def inputs(self):
- return self._inputs
+ inputs = {}
+ for input in self._inputs_list:
+ inputs[input] = self._get_blob_buffer(input.encode()).to_numpy()
+ return inputs
@property
def outputs(self):
- return self._outputs
+ outputs = {}
+ for output in self._outputs_list:
+ outputs[output] = self._get_blob_buffer(output.encode()).to_numpy()
+ return deepcopy(outputs)
+
+ def set_batch(self, size):
+ if size <= 0:
+ raise ValueError("Batch size should be positive integer number but {} specified".format(size))
+ deref(self.impl).setBatch(size)
def _fill_inputs(self, inputs):
for k, v in inputs.items():
- self._inputs[k][:] = v
+ self.inputs[k][:] = v
+
+
+class LayerStats:
+ def __init__(self, min: tuple = (), max: tuple = ()):
+ self._min = min
+ self._max = max
+
+ @property
+ def min(self):
+ return self._min
+ @property
+ def max(self):
+ return self._max
+
+
+cdef class LayersStatsMap(dict):
+ def update(self, other=None, **kwargs):
+ super(LayersStatsMap, self).update(other, **kwargs)
+ cdef map[string, map[string, vector[float]]] c_stats_map
+ cdef map[string, vector[float]] c_node_stats
+ for k, v in self.items():
+ c_node_stats["min".encode()] = v.min
+ c_node_stats["max".encode()] = v.max
+ c_stats_map[k.encode()] = c_node_stats
+ self.net_impl.setStats(c_stats_map)
cdef class IENetwork:
+ def __cinit__(self, model: str="", weights: str=""):
+ cdef string model_
+ cdef string weights_
+ if model and weights:
+ if not os.path.isfile(model):
+ raise Exception("Path to the model {} doesn't exists or it's a directory".format(model))
+ if not os.path.isfile(weights):
+ raise Exception("Path to the weights {} doesn't exists or it's a directory".format(weights))
+ model_ = model.encode()
+ weights_ = weights.encode()
+ self.impl = C.IENetwork(model_, weights_)
+ else:
+ self.impl = C.IENetwork()
@property
def name(self):
name = bytes(self.impl.name)
@@ -213,7 +276,7 @@ cdef class IENetwork:
cdef map[string, C.InputInfo] c_inputs = self.impl.getInputs()
inputs = {}
cdef InputInfo in_info
- for input in c_inputs:
+ for input in c_inputs:
in_info = InputInfo()
in_info.impl = input.second
inputs[input.first.decode()] = in_info
@@ -224,7 +287,7 @@ cdef class IENetwork:
cdef map[string, C.OutputInfo] c_outputs = self.impl.getOutputs()
outputs = {}
cdef OutputInfo out_info
- for out in c_outputs:
+ for out in c_outputs:
out_info = OutputInfo()
out_info.impl = out.second
outputs[out.first.decode()] = out_info
@@ -243,23 +306,37 @@ cdef class IENetwork:
@property
def layers(self):
- cdef map[string, C.IENetLayer] c_layers = <map[string, C.IENetLayer]> self.impl.getLayers()
- layers = {}
+ cdef vector[pair[string, C.IENetLayer]] c_layers = self.impl.getLayers()
+ layers = OrderedDict()
cdef IENetLayer net_l = IENetLayer()
for l in c_layers:
net_l = IENetLayer()
net_l.impl = l.second
layers[l.first.decode()] = net_l
return layers
+ @property
+ def stats(self):
+ cdef map[string, map[string, vector[float]]] c_stats_map = self.impl.getStats()
+ py_stats_map = LayersStatsMap()
+ py_stats_map.net_impl = self.impl
+ for it in c_stats_map:
+ stats_map = LayersStatsMap()
+ py_stats_map[it.first.decode()] = LayerStats(min=tuple(it.second["min".encode()]),
+ max=tuple(it.second["max".encode()]))
+ return py_stats_map
@classmethod
def from_ir(cls, model: str, weights: str):
+        warnings.filterwarnings("always", category=DeprecationWarning)
+        warnings.warn("from_ir() method of IENetwork is deprecated. "
+                      "Please use the IENetwork class constructor to create a valid IENetwork instance",
+ DeprecationWarning)
if not os.path.isfile(model):
raise Exception("Path to the model {} doesn't exists or it's a directory".format(model))
if not os.path.isfile(weights):
raise Exception("Path to the weights {} doesn't exists or it's a directory".format(weights))
- net_reader = IENetReader()
- return net_reader.read(model, weights)
+ cdef IENetwork net = IENetwork(model, weights)
+ return net
# TODO: Use enum with precision type instead of srting parameter when python2 support will not be required.
def add_outputs(self, outputs, precision="FP32"):
@@ -273,6 +350,8 @@ cdef class IENetwork:
_outputs.push_back(l.encode())
self.impl.addOutputs(_outputs, precision.upper().encode())
+ def serialize(self, path_to_xml, path_to_bin):
+ self.impl.serialize(path_to_xml.encode(), path_to_bin.encode())
def reshape(self, input_shapes: dict):
cdef map[string, vector[size_t]] c_input_shapes;
cdef vector[size_t] c_shape
@@ -312,8 +391,6 @@ cdef class IEPlugin:
raise ValueError(
"Incorrect number of requests specified: {}. Expected positive integer number.".format(num_requests))
cdef ExecutableNetwork exec_net = ExecutableNetwork()
- cdef vector[string] inputs_list
- cdef vector[string] outputs_list
cdef map[string, string] c_config
if config:
@@ -321,27 +398,8 @@ cdef class IEPlugin:
c_config[to_std_string(k)] = to_std_string(v)
exec_net.impl = move(self.impl.load(network.impl, num_requests, c_config))
-
- requests = []
- for i in range(deref(exec_net.impl).infer_requests.size()):
- infer_request = InferRequest()
- infer_request.impl = &(deref(exec_net.impl).infer_requests[i])
-
- inputs_list = infer_request.impl.getInputsList()
- outputs_list = infer_request.impl.getOutputsList()
-
- for input_b in inputs_list:
- input_s = input_b.decode()
- infer_request._inputs[input_s] = infer_request._get_input_buffer(input_b).to_numpy()
-
- for output_b in outputs_list:
- output_s = output_b.decode()
- infer_request._outputs[output_s] = infer_request._get_output_buffer(output_b).to_numpy()
-
- # create blob buffers
- requests.append(infer_request)
- exec_net._requests = tuple(requests)
-
+ exec_net.inputs = network.inputs.keys()
+ exec_net.outputs = list(network.outputs.keys())
return exec_net
cpdef void set_initial_affinity(self, IENetwork net) except *:
@@ -374,11 +432,6 @@ cdef class IEPlugin:
c_config[to_std_string(k)] = to_std_string(v)
self.impl.setConfig(c_config)
-cdef class IENetReader:
- def read(self, model: str, weights: str) -> IENetwork:
- cdef IENetwork net = IENetwork()
- net.impl = self.impl.read(model.encode(), weights.encode())
- return net
cdef class BlobBuffer:
"""Copy-less accessor for Inference Engine Blob"""
diff --git a/inference-engine/ie_bridges/python/inference_engine/ie_api_impl.cpp b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp
index 798cd7021..296b1bfe4 100644
--- a/inference-engine/ie_bridges/python/inference_engine/ie_api_impl.cpp
+++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.cpp
@@ -1,31 +1,42 @@
-// Copyright (C) 2018 Intel Corporation
+// Copyright (c) 2018 Intel Corporation
//
-// SPDX-License-Identifier: Apache-2.0
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
#include "ie_api_impl.hpp"
#include "hetero/hetero_plugin_config.hpp"
#include "ie_iinfer_request.hpp"
-std::map <std::string,InferenceEngine::Precision> precision_map = {{"FP32", InferenceEngine::Precision::FP32},
- {"FP16", InferenceEngine::Precision::FP16},
- {"Q78", InferenceEngine::Precision::Q78},
- {"I32", InferenceEngine::Precision::I32},
- {"I16", InferenceEngine::Precision::I16},
- {"I8", InferenceEngine::Precision::I8},
- {"U16", InferenceEngine::Precision::U16},
- {"U8", InferenceEngine::Precision::U8}};
-
-std::map <std::string,InferenceEngine::Layout> layout_map = {{"ANY", InferenceEngine::Layout::ANY},
- {"NCHW", InferenceEngine::Layout::NCHW},
- {"NHWC", InferenceEngine::Layout::NHWC},
- {"OIHW", InferenceEngine::Layout::OIHW},
- {"C", InferenceEngine::Layout::C},
- {"CHW", InferenceEngine::Layout::CHW},
- {"HW", InferenceEngine::Layout::HW},
- {"NC", InferenceEngine::Layout::NC},
- {"CN", InferenceEngine::Layout::CN},
- {"BLOCKED", InferenceEngine::Layout::BLOCKED}};
-#define stringify( name ) # name
+#include "details/ie_cnn_network_tools.h"
+
+std::map<std::string, InferenceEngine::Precision> precision_map = {{"FP32", InferenceEngine::Precision::FP32},
+ {"FP16", InferenceEngine::Precision::FP16},
+ {"Q78", InferenceEngine::Precision::Q78},
+ {"I32", InferenceEngine::Precision::I32},
+ {"I16", InferenceEngine::Precision::I16},
+ {"I8", InferenceEngine::Precision::I8},
+ {"U16", InferenceEngine::Precision::U16},
+ {"U8", InferenceEngine::Precision::U8}};
+
+std::map<std::string, InferenceEngine::Layout> layout_map = {{"ANY", InferenceEngine::Layout::ANY},
+ {"NCHW", InferenceEngine::Layout::NCHW},
+ {"NHWC", InferenceEngine::Layout::NHWC},
+ {"OIHW", InferenceEngine::Layout::OIHW},
+ {"C", InferenceEngine::Layout::C},
+ {"CHW", InferenceEngine::Layout::CHW},
+ {"HW", InferenceEngine::Layout::HW},
+ {"NC", InferenceEngine::Layout::NC},
+ {"CN", InferenceEngine::Layout::CN},
+ {"BLOCKED", InferenceEngine::Layout::BLOCKED}};
+#define stringify(name) # name
#define IE_CHECK_CALL(expr) { \
auto ret = (expr); \
if (ret != InferenceEngine::StatusCode::OK) { \
@@ -34,119 +45,121 @@ std::map <std::string,InferenceEngine::Layout> layout_map = {{"ANY", InferenceEn
} \
-
-InferenceEnginePython::IENetwork InferenceEnginePython::IENetReader::read(std::string const &model,
- std::string const &weights)
-{
+InferenceEnginePython::IENetwork::IENetwork(const std::string &model, const std::string &weights) {
InferenceEngine::CNNNetReader net_reader;
net_reader.ReadNetwork(model);
net_reader.ReadWeights(weights);
- const std::string &net_name = net_reader.getName();
- InferenceEngine::CNNNetwork network = net_reader.getNetwork();
- std::size_t batch_size = network.getBatchSize();
- return {network, net_name, batch_size};
+ name = net_reader.getName();
+ actual = net_reader.getNetwork();
+ batch_size = actual.getBatchSize();
}
-std::map<std::string, InferenceEnginePython::IENetLayer> InferenceEnginePython::IENetwork::getLayers()
-{
- std::map<std::string, InferenceEnginePython::IENetLayer> result;
- std::unordered_set<std::string> visisted;
- const InferenceEngine::InputsDataMap &networkInputs = actual.getInputsInfo();
+void InferenceEnginePython::IENetwork::serialize(const std::string &path_to_xml, const std::string &path_to_bin) {
+ actual.serialize(path_to_xml, path_to_bin);
+}
- using CNNLayerPtrCref = const InferenceEngine::CNNLayerPtr &;
- std::function<void(CNNLayerPtrCref)> DFS = [&](CNNLayerPtrCref layer) {
+const std::vector<std::pair<std::string, InferenceEnginePython::IENetLayer>>
+InferenceEnginePython::IENetwork::getLayers() {
+ std::vector<std::pair<std::string, InferenceEnginePython::IENetLayer>> result;
+ std::vector<InferenceEngine::CNNLayerPtr> sorted_layers = InferenceEngine::details::CNNNetSortTopologically(actual);
+ for (const auto &layer : sorted_layers) {
InferenceEnginePython::IENetLayer layer_info;
- /* Assumes no cycles in graph */
- for (auto &od : layer->outData)
- {
- for (auto nl : od->getInputTo())
- {
- auto i = visisted.find(nl.second->name);
- if (i != visisted.end())
- {
- continue;
- }
- DFS(nl.second);
- }
- }
- visisted.emplace(layer->name);
+
layer_info.layer_ptr = layer;
+ layer_info.network_ptr = actual;
layer_info.name = layer->name;
layer_info.type = layer->type;
- std::string precision = layer->precision.name();
- layer_info.precision = precision;
+ layer_info.precision = layer->precision.name();
layer_info.params = layer->params;
layer_info.affinity = layer->affinity;
- result[layer->name] = layer_info;
- };
-
- std::set<InferenceEngine::CNNLayerPtr> inputs;
- for (auto input : networkInputs) {
- for (auto l : input.second->getInputData()->inputTo) {
- inputs.insert(l.second);
+ std::vector<std::string> parents;
+ for (const auto &i : layer->insData) {
+ auto data = i.lock();
+ if (data) {
+ parents.emplace_back(data->getName());
+ }
}
+ layer_info.parents = parents;
+ std::vector<std::string> children;
+ for (const auto &data : layer->outData) {
+ auto inputTo = data->getInputTo();
+ for (auto layer_iter : inputTo) {
+ InferenceEngine::CNNLayerPtr layer_in_data = layer_iter.second;
+ if (!layer_in_data) {
+ THROW_IE_EXCEPTION << "Layer which takes data " << data->name << " is nullptr";
+ }
+ children.emplace_back(layer_in_data->name);
+ }
+ }
+ layer_info.children = children;
+ const InferenceEngine::TensorDesc &inputTensorDesc = layer->outData[0]->getTensorDesc();
+ for (const auto &it : layout_map) {
+ if (it.second == inputTensorDesc.getLayout()) {
+ layer_info.layout = it.first;
+ }
+ }
+ auto dims = inputTensorDesc.getDims();
+ std::string string_dims = "";
+ for (const auto &it : dims) {
+ string_dims += std::to_string(it) + " ";
+ }
+ string_dims = string_dims.substr(0, string_dims.size() - 1);
+ layer_info.shape = string_dims;
+ result.emplace_back(std::make_pair(layer->name, layer_info));
}
-
- for (auto &layer : inputs)
- {
- DFS(layer);
- }
-
return result;
-
}
-std::map<std::string, InferenceEnginePython::InputInfo> InferenceEnginePython::IENetwork::getInputs(){
+
+const std::map<std::string, InferenceEnginePython::InputInfo> InferenceEnginePython::IENetwork::getInputs() {
std::map<std::string, InferenceEnginePython::InputInfo> inputs;
const InferenceEngine::InputsDataMap &inputsInfo = actual.getInputsInfo();
- for (auto & in : inputsInfo){
+ for (auto &in : inputsInfo) {
InferenceEnginePython::InputInfo info;
info.actual = *in.second;
const InferenceEngine::TensorDesc &inputTensorDesc = in.second->getTensorDesc();
info.dims = inputTensorDesc.getDims();
- for (auto it : precision_map )
+ for (auto it : precision_map)
if (it.second == in.second->getPrecision())
- info.precision = it.first;
- for (auto it : layout_map )
+ info.precision = it.first;
+ for (auto it : layout_map)
if (it.second == in.second->getLayout())
- info.layout = it.first;
+ info.layout = it.first;
inputs[in.first] = info;
}
return inputs;
}
-std::map<std::string, InferenceEnginePython::OutputInfo> InferenceEnginePython::IENetwork::getOutputs(){
+const std::map<std::string, InferenceEnginePython::OutputInfo> InferenceEnginePython::IENetwork::getOutputs() {
std::map<std::string, InferenceEnginePython::OutputInfo> outputs;
const InferenceEngine::OutputsDataMap &outputsInfo = actual.getOutputsInfo();
- for (auto & out : outputsInfo){
+ for (auto &out : outputsInfo) {
InferenceEnginePython::OutputInfo info;
info.actual = out.second;
const InferenceEngine::TensorDesc &inputTensorDesc = out.second->getTensorDesc();
info.dims = inputTensorDesc.getDims();
- for (auto it : precision_map )
+ for (auto it : precision_map)
if (it.second == out.second->getPrecision())
- info.precision = it.first;
- for (auto it : layout_map )
+ info.precision = it.first;
+ for (auto it : layout_map)
if (it.second == out.second->getLayout())
- info.layout = it.first;
+ info.layout = it.first;
outputs[out.first] = info;
}
return outputs;
}
-void InferenceEnginePython::IENetwork::addOutputs(const std::vector<std::string> & out_layers, const std::string &precision)
-{
-
- for (auto && l : out_layers)
- {
+void
+InferenceEnginePython::IENetwork::addOutputs(const std::vector<std::string> &out_layers, const std::string &precision) {
+ for (auto &&l : out_layers) {
InferenceEngine::OutputsDataMap outputsDataMap = actual.getOutputsInfo();
- if (outputsDataMap.find(l) != outputsDataMap.end())
- {
+ if (outputsDataMap.find(l) != outputsDataMap.end()) {
continue;
}
InferenceEngine::CNNLayerPtr cnnLayer = actual.getLayerByName(l.c_str());
std::vector<InferenceEngine::DataPtr> outData = cnnLayer->outData;
if (outData.size() != 1) {
- std::cout << "Layer " << l << " has " << outData.size() << " output blobs and can not be set as output." << std::endl;
+ std::cout << "Layer " << l << " has " << outData.size() << " output blobs and can not be set as output."
+ << std::endl;
continue;
}
actual.addOutput(l);
@@ -155,29 +168,59 @@ void InferenceEnginePython::IENetwork::addOutputs(const std::vector<std::string>
}
}
-void InferenceEnginePython::IENetwork::setBatch(const size_t size)
-{
+void InferenceEnginePython::IENetwork::setBatch(const size_t size) {
actual.setBatchSize(size);
}
-void InferenceEnginePython::IENetwork::reshape(const std::map<std::string, std::vector<size_t>> & input_shapes){
+
+void InferenceEnginePython::IENetwork::reshape(const std::map<std::string, std::vector<size_t>> &input_shapes) {
actual.reshape(input_shapes);
}
-void InferenceEnginePython::InputInfo::setPrecision(std::string precision){
+const std::map<std::string, std::map<std::string, std::vector<float>>> InferenceEnginePython::IENetwork::getStats() {
+ InferenceEngine::ICNNNetworkStats *pstats = nullptr;
+ InferenceEngine::ResponseDesc response;
+ IE_CHECK_CALL(((InferenceEngine::ICNNNetwork &) actual).getStats(&pstats, &response));
+ auto statsMap = pstats->getNodesStats();
+ std::map<std::string, std::map<std::string, std::vector<float>>> map;
+ for (const auto &it : statsMap) {
+ std::map<std::string, std::vector<float>> stats;
+ stats.emplace("min", it.second->_minOutputs);
+ stats.emplace("max", it.second->_maxOutputs);
+ map.emplace(it.first, stats);
+ }
+ return map;
+}
+
+void
+InferenceEnginePython::IENetwork::setStats(
+ const std::map<std::string, std::map<std::string, std::vector<float>>> &stats) {
+ InferenceEngine::ICNNNetworkStats *pstats = nullptr;
+ InferenceEngine::ResponseDesc response;
+ IE_CHECK_CALL(((InferenceEngine::ICNNNetwork &) actual).getStats(&pstats, &response));
+ std::map<std::string, InferenceEngine::NetworkNodeStatsPtr> newNetNodesStats;
+ for (const auto &it : stats) {
+ InferenceEngine::NetworkNodeStatsPtr nodeStats = InferenceEngine::NetworkNodeStatsPtr(
+ new InferenceEngine::NetworkNodeStats());
+ newNetNodesStats.emplace(it.first, nodeStats);
+ nodeStats->_minOutputs = it.second.at("min");
+ nodeStats->_maxOutputs = it.second.at("max");
+ }
+ pstats->setNodesStats(newNetNodesStats);
+}
+
+void InferenceEnginePython::InputInfo::setPrecision(std::string precision) {
actual.setPrecision(precision_map[precision]);
}
-void InferenceEnginePython::InputInfo::setLayout(std::string layout){
+void InferenceEnginePython::InputInfo::setLayout(std::string layout) {
actual.setLayout(layout_map[layout]);
}
-void InferenceEnginePython::OutputInfo::setPrecision(std::string precision){
+void InferenceEnginePython::OutputInfo::setPrecision(std::string precision) {
actual->setPrecision(precision_map[precision]);
}
-InferenceEnginePython::IEPlugin::IEPlugin(const std::string &device, const std::vector<std::string> &plugin_dirs)
-{
-
+InferenceEnginePython::IEPlugin::IEPlugin(const std::string &device, const std::vector<std::string> &plugin_dirs) {
InferenceEngine::PluginDispatcher dispatcher{plugin_dirs};
actual = dispatcher.getPluginByDevice(device);
const InferenceEngine::Version *pluginVersion;
@@ -188,65 +231,63 @@ InferenceEnginePython::IEPlugin::IEPlugin(const std::string &device, const std::
device_name = device;
}
-void InferenceEnginePython::IEPlugin::setInitialAffinity(InferenceEnginePython::IENetwork &net)
-{
+void InferenceEnginePython::IEPlugin::setInitialAffinity(const InferenceEnginePython::IENetwork &net) {
InferenceEngine::HeteroPluginPtr hetero_plugin(actual);
InferenceEngine::ResponseDesc response;
auto &network = net.actual;
IE_CHECK_CALL(hetero_plugin->SetAffinity(network, {}, &response));
}
-std::set<std::string> InferenceEnginePython::IEPlugin::queryNetwork(InferenceEnginePython::IENetwork &net)
-{
- InferenceEngine::CNNNetwork &network = net.actual;
+
+std::set<std::string> InferenceEnginePython::IEPlugin::queryNetwork(const InferenceEnginePython::IENetwork &net) {
+ const InferenceEngine::CNNNetwork &network = net.actual;
InferenceEngine::QueryNetworkResult queryRes;
actual->QueryNetwork(network, queryRes);
return queryRes.supportedLayers;
}
-void InferenceEnginePython::IENetLayer::setAffinity(const std::string & target_affinity){
+void InferenceEnginePython::IENetLayer::setAffinity(const std::string &target_affinity) {
layer_ptr->affinity = target_affinity;
}
-void InferenceEnginePython::IENetLayer::setParams(const std::map<std::string, std::string> & params_map){
+void InferenceEnginePython::IENetLayer::setParams(const std::map<std::string, std::string> &params_map) {
layer_ptr->params = params_map;
}
-std::map<std::string, InferenceEngine::Blob::Ptr> InferenceEnginePython::IENetLayer::getWeights(){
+std::map<std::string, InferenceEngine::Blob::Ptr> InferenceEnginePython::IENetLayer::getWeights() {
auto w_layer = std::dynamic_pointer_cast<InferenceEngine::WeightableLayer>(layer_ptr);
// If the current layer is weightable, gather weights and biases from the casted WeightableLayer; all other blobs
// are considered custom and gathered from the blobs field of CNNLayer.
std::map<std::string, InferenceEngine::Blob::Ptr> weights;
- if (w_layer != nullptr){
- if (w_layer->_weights != nullptr){
+ if (w_layer != nullptr) {
+ if (w_layer->_weights != nullptr) {
weights["weights"] = w_layer->_weights;
}
- if (w_layer->_biases != nullptr){
+ if (w_layer->_biases != nullptr) {
weights["biases"] = w_layer->_biases;
}
- for (auto it : w_layer->blobs){
- if (it.first == "weights" || it.first == "biases"){
+ for (auto it : w_layer->blobs) {
+ if (it.first == "weights" || it.first == "biases") {
continue;
}
weights[it.first] = it.second;
}
- }
- // Otherwise all layer's blobs are considered as custom and gathered from CNNLayer
- else {
+ } else {
+ // Otherwise all layer's blobs are considered as custom and gathered from CNNLayer
std::map<std::string, InferenceEngine::Blob::Ptr> map_placeholder;
- weights = map_placeholder; // If layer has no blobs it should not be missed from weights map
- for (auto it : layer_ptr->blobs){
+ weights = map_placeholder; // If the layer has no blobs it should still be present in the weights map
+ for (auto it : layer_ptr->blobs) {
weights[it.first] = it.second;
}
}
return weights;
}
-void InferenceEnginePython::IENetLayer::setPrecision(std::string precision){
+void InferenceEnginePython::IENetLayer::setPrecision(std::string precision) {
layer_ptr->precision = precision_map[precision];
}
-void InferenceEnginePython::IEPlugin::addCpuExtension(const std::string &extension_path)
-{
+
+void InferenceEnginePython::IEPlugin::addCpuExtension(const std::string &extension_path) {
InferenceEngine::ResponseDesc response;
auto extension_ptr = InferenceEngine::make_so_pointer<InferenceEngine::IExtension>(extension_path);
auto extension = std::dynamic_pointer_cast<InferenceEngine::IExtension>(extension_ptr);
@@ -254,78 +295,49 @@ void InferenceEnginePython::IEPlugin::addCpuExtension(const std::string &extensi
}
std::unique_ptr<InferenceEnginePython::IEExecNetwork>
-InferenceEnginePython::IEPlugin::load(InferenceEnginePython::IENetwork &net,
+InferenceEnginePython::IEPlugin::load(const InferenceEnginePython::IENetwork &net,
int num_requests,
- const std::map<std::string, std::string> &config)
-{
+ const std::map<std::string, std::string> &config) {
InferenceEngine::ResponseDesc response;
- auto exec_network = InferenceEnginePython::make_unique<InferenceEnginePython::IEExecNetwork>(net.name, num_requests);
+ auto exec_network = InferenceEnginePython::make_unique<InferenceEnginePython::IEExecNetwork>(net.name,
+ num_requests);
IE_CHECK_CALL(actual->LoadNetwork(exec_network->actual, net.actual, config, &response))
- const InferenceEngine::InputsDataMap &inputs_info = net.actual.getInputsInfo();
- const InferenceEngine::OutputsDataMap &outputs_info = net.actual.getOutputsInfo();
-
for (size_t i = 0; i < num_requests; ++i) {
InferRequestWrap &infer_request = exec_network->infer_requests[i];
IE_CHECK_CALL(exec_network->actual->CreateInferRequest(infer_request.request_ptr, &response))
-
- for (const auto& input : inputs_info) {
- infer_request.inputs[input.first] = nullptr;
- infer_request.request_ptr->GetBlob(input.first.c_str(), infer_request.inputs[input.first], &response);
- }
- for (const auto& output : outputs_info) {
- infer_request.request_ptr->GetBlob(output.first.c_str(), infer_request.outputs[output.first], &response);
- }
}
return exec_network;
}
-void InferenceEnginePython::IEPlugin::setConfig(const std::map<std::string, std::string> & config) {
+void InferenceEnginePython::IEPlugin::setConfig(const std::map<std::string, std::string> &config) {
InferenceEngine::ResponseDesc response;
IE_CHECK_CALL(actual->SetConfig(config, &response))
}
InferenceEnginePython::IEExecNetwork::IEExecNetwork(const std::string &name, size_t num_requests) :
- infer_requests(num_requests), name(name)
-{
+ infer_requests(num_requests), name(name) {
}
-void InferenceEnginePython::IEExecNetwork::infer()
-{
+void InferenceEnginePython::IEExecNetwork::infer() {
InferenceEngine::ResponseDesc response;
InferRequestWrap &request = infer_requests[0];
request.request_ptr->Infer(&response);
}
-InferenceEngine::Blob::Ptr &InferenceEnginePython::InferRequestWrap::getInputBlob(const std::string &blob_name)
+void InferenceEnginePython::InferRequestWrap::getBlobPtr(const std::string &blob_name, InferenceEngine::Blob::Ptr &blob_ptr)
{
- return inputs.at(blob_name);
-}
-
-InferenceEngine::Blob::Ptr &InferenceEnginePython::InferRequestWrap::getOutputBlob(const std::string &blob_name)
-{
- return outputs.at(blob_name);
+ InferenceEngine::ResponseDesc response;
+ IE_CHECK_CALL(request_ptr->GetBlob(blob_name.c_str(), blob_ptr, &response));
}
-std::vector<std::string> InferenceEnginePython::InferRequestWrap::getInputsList() {
- std::vector<std::string> inputs_list;
- inputs_list.reserve(inputs.size());
- std::transform(inputs.begin(), inputs.end(), std::back_inserter(inputs_list), [] (InferenceEngine::BlobMap::value_type it) -> std::string {
- return it.first;
- });
- return inputs_list;
-}
-std::vector<std::string> InferenceEnginePython::InferRequestWrap::getOutputsList() {
- std::vector<std::string> outputs_list;
- outputs_list.reserve(inputs.size());
- std::transform(outputs.begin(), outputs.end(), std::back_inserter(outputs_list), [] (InferenceEngine::BlobMap::value_type it) -> std::string {
- return it.first;
- });
- return outputs_list;
+void InferenceEnginePython::InferRequestWrap::setBatch(int size) {
+ InferenceEngine::ResponseDesc response;
+ IE_CHECK_CALL(request_ptr->SetBatch(size, &response));
}
void InferenceEnginePython::InferRequestWrap::infer() {
@@ -344,13 +356,14 @@ int InferenceEnginePython::InferRequestWrap::wait(int64_t timeout) {
return static_cast<int >(code);
}
-std::map<std::string, InferenceEnginePython::ProfileInfo> InferenceEnginePython::InferRequestWrap::getPerformanceCounts(){
+std::map<std::string, InferenceEnginePython::ProfileInfo>
+InferenceEnginePython::InferRequestWrap::getPerformanceCounts() {
std::map<std::string, InferenceEngine::InferenceEngineProfileInfo> perf_counts;
InferenceEngine::ResponseDesc response;
request_ptr->GetPerformanceCounts(perf_counts, &response);
std::map<std::string, InferenceEnginePython::ProfileInfo> perf_map;
- for (auto it : perf_counts){
+ for (auto it : perf_counts) {
InferenceEnginePython::ProfileInfo profile_info;
switch (it.second.status) {
case InferenceEngine::InferenceEngineProfileInfo::EXECUTED:
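The getLayers() rewrite above drops the hand-rolled DFS in favour of CNNNetSortTopologically and returns an ordered (name, layer) vector enriched with parents, children, shape and layout, which the .pyx layers property exposes as an OrderedDict. A sketch of how that looks from Python; the per-layer attribute names are assumed to mirror the C++ IENetLayer fields declared in the header and .pxd.

    from openvino.inference_engine import IENetwork

    net = IENetwork(model="model.xml", weights="model.bin")

    # layers is now an OrderedDict in topological order, so iteration visits
    # producers before their consumers.
    for name, layer in net.layers.items():
        print(name, layer.type, layer.precision)

    # Graph metadata added by this change: parents, children, shape, layout.
    first = next(iter(net.layers.values()))
    print(first.shape, first.layout, first.parents, first.children)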
diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp
new file mode 100644
index 000000000..7bb2dd37a
--- /dev/null
+++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl.hpp
@@ -0,0 +1,174 @@
+// Copyright (c) 2018 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <ie_extension.h>
+#include <iterator>
+
+#include <string>
+#include <utility>
+#include <map>
+#include <vector>
+#include <set>
+
+#include <iostream>
+#include <algorithm>
+
+#include <sstream>
+#include <inference_engine.hpp>
+
+namespace InferenceEnginePython {
+struct IENetLayer {
+ InferenceEngine::CNNLayerPtr layer_ptr;
+ InferenceEngine::CNNNetwork network_ptr;
+ std::string name;
+ std::string type;
+ std::string precision;
+ std::string shape;
+ std::string layout;
+ std::vector<std::string> children;
+ std::vector<std::string> parents;
+ std::string affinity;
+ std::map<std::string, std::string> params;
+
+ void setAffinity(const std::string &target_affinity);
+
+ void setParams(const std::map<std::string, std::string> &params_map);
+
+ std::map<std::string, InferenceEngine::Blob::Ptr> getWeights();
+
+ void setPrecision(std::string precision);
+};
+
+struct InputInfo {
+ InferenceEngine::InputInfo actual;
+ std::vector<size_t> dims;
+ std::string precision;
+ std::string layout;
+
+ void setPrecision(std::string precision);
+
+ void setLayout(std::string layout);
+};
+
+struct OutputInfo {
+ InferenceEngine::DataPtr actual;
+ std::vector<size_t> dims;
+ std::string precision;
+ std::string layout;
+
+ void setPrecision(std::string precision);
+};
+
+struct ProfileInfo {
+ std::string status;
+ std::string exec_type;
+ std::string layer_type;
+ int64_t real_time;
+ int64_t cpu_time;
+ unsigned execution_index;
+};
+
+struct IENetwork {
+ InferenceEngine::CNNNetwork actual;
+ std::string name;
+ std::size_t batch_size;
+
+ void setBatch(const size_t size);
+
+ void addOutputs(const std::vector<std::string> &out_layers, const std::string &precision);
+
+ const std::vector<std::pair<std::string, InferenceEnginePython::IENetLayer>> getLayers();
+
+ const std::map<std::string, InferenceEnginePython::InputInfo> getInputs();
+
+ const std::map<std::string, InferenceEnginePython::OutputInfo> getOutputs();
+
+ void reshape(const std::map<std::string, std::vector<size_t>> &input_shapes);
+
+ void serialize(const std::string &path_to_xml, const std::string &path_to_bin);
+
+ void setStats(const std::map<std::string, std::map<std::string, std::vector<float>>> &stats);
+
+ const std::map<std::string, std::map<std::string, std::vector<float>>> getStats();
+
+ IENetwork(const std::string &model, const std::string &weights);
+
+ IENetwork() = default;
+};
+
+struct InferRequestWrap {
+ InferenceEngine::IInferRequest::Ptr request_ptr;
+
+ void infer();
+
+ void infer_async();
+
+ int wait(int64_t timeout);
+
+ void getBlobPtr(const std::string &blob_name, InferenceEngine::Blob::Ptr &blob_ptr);
+
+ void setBatch(int size);
+
+ std::map<std::string, InferenceEnginePython::ProfileInfo> getPerformanceCounts();
+};
+
+
+struct IEExecNetwork {
+ InferenceEngine::IExecutableNetwork::Ptr actual;
+ std::vector<InferRequestWrap> infer_requests;
+ std::string name;
+
+ IEExecNetwork(const std::string &name, size_t num_requests);
+
+ void infer();
+};
+
+
+struct IEPlugin {
+ std::unique_ptr<InferenceEnginePython::IEExecNetwork> load(const InferenceEnginePython::IENetwork &net,
+ int num_requests,
+ const std::map<std::string, std::string> &config);
+
+ std::string device_name;
+ std::string version;
+
+ void setConfig(const std::map<std::string, std::string> &);
+
+ void addCpuExtension(const std::string &extension_path);
+
+ void setInitialAffinity(const InferenceEnginePython::IENetwork &net);
+
+ IEPlugin(const std::string &device, const std::vector<std::string> &plugin_dirs);
+
+ IEPlugin() = default;
+
+ std::set<std::string> queryNetwork(const InferenceEnginePython::IENetwork &net);
+
+ InferenceEngine::InferenceEnginePluginPtr actual;
+};
+
+template<class T>
+T *get_buffer(InferenceEngine::Blob &blob) {
+ return blob.buffer().as<T *>();
+}
+
+template<class T, class... Args>
+std::unique_ptr<T> make_unique(Args &&... args) {
+ return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
+}
+
+std::string get_version();
+}; // namespace InferenceEnginePython
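getStats()/setStats() above expose the network's int8 calibration statistics as a nested name -> {"min", "max"} map, surfaced in Python through the stats property and the LayersStatsMap/LayerStats helpers. A hedged sketch follows; the LayerStats import path and the min/max attribute names are assumptions based on the constructor keywords used in the .pyx, and writing through the map is assumed to forward to setStats().

    from openvino.inference_engine import IENetwork, LayerStats

    net = IENetwork(model="model.xml", weights="model.bin")

    # Read per-layer min/max output statistics (backed by getStats()).
    for layer_name, stats in net.stats.items():
        print(layer_name, stats.min, stats.max)

    # Illustrative write: "conv1" and the per-channel tuples are placeholder values.
    net.stats.update({"conv1": LayerStats(min=(-1.0,), max=(1.0,))})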
diff --git a/inference-engine/ie_bridges/python/inference_engine/ie_api_impl_defs.pxd b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl_defs.pxd
index ea5e2bb60..78f2a62a0 100644
--- a/inference-engine/ie_bridges/python/inference_engine/ie_api_impl_defs.pxd
+++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/ie_api_impl_defs.pxd
@@ -1,8 +1,3 @@
-# Copyright (C) 2018 Intel Corporation
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
from libc.stddef cimport size_t
from libcpp.string cimport string
from libcpp.vector cimport vector
@@ -10,7 +5,6 @@ from libcpp.map cimport map
from libcpp.set cimport set
from libcpp.pair cimport pair
from libcpp.memory cimport unique_ptr, shared_ptr
-from libcpp cimport bool
from libc.stdint cimport int64_t
@@ -28,7 +22,7 @@ cdef extern from "<inference_engine.hpp>" namespace "InferenceEngine":
size_t element_size() const
cdef cppclass Precision:
- const char* name() const
+ const char*name() const
cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
@@ -37,9 +31,11 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
string type
string precision
string affinity
+ string shape
+ string layout
+ vector[string] children
+ vector[string] parents
map[string, string] params
- # map[string, BlobInfo] blob_info
- # map[string, Blob.Ptr] weights;
void setAffinity(const string & target_affinity) except +
void setParams(const map[string, string] & params_map) except +
map[string, Blob.Ptr] getWeights() except +
@@ -58,7 +54,6 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
string layout
void setPrecision(string precision)
-
cdef cppclass ProfileInfo:
string status
string exec_type
@@ -68,51 +63,50 @@ cdef extern from "ie_api_impl.hpp" namespace "InferenceEnginePython":
unsigned int execution_index
cdef cppclass WeightsInfo:
- Blob.Ptr &weights;
- Blob.Ptr &biases;
+ Blob.Ptr & weights;
+ Blob.Ptr & biases;
map[string, Blob.Ptr] custom_blobs;
-
cdef cppclass IEExecNetwork:
vector[InferRequestWrap] infer_requests
cdef cppclass IENetwork:
+ IENetwork() except +
+ IENetwork(const string &, const string &) except +
string name
size_t batch_size
map[string, vector[size_t]] inputs
- map[string, IENetLayer] getLayers() except +
+ const vector[pair[string, IENetLayer]] getLayers() except +
map[string, InputInfo] getInputs() except +
map[string, OutputInfo] getOutputs() except +
void addOutputs(vector[string] &, string &) except +
- void setAffinity(map[string, string] &types_affinity_map, map[string, string] &layers_affinity_map) except +
+ void setAffinity(map[string, string] & types_affinity_map, map[string, string] & layers_affinity_map) except +
void setBatch(size_t size) except +
void setLayerParams(map[string, map[string, string]] params_map) except +
+ void serialize(const string& path_to_xml, const string& path_to_bin) except +
void reshape(map[string, vector[size_t]] input_shapes) except +
+ void setStats(map[string, map[string, vector[float]]] & stats) except +
+ map[string, map[string, vector[float]]] getStats() except +
cdef cppclass IEPlugin:
IEPlugin() except +
IEPlugin(const string &, const vector[string] &) except +
unique_ptr[IEExecNetwork] load(IENetwork & net, int num_requests, const map[string, string]& config) except +
void addCpuExtension(const string &) except +
- void setConfig(const map[string, string]&) except +
+ void setConfig(const map[string, string] &) except +
void setInitialAffinity(IENetwork & net) except +
- set[string] queryNetwork(const IENetwork &net) except +
+ set[string] queryNetwork(const IENetwork & net) except +
string device_name
string version
- cdef cppclass IENetReader:
- IENetwork read(const string &, const string &) except +
-
cdef cppclass InferRequestWrap:
- vector[string] getInputsList() except +
- vector[string] getOutputsList() except +
- Blob.Ptr& getOutputBlob(const string &blob_name) except +
- Blob.Ptr& getInputBlob(const string &blob_name) except +
+ void getBlobPtr(const string &blob_name, Blob.Ptr &blob_ptr)
map[string, ProfileInfo] getPerformanceCounts() except +
void infer() except +
void infer_async() except +
int wait(int64_t timeout) except +
+ void setBatch(int size) except +
- cdef T* get_buffer[T](Blob &)
+ cdef T*get_buffer[T](Blob &)
cdef string get_version()
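On the request side, the dedicated getInputBlob()/getOutputBlob() accessors and the eager per-request numpy buffers are gone: blobs are now fetched on demand through getBlobPtr(), setBatch() is forwarded per request, and the executable network keeps plain input/output name lists. An end-to-end sketch under those assumptions; the IEPlugin keyword names, the requests/set_batch/infer attribute names and the input shape are illustrative and not confirmed by this diff.

    import numpy as np
    from openvino.inference_engine import IENetwork, IEPlugin

    net = IENetwork(model="model.xml", weights="model.bin")
    plugin = IEPlugin(device="CPU")
    exec_net = plugin.load(network=net, num_requests=2)

    # exec_net.inputs / exec_net.outputs are now plain name lists copied from the
    # IENetwork rather than pre-allocated buffers.
    input_name = next(iter(exec_net.inputs))
    output_name = exec_net.outputs[0]

    # Placeholder input; a typical NCHW classification shape is assumed.
    image = np.zeros((1, 3, 224, 224), dtype=np.float32)

    request = exec_net.requests[0]
    request.set_batch(1)                      # forwards to InferRequestWrap::setBatch
    result = exec_net.infer({input_name: image})
    print(result[output_name].shape)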