summaryrefslogtreecommitdiff
path: root/inference-engine/thirdparty/clDNN/api/C
diff options
context:
space:
mode:
Diffstat (limited to 'inference-engine/thirdparty/clDNN/api/C')
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/activation.h65
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/activation_grad.h60
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/apply_adam.h74
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/arg_max_min.h72
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/average_unpooling.h55
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/batch_norm.h63
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/batch_norm_grad.h51
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/cldnn.h758
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/concatenation.h76
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/convolution.h80
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/convolution_grad_input.h60
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/convolution_grad_weights.h69
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/crop.h66
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/custom_gpu_primitive.h70
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/data.h54
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/deconvolution.h69
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/detection_output.h90
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/eltwise.h84
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/embed.h51
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/fully_connected.h65
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/fully_connected_grad_input.h49
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/fully_connected_grad_weights.h57
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/input_layout.h54
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/lookup_table.h61
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/lrn.h73
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/lstm.h136
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/max_unpooling.h60
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/mutable_data.h63
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/mvn.h55
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/normalize.h70
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/permute.h56
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/pooling.h75
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/prior_box.h72
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/proposal.h56
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/region_yolo.h62
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/reorder.h60
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/reorg_yolo.h55
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/reshape.h52
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/roi_pooling.h59
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/scale.h61
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/scale_grad_input.h48
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/scale_grad_weights.h57
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/softmax.h72
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/softmax_loss_grad.h49
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/split.h71
-rw-r--r--inference-engine/thirdparty/clDNN/api/C/upsampling.h67
46 files changed, 3682 insertions, 0 deletions
diff --git a/inference-engine/thirdparty/clDNN/api/C/activation.h b/inference-engine/thirdparty/clDNN/api/C/activation.h
new file mode 100644
index 000000000..86f7ba30f
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/activation.h
@@ -0,0 +1,65 @@
+/*
+// Copyright (c) 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef ACTIVATION_H
+#define ACTIVATION_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Activation using rectified linear unit or parameterized rectified linear unit.
+/// @details Can get one negative slope or negative slope per channel.
+/// @par Algorithm:
+/// out(i,x,y) = max(0, in(i,x,y)) + slope(i) * min(0, in(i,x,y))
+/// @par Where:
+/// @li out(i,x,y) : value at x, y from i-th feature map after activation.
+/// @li in(i,x,y) : value at x, y from i-th feature map before activation.
+/// @li slope(i) : the slope value of the i-th feature map (can be shared across channels or one slope per channel).
+CLDNN_BEGIN_PRIMITIVE_DESC(activation)
+/// @brief activation function.
+cldnn_activation_func activation_func;
+/// @brief Activation additional params.
+/// activation_relu_negative_slope - additional_params.a is a negative slope
+/// activation_brelu - additional_params.a is a upper bound
+/// activation_linear - additional_params.a/b uses as a*val + b
+cldnn_activation_additional_params additional_params;
+/// @brief Activation additional params stored on a memory object
+/// activation_relu_negative_slope - negative slope per feature map
+/// activation_brelu - upper bound per feature map
+/// activation_linear - a,b per feature map
+cldnn_primitive_id additional_params_input;
+CLDNN_END_PRIMITIVE_DESC(activation)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(activation);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* ACTIVATION_H */
diff --git a/inference-engine/thirdparty/clDNN/api/C/activation_grad.h b/inference-engine/thirdparty/clDNN/api/C/activation_grad.h
new file mode 100644
index 000000000..38e183a50
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/activation_grad.h
@@ -0,0 +1,60 @@
+/*
+// Copyright (c) 2018 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef ACTIVATION_GRAD_H
+#define ACTIVATION_GRAD_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Activation gradient for rectified linear unit or parameterized rectified linear unit.
+/// @par Algorithm:
+/// out(i,x,y) = input_gradient(i,x,y) * ((input(i,x,y) > 0) + slope(i) * (input(i,x,y) <= 0))
+/// @par Where:
+/// @li out(i,x,y) : value at x, y from i-th feature map after activation.
+/// @li in(i,x,y) : value at x, y from i-th feature map before activation.
+/// @li slope(i) : the slope value of the i-th feature map (can be shared across channels or one slope per channel).
+CLDNN_BEGIN_PRIMITIVE_DESC(activation_grad)
+/// @brief activation gradient function.
+cldnn_activation_grad_func activation_grad_func;
+/// @brief Activation additional params.
+/// activation_relu_negative_slope_grad - additional_params.a is a negative slope
+cldnn_activation_additional_params additional_params;
+/// @brief Activation additional params stored on a memory object
+/// activation_relu_negative_slope_grad - negative slope per feature map
+cldnn_primitive_id additional_params_input;
+CLDNN_END_PRIMITIVE_DESC(activation_grad)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(activation_grad);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* ACTIVATION_GRAD_H */
diff --git a/inference-engine/thirdparty/clDNN/api/C/apply_adam.h b/inference-engine/thirdparty/clDNN/api/C/apply_adam.h
new file mode 100644
index 000000000..25db5e9d3
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/apply_adam.h
@@ -0,0 +1,74 @@
+/*
+// Copyright (c) 2018 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef APPLY_ADAM_H
+#define APPLY_ADAM_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Apply Adam primitive.
+/// @details Updates output using Adam algorithm. The output of this primitive should be mutable_data type in case user wants to update
+/// variable across the network. If output is not mutable_data then it will be initialized with 0.
+/// "Adam: A Method for Stochastic Optimization" by Diederik P. Kingma, Jimmy Ba
+/// @n See: https://arxiv.org/abs/1412.6980
+///
+/// <b>Algorithm:</b>
+/// @n float lr[t] = lr * sqrt(1 - beta2^t) / (1 - beta1^t);
+/// @n float m[t] = beta1 * m[t-1] + (1 - beta1) * grad[t];
+/// @n float v[t] = beta2 * v[t-1] + (1 - beta2) * grad[t] * grad[t];
+/// @n float result = result - lr[t] * m[t] / (sqrt(v[t]) + epsilon);
+
+CLDNN_BEGIN_PRIMITIVE_DESC(apply_adam)
+/// @brief Primitive id containing m data.
+cldnn_primitive_id m;
+/// @brief Primitive id containing v data.
+cldnn_primitive_id v;
+/// @brief Primitive id containing beta1^t.
+cldnn_primitive_id beta1_power;
+/// @brief Primitive id containing beta2^t.
+cldnn_primitive_id beta2_power;
+/// @brief Learning rate parameter.
+float lr;
+/// @brief Beta1 parameter.
+float beta1;
+/// @brief Beta2 parameter.
+float beta2;
+/// @brief Epsilon.
+float epsilon;
+CLDNN_END_PRIMITIVE_DESC(apply_adam)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(apply_adam);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* APPLY_ADAM_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/arg_max_min.h b/inference-engine/thirdparty/clDNN/api/C/arg_max_min.h
new file mode 100644
index 000000000..017fc1160
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/arg_max_min.h
@@ -0,0 +1,72 @@
+/*
+// Copyright (c) 2018 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef ARG_MAX_MIN_H
+#define ARG_MAX_MIN_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Enum type to specify output type - index of max or min values
+typedef enum
+{
+ cldnn_arg_max,
+ cldnn_arg_min,
+} cldnn_arg_max_min_out;
+
+/// @brief Enum type to specify axis to maximize/minimize along.
+typedef enum
+{
+ cldnn_arg_max_min_batch,
+ cldnn_arg_max_min_feature,
+ cldnn_arg_max_min_x,
+ cldnn_arg_max_min_y,
+ cldnn_arg_max_min_xyf
+} cldnn_arg_max_min_axis;
+
+/// @brief Finds the index of the k max/min values of input.
+CLDNN_BEGIN_PRIMITIVE_DESC(arg_max_min)
+/// @brief Number of indices to output.
+uint32_t top_k;
+/// @brief Type of output - max or min.
+cldnn_arg_max_min_out output_type;
+/// @brief Axis to maximize/minimize along. If not set, maximize the flattened x, y ,f dimensions for each index of the first dimension.
+cldnn_arg_max_min_axis axis;
+/// @brief Indicates that the primitive has user defined axis to maximize/minimize along.
+uint32_t with_axis;
+CLDNN_END_PRIMITIVE_DESC(arg_max_min)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(arg_max_min);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* ARG_MAX_MIN_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/average_unpooling.h b/inference-engine/thirdparty/clDNN/api/C/average_unpooling.h
new file mode 100644
index 000000000..fb40dbb61
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/average_unpooling.h
@@ -0,0 +1,55 @@
+/*
+// Copyright (c) 2018 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef AVERAGE_UNPOOLING_H
+#define AVERAGE_UNPOOLING_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Performs "average_unpooling" operation.
+/// @details Reverse operation of average pooling.
+/// Each element in every pooling window is filled with output / window size value. In case of window overlap the elements are added.
+CLDNN_BEGIN_PRIMITIVE_DESC(average_unpooling)
+/// @brief Defines shift in output buffer.
+cldnn_tensor stride;
+/// @brief Pooling kernel size.
+cldnn_tensor size;
+/// @brief Output size of this primitive.
+cldnn_tensor output_size;
+CLDNN_END_PRIMITIVE_DESC(average_unpooling)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(average_unpooling);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* AVERAGE_UNPOOLING_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/batch_norm.h b/inference-engine/thirdparty/clDNN/api/C/batch_norm.h
new file mode 100644
index 000000000..c35351c54
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/batch_norm.h
@@ -0,0 +1,63 @@
+/*
+// Copyright (c) 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef BATCH_NORM_H
+#define BATCH_NORM_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Batch normalization primitive.
+/// @details Performs batch normalization as described in
+/// "Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift" by Ioffe, Szegedy
+/// @n See: http://arxiv.org/abs/1502.03167
+///
+/// <b>Algorithm:</b>
+/// @n global stats can be computed as:
+/// @n out[i] = (in[i] - mean[b]) / sqrt(variance[b] + epsilon)
+
+CLDNN_BEGIN_PRIMITIVE_DESC(batch_norm)
+/// @brief Primitive id containing mean data.
+cldnn_primitive_id mean;
+/// @brief Primitive id containing variance.
+cldnn_primitive_id variance;
+/// @brief Primitive id containing inverted variance used in future gradient computing.
+cldnn_primitive_id inv_variance;
+/// @brief Epsilon.
+float epsilon;
+CLDNN_END_PRIMITIVE_DESC(batch_norm)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(batch_norm);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* BATCH_NORM_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/batch_norm_grad.h b/inference-engine/thirdparty/clDNN/api/C/batch_norm_grad.h
new file mode 100644
index 000000000..474d6eff0
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/batch_norm_grad.h
@@ -0,0 +1,51 @@
+/*
+// Copyright (c) 2018 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef BATCH_NORM_GRAD_H
+#define BATCH_NORM_GRAD_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Performs backward batch normalization layer.
+/// @details Calculates mean gradient and gradient * input for every feature in data,
+/// then output is calculated as inv_variance * (input_grad - mean_grad_input * input - mean_grad)
+CLDNN_BEGIN_PRIMITIVE_DESC(batch_norm_grad)
+/// @brief Primitive id containing inverted variance from forward pass.
+cldnn_primitive_id inv_variance;
+CLDNN_END_PRIMITIVE_DESC(batch_norm_grad)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(batch_norm_grad);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* BATCH_NORM_GRAD_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/cldnn.h b/inference-engine/thirdparty/clDNN/api/C/cldnn.h
new file mode 100644
index 000000000..ee461a074
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/cldnn.h
@@ -0,0 +1,758 @@
+/*
+// Copyright (c) 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef CLDNN_H
+#define CLDNN_H
+
+// exporting symbols form dynamic library
+#ifdef EXPORT_NEURAL_SYMBOLS
+# if defined(_MSC_VER)
+// Microsoft
+# define CLDNN_API __declspec(dllexport)
+# elif defined(__GNUC__)
+// GCC
+# define CLDNN_API __attribute__((visibility("default")))
+# else
+# define CLDNN_API
+# pragma warning Unknown dynamic link import/export semantics.
+# endif
+#else //import dll
+# if defined(_MSC_VER)
+// Microsoft
+# define CLDNN_API __declspec(dllimport)
+# elif defined(__GNUC__)
+// GCC
+# define CLDNN_API
+# else
+# define CLDNN_API
+# pragma warning Unknown dynamic link import/export semantics.
+# endif
+#endif
+
+#include <stdint.h>
+#include <stddef.h>
+
+/// @addtogroup c_api C API
+/// @{
+
+/// @defgroup c_memory Memory Management
+
+/// @defgroup c_topology Network Topology
+
+/// @defgroup c_engine Execution Engine
+
+/// @defgroup c_network Network Execution
+
+/// @defgroup c_error Error Handling
+
+/// @defgroup c_version Version Information
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @addtogroup c_error
+/// @{
+#define CLDNN_SUCCESS 0
+#define CLDNN_ERROR -1
+#define CLDNN_INVALID_ARG -2
+#define CLDNN_OUT_OF_RESOURCES -3
+#define CLDNN_DEVICE_ERROR -4
+#define CLDNN_UNSUPPORTED_SIZE -5
+#define CLDNN_UNSUPPORTED_FORMAT -6
+#define CLDNN_DIMENSION_MISMATCH -7
+#define CLDNN_ALLOC_SIZE_EXCEEDED -8
+#define CLDNN_GLOBAL_SIZE_EXCEEDED -9
+
+/// @brief Represents errors status for all API calls
+typedef int32_t cldnn_status;
+/// @}
+
+/// @addtogroup c_version
+/// @{
+/// @brief Represents version information of API.
+typedef struct
+{
+ int32_t major; ///< Major version component (major version of clDNN API interface).
+ int32_t minor; ///< Minor version component (minor version of API interface - correlated with IE API version).
+ int32_t build; ///< Build version component (version/revision of official Open Source drop of clDNN library).
+ int32_t revision; ///< Revision version component (incremental identifier of current build/compilation).
+} cldnn_version;
+/// @}
+
+/// @ingroup c_engine
+/// @brief Engine object
+typedef struct cldnn_engine_impl* cldnn_engine;
+
+/// @ingroup c_network
+/// @brief Event object
+typedef struct cldnn_event_impl* cldnn_event;
+
+/// @ingroup c_topology
+/// @brief Network topology to be defined by user
+typedef struct cldnn_topology_impl* cldnn_topology;
+
+/// @ingroup c_program
+/// @brief Compiled program build from @ref cldnn_topology by @ref cldnn_engine
+typedef struct cldnn_program_impl* cldnn_program;
+
+/// @ingroup c_network
+/// @brief Executable network allocated from @ref cldnn_program
+typedef struct cldnn_network_impl* cldnn_network;
+
+/// @ingroup c_memory
+/// @brief Memory object
+typedef struct cldnn_memory_impl* cldnn_memory;
+
+/// @addtogroup c_engine
+/// @{
+
+/// @brief Defines available engine types
+typedef enum /*:int32_t*/
+{
+ cldnn_engine_ocl ///< OpenCL engine
+} cldnn_engine_type;
+
+/// @brief Priority modes.
+typedef enum /*:int16_t*/
+{
+ cldnn_priority_disabled,
+ cldnn_priority_low,
+ cldnn_priority_med,
+ cldnn_priority_high
+} cldnn_priority_mode_type;
+
+/// @brief Throttle modes.
+typedef enum /*:int16_t*/
+{
+ cldnn_throttle_disabled,
+ cldnn_throttle_low,
+ cldnn_throttle_med,
+ cldnn_throttle_high
+} cldnn_throttle_mode_type;
+
+/// @brief Configuration parameters for created engine.
+typedef struct
+{
+ uint32_t enable_profiling; ///< Enable per-primitive profiling.
+ uint32_t meaningful_kernels_names; ///< Generate meaningful names for OpenCL kernels.
+ uint32_t dump_custom_program; ///< dump the custom generated program to files
+ const char* compiler_options; ///< OpenCL compiler options string.
+ const char* single_kernel_name; ///< If provided, runs specific layer.
+ uint32_t enable_parallelisation; ///< Enables parallel execution of primitives which don't depend on each other. Disabled by default.
+ const char* engine_log; ///< Specifies a file to which engine log should be dumped. Null/empty values means no logging.
+ const char* sources_dumps_dir; ///< Specifies a directory where sources of cldnn::program objects should be dumped. Null/empty values means no logging.
+ /*cldnn_priority_mode_type*/ int16_t priority_mode; ///< Priority mode (support of OpenCL priority hints in command queue).
+ /*cldnn_throttle_mode_type*/ int16_t throttle_mode; ///< Placeholder for throttle mode (support of throttle hints in command queue). It has no effect for now and should be set to cldnn_throttle_disabled.
+ uint32_t enable_memory_pool; ///< Enables memory usage optimization. memory objects will be reused when possible.
+} cldnn_engine_configuration;
+
+/// @brief Information about the engine returned by cldnn_get_engine_info().
+typedef struct
+{
+ uint32_t cores_count; ///< Number of available HW cores.
+ uint32_t core_frequency; ///< Clock frequency in MHz.
+
+ uint64_t max_work_group_size; ///< Maximum number of work-items in a work-group executing a kernel using the data parallel execution model.
+ uint64_t max_local_mem_size; ///< Maximum size of local memory arena in bytes.
+ uint64_t max_global_mem_size; ///< Maximum size of global device memory in bytes.
+ uint64_t max_alloc_mem_size; ///< Maximum size of memory object allocation in bytes.
+
+ uint64_t max_image2d_width; ///< Maximum image 2d width supported by the device.
+ uint64_t max_image2d_height; ///< Maximum image 2d height supported by the device.
+
+ // Flags (for layout compatibility fixed size types are used).
+ uint8_t supports_fp16; ///< Does engine support FP16.
+ uint8_t supports_fp16_denorms; ///< Does engine support denormalized FP16.
+ uint8_t supports_subgroups_short; ///< Does engine support cl_intel_subgroups_short.
+ uint8_t supports_image; ///< Does engine support images (CL_DEVICE_IMAGE_SUPPORT cap).
+} cldnn_engine_info;
+/// @}
+
+/// @addtogroup c_network
+/// @{
+
+/// @brief user-defined event handler callback.
+typedef void(*cldnn_event_handler)(void*);
+
+/// @brief Profiling information for an executed network primitive.
+/// @details Every @ref cldnn_event associated with @ref cldnn_network_output.
+/// can contain one or more profiling information intervals.
+typedef struct
+{
+ const char* name; ///< Profiling interval name.
+ uint64_t nanoseconds;
+} cldnn_profiling_interval;
+
+/// @brief Network build option types.
+typedef enum /*:int32_t*/
+{
+ cldnn_build_option_fusing, ///< Allow primitives fusing during network build.
+ cldnn_build_option_optimize_data, ///< Enable implicit reordering for user input.
+ cldnn_build_option_debug, ///< Enable debug mode.
+ cldnn_build_option_outputs, ///< User selected list of network outputs.
+ cldnn_build_option_learning_config, ///< User defined learning parameters.
+ cldnn_build_option_tuning_config, ///< Tuning config.
+ cldnn_build_option_graph_dumps_dir, ///< Specifies a directory to which stages of network compilation should be dumped.
+ cldnn_build_option_serialization, ///< Specifies a name of files to which serialization should be dumped.
+ cldnn_build_option_load_program ///< Specifies a name of load_program process.
+} cldnn_build_option_type;
+
+/// @brief Tuning modes.
+typedef enum /*:int32_t*/
+{
+ cldnn_tuning_disabled, ///< Tuning is disabled.
+ cldnn_tuning_use_cache, ///< Tuning using the cached data (no on-line tuning for non-existing data).
+ cldnn_tuning_tune_and_cache, ///< Tuning using the cached data if exist, tune and update cache otherwise.
+} cldnn_tuning_mode_type;
+
+/// @brief Tuning config.
+struct cldnn_tuning_config
+{
+ const int32_t mode; ///< #cldnn_tuning_mode_type.
+ const char* cache_file_path; ///< A path to the tuning cache file.
+};
+
+/// @brief Learning params.
+struct cldnn_learning_params
+{
+ const float momentum;
+ const float weights_decay;
+};
+
+/// @brief Represents network build option.
+typedef struct
+{
+ int32_t type; ///< #cldnn_build_option_type.
+ const void* data; ///< option parameter - e.g list of outputs.
+} cldnn_build_option;
+
+/// @brief Output information for executed @a cldnn_network.
+/// @details User should wait for event before accessing the memory.
+typedef struct
+{
+ cldnn_event event; ///< Event to be waited.
+ cldnn_memory memory; ///< Output memory.
+ ///< User should wait for the event before access this field.
+} cldnn_network_output;
+
+/// @}
+
+/// @addtogroup c_memory
+/// @{
+
+/// @brief Represents memory formats (orders).
+/// @n In CNN most data is described as 4 dimensional blocks. In Intel(R) clDNN library we describe memory with 4 letters
+/// - b - number of blocks in batch. For weights formats: output features - conv, neurons - inner product
+/// - f - number of feature maps, features or channels. For weights formats: input features - conv, inputs, inner product
+/// - x - spatial, width
+/// - y - spatial, height
+/// \n
+/// For explanation how each format type is implemented in memory we will use naming shown below (b=2,f=3,y=3,x=3):
+/// \image html layout_memory_representation.jpg
+typedef enum /*:int32_t*/
+{
+ cldnn_format_yxfb, ///< batch first, feature and than spatials \n \image html yxfb.jpg
+ cldnn_format_byxf, ///< used in bitmaps, input from user i.e b images of RGB format \n \image html byxf.jpg
+ cldnn_format_bfyx, ///< the most common format for activations in clDNN. \n \image html bfyx.jpg
+ cldnn_format_fyxb, ///< format not used inside clDNN, but supported in reorder as extension for user provided formats.
+ cldnn_format_os_iyx_osv16, ///< format used only for convolution weights: os - output feature maps slice, i - input feature maps, yx - spatials, sv16 - 16 values of single slice.
+ ///< \n \image html os_iyx_osv16.jpg
+ cldnn_format_bs_xs_xsv8_bsv8, ///< format used only for fully connected weights: bs - batch slice, xs - x slice, bsv8 - 8 values of single slice.
+ ///< \n \image html bs_xs_xsv8_bsv8.jpg
+ cldnn_format_bs_xs_xsv8_bsv16,///< format used only for fully connected weights: bs - batch slice, xs - x slice, bsv16 - 16 values of single slice.
+ ///< \n \image html bs_xs_xsv8_bsv16.jpg
+ cldnn_format_bs_x_bsv16, ///< format used only for fully connected weights fp16 batch=1 : bs - batch slice (responses slice), bsv16 - 16 values of single batch slice, x - flattened plane of (fyx).
+ ///< \n \image html bs_x_bsv16.jpg
+ cldnn_format_bf8_xy16, ///< format used only for convolution 1x1 input, xy aligned to 16, f aligned to 8
+ ///< \n \image html bf8_xy16.jpg
+ cldnn_format_image_2d_weights_c4_fyx_b, ///< image format for weights, image 2d, 4-channel, width size is f*y*x/4 (4-channels filled with fyx data), height is b
+ ///< \n \image html image_2d_weights_c4_fyx_b.jpg
+ cldnn_format_image_2d_weights_c1_b_fyx, ///< image format for weights, image 2d, single channel, width size is b, height is f*y*x
+ ///< \n \image html image_2d_weights_c1_b_fyx.jpg
+ cldnn_format_byxf_af32, /// < \n format for input for primitives using MMAD
+ cldnn_format_os_is_yx_isa8_osv8_isv4, /// < \n format for weights for MMAD convolutions, stored as ((aligned_to_8(O)/8) * (aligned_to_32(I)/32) * Y * X * ( 8 ) * ( 8 ) * ( 4 )
+ cldnn_format_format_num, ///< number of format types
+ cldnn_format_any = -1
+} cldnn_format_type;
+
+#define CLDNN_FLOAT_TYPE_MASK 0x80
+#define CLDNN_UINT_TYPE_MASK 0x40
+
+#define CLDNN_TENSOR_BATCH_DIM_MAX 1
+#define CLDNN_TENSOR_FEATURE_DIM_MAX 1
+#define CLDNN_TENSOR_SPATIAL_DIM_MAX 2
+#define CLDNN_TENSOR_DIM_MAX 8
+
+/// @brief N-dimensional vector. Mostly used to represent memory size.
+typedef struct
+{
+ size_t batch_num;
+ size_t feature_num;
+ size_t spatial_num;
+ int32_t sizes[CLDNN_TENSOR_DIM_MAX];
+} cldnn_tensor;
+
+/// @brief Padding information.
+typedef struct
+{
+ cldnn_tensor lower_size; ///< Lower padding sizes. For spatials, it means size of left (X) and top (Y) padding.
+ cldnn_tensor upper_size; ///< Upper padding sizes. For spatials, it means size of right (X) and bottom (Y) padding.
+ float filling_value; ///< Filling value for an element of padding. If data type of elements is different than float it is converted
+ ///< to it using round-towards-nearest-even (for floating-point data types) or round-towards-zero (for integral
+ ///< data types).
+} cldnn_padding;
+
+/// @brief Data type stored in memory.
+typedef enum /*:size_t*/
+{
+ cldnn_i8 = sizeof(int8_t),
+ cldnn_f16 = sizeof(int16_t) | CLDNN_FLOAT_TYPE_MASK,
+ cldnn_f32 = sizeof(float) | CLDNN_FLOAT_TYPE_MASK,
+ cldnn_u8 = sizeof(uint8_t) | CLDNN_UINT_TYPE_MASK // TODO: move to top of list and re-compile inference engine
+
+} cldnn_data_type;
+
+/// @brief Memory layout description.
+typedef struct
+{
+ size_t data_type; ///< data type (@ref cldnn_data_type) stored in memory.
+ int32_t format; ///< Memory format (@ref cldnn_format_type)
+ cldnn_tensor size; ///< N-dimensional vector describes size (in elements) of memory (excluding padding).
+ cldnn_padding padding; ///< Explicitly added padding to memory buffer.
+} cldnn_layout;
+/// @}
+
+/// @addtogroup c_topology
+/// @{
+
+/// @brief Represents reference to an array of floats.
+typedef struct
+{
+ const float* data; ///< Pointer to float array.
+ size_t size; ///< Size (in floats) of the array.
+} cldnn_float_arr;
+
+/// @brief Represents reference to an array of uint16_t.
+typedef struct
+{
+ const uint16_t* data; ///< Pointer to uint16_t array.
+ size_t size; ///< Size (in uint16_t) of the array.
+} cldnn_uint16_t_arr;
+
+/// @brief Represents reference to an array of tensor.
+typedef struct
+{
+ const cldnn_tensor* data; ///< Pointer to tensor array.
+ size_t size; ///< Size (in tensor) of the array.
+} cldnn_tensor_arr;
+
+/// @brief Globally unique primitive's type id
+typedef const struct cldnn_primitive_type* cldnn_primitive_type_id;
+
+/// @brief Unique @p id of a primitive within a topology.
+typedef const char* cldnn_primitive_id;
+
+/// @brief Represents reference to an array of primitive ids.
+typedef struct
+{
+ const cldnn_primitive_id* data; ///< Pointer to ids array.
+ size_t size; ///< Number of ids in the array.
+} cldnn_primitive_id_arr;
+
+/// @brief Custom primitive kernel source code
+typedef const char* cldnn_kernel_code;
+/// @brief Custom primitive kernel source code array
+typedef cldnn_kernel_code* cldnn_kernels_code;
+/// @brief Custom primitive kernel entry point
+typedef const char* cldnn_kernel_entry_point;
+/// @brief Custom primitive kernel build options
+typedef const char* cldnn_kernel_build_options;
+/// @brief Custom primitive kernel workgroup sizes
+typedef const size_t* cldnn_work_group_sizes;
+
+/// @brief Custom primitive kernel argument type
+typedef enum cldnn_arg_type_t
+{
+ arg_input,
+ arg_output,
+} cldnn_arg_type;
+
+/// @brief Custom primitive kernel argument index
+typedef uint32_t cldnn_arg_index;
+
+/// @brief Custom primitive kernel argument type
+typedef struct cldnn_arg_t
+{
+ cldnn_arg_type arg_type;
+ cldnn_arg_index index;
+} cldnn_arg;
+
+/// @brief Custom primitive kernel argument array
+typedef const cldnn_arg* cldnn_kernel_arguments;
+
+/// @brief activation functions
+typedef enum cldnn_activation_func_t
+{
+ activation_none, // val
+ activation_logistic, // 1/(1 + exp(-val))
+ activation_hyperbolic_tan, // tanh(val)
+ activation_relu, // max(0, val)
+ activation_relu_negative_slope, // max(0, val) + a * min(0, val) (a is additional param)
+ activation_clamp, // max(a, min(b, val)) (a,b are additional params)
+ activation_softrelu, // log(1 + exp(val))
+ activation_abs, // abs(val)
+ activation_linear, // a*val + b (a,b are additional params)
+ activation_square, // val*val
+ activation_sqrt, // sqrt(val)
+ activation_elu, // max(0, val) + a * (exp(min(0, val)) - 1) (a is additional param)
+} cldnn_activation_func;
+
+/// @brief activation gradient functions
+typedef enum cldnn_activation_grad_func_t
+{
+ activation_grad_none, // val
+ activation_grad_relu, // val * (input > 0)
+ activation_grad_relu_negative_slope, // val * ((input > 0) + a * (input <= 0)) (a is additional param)
+} cldnn_activation_grad_func;
+
+/// @brief activation additional params
+typedef struct cldnn_activation_additional_params_t
+{
+ float a, b;
+} cldnn_activation_additional_params;
+
+
+/// @brief reorder mean operation modes
+typedef enum cldnn_reorder_mean_mode_t
+{
+ mean_none, // val
+ mean_subtract, // val - mean
+ mean_mul, // val * mean
+ mean_div, // val/mean
+} cldnn_reorder_mean_mode;
+
+/// @brief Begin primitive description definition
+/// @details Defines @p 'cldnn_primitive_type_desc' structure with first 4 fields
+/// common for all primitive descriptors. Other fields should be added after this macro.
+/// primitive descriptor definition should be closed by @ref CLDNN_END_PRIMITIVE_DESC.
+#define CLDNN_BEGIN_PRIMITIVE_DESC(PType) struct cldnn_##PType##_desc {\
+ cldnn_primitive_type_id type; /**< @brief Primitive type identifier. */\
+ cldnn_primitive_id id; /**< @brief Primitive id unique within a topology. */\
+ cldnn_primitive_id_arr input; /**< @brief Input primitives ids. */\
+ cldnn_padding output_padding; /**< @brief Output padding information. */
+
+/// @brief Close primitive descriptor definition.
+#define CLDNN_END_PRIMITIVE_DESC(PType) };
+
+#define CLDNN_PRIMITIVE_DESC(PType) cldnn_##PType##_desc
+
+/// @brief Basic primitive descriptor structure.
+CLDNN_BEGIN_PRIMITIVE_DESC(primitive)
+CLDNN_END_PRIMITIVE_DESC(primitive)
+
+/// @}
+
+/// @addtogroup c_version
+/// @{
+/// @brief Get information about version of clDNN.
+CLDNN_API cldnn_version cldnn_get_version(cldnn_status* status);
+/// @}
+
+/// @addtogroup c_topology
+/// @{
+
+/// @brief Create empty network topology
+CLDNN_API cldnn_topology cldnn_create_topology(cldnn_status* status);
+
+/// @brief Add new primitive to the topology.
+/// @param[in] dto The pointer to a structure defined by @ref CLDNN_BEGIN_PRIMITIVE_DESC and @ref CLDNN_END_PRIMITIVE_DESC
+CLDNN_API void cldnn_add_primitive(cldnn_topology topology, const struct CLDNN_PRIMITIVE_DESC(primitive)* dto, cldnn_status* status);
+
+/// @brief Change input layout of the topology.
+/// @param[in] id of the input layout in the topology
+/// @param[in] new_layout of the input layout
+CLDNN_API void cldnn_change_input_layout(cldnn_topology topology, cldnn_primitive_id id, cldnn_layout new_layout, cldnn_status* status);
+
+/// @brief Return all primitives id from topology.
+/// @details Function fills user provided buffer by primitive ids. Each id is followed by '\0'.
+/// @param[in] ids Pointer to user-allocated buffer to store names.
+/// @param[in] size Size (in chars) of the buffer.
+/// @param[out] size_ret Required size (in chars) to store result.
+CLDNN_API void cldnn_get_primitive_ids(cldnn_topology topology, char* ids, size_t size, size_t* size_ret, cldnn_status* status);
+
+/// @brief Increment reference counter for the topology object.
+CLDNN_API void cldnn_retain_topology(cldnn_topology topology, cldnn_status* status);
+
+/// @brief Decrement reference counter for the topology object. Deletes object when counter becomes zero.
+CLDNN_API void cldnn_release_topology(cldnn_topology topology, cldnn_status* status);
+/// @}
+
+/// @addtogroup c_engine
+/// @{
+
+/// @brief number of available engines of the particular type
+CLDNN_API uint32_t cldnn_get_engine_count(/*cldnn_engine_type*/ int32_t type, cldnn_status* status);
+
+/// @brief Release pending memory allocated in OpenCL context.
+/// @param[in] type Engine type @ref cldnn_engine_type. Only OCL engine is supported.
+/// @details OpenCL does not guarantee that the memory will be released (even with cl::Buffer objects released).
+/// Use this function to force releasing whole pending memory.
+CLDNN_API void cldnn_release_pending_memory(cldnn_engine engine, cldnn_status* status);
+
+/// @brief Create new engine of the specified @p type, @p engine_num, and @p configuration options.
+/// @param[in] type Engine type @ref cldnn_engine_type. Only OCL engine is supported.
+/// @param[in] engine_num Engine index. Should be 0.
+/// @param[in] configuration Pointer to engine configuration options.
+CLDNN_API cldnn_engine cldnn_create_engine(/*cldnn_engine_type*/ int32_t type, uint32_t engine_num, const cldnn_engine_configuration* configuration, cldnn_status* status);
+
+/// @brief Increment reference counter for the engine object.
+CLDNN_API void cldnn_retain_engine(cldnn_engine engine, cldnn_status* status);
+
+/// @brief Decrement reference counter for the engine object. Deletes object when counter becomes zero.
+CLDNN_API void cldnn_release_engine(cldnn_engine engine, cldnn_status* status);
+
+/// @brief Returns engine information. See @ref cldnn_engine_info for details.
+CLDNN_API cldnn_engine_info cldnn_get_engine_info(cldnn_engine engine, cldnn_status* status);
+
+/// @brief Returns the @ref cldnn_engine_type for the particular engine
+CLDNN_API /*cldnn_engine_type*/ int32_t cldnn_get_engine_type(cldnn_engine engine, cldnn_status* status);
+
+/// @brief Returns total size of all resources allocated using given engine
+CLDNN_API int64_t cldnn_get_temp_used_device_memory_size(cldnn_engine engine, cldnn_status* status);
+/// @}
+
+/// @brief Returns max size of resources allocated using given engine
+CLDNN_API int64_t cldnn_get_max_used_device_memory_size(cldnn_engine engine, cldnn_status* status);
+
+/// @addtogroup c_network
+/// @{
+
+/// @brief Creates an event which can be set by user.
+CLDNN_API cldnn_event cldnn_create_user_event(cldnn_engine engine, cldnn_status* status);
+
+/// @brief Checks if an event was created by user.
+CLDNN_API int32_t cldnn_is_user_event(cldnn_event event, cldnn_status* status);
+
+/// @brief Increment reference counter for the event object.
+CLDNN_API void cldnn_retain_event(cldnn_event event, cldnn_status* status);
+
+/// @brief Decrement reference counter for the event object. Deletes object when counter becomes zero.
+CLDNN_API void cldnn_release_event(cldnn_event event, cldnn_status* status);
+
+/// @brief Waits for event completion or error.
+CLDNN_API void cldnn_wait_for_event(cldnn_event event, cldnn_status* status);
+
+/// @brief Set event status to @p completed.
+CLDNN_API void cldnn_set_event(cldnn_event event, cldnn_status* status);
+
+/// @brief Register call back to be called on event completion.
+/// @param[in] handler Pointer to @ref cldnn_event_handler call-back function.
+/// @param[in] param user-defined value to be passed to the call back function.
+CLDNN_API void cldnn_add_event_handler(cldnn_event event, cldnn_event_handler handler, void* param, cldnn_status* status);
+
+/// @brief Returns the profiling information for an network primitive associated with event.
+/// @param[in] profiling Pointer to the array of @ref cldnn_profiling_interval where information to be stored.
+/// @param[in] size Number of elements in the array of @ref cldnn_profiling_interval.
+/// @param[out] size_ret Number of elements required to store profiling information.
+CLDNN_API void cldnn_get_event_profiling_info(cldnn_event event, cldnn_profiling_interval* profiling, size_t size, size_t* size_ret, cldnn_status* status);
+/// @}
+
+/// @addtogroup c_program
+/// @{
+
+/// @brief Builds executable program based on user-defined @p topology by specified @p engine.
+/// @param[in] engine The engine which will be used to build the program.
+/// @param[in] topology The user-defined topology on which the network will be based.
+/// @param[in] options The pointer of array of @ref cldnn_build_option which define network build options.
+/// @param[in] options_num Number of elements in the @p options array.
+CLDNN_API cldnn_program cldnn_build_program(cldnn_engine engine, cldnn_topology topology, cldnn_build_option* options, size_t options_num, cldnn_status* status);
+
+/// @brief Increment reference counter for the program object.
+CLDNN_API void cldnn_retain_program(cldnn_program program, cldnn_status* status);
+
+/// @brief Decrement reference counter for the program object. Deletes object when counter becomes zero.
+CLDNN_API void cldnn_release_program(cldnn_program program, cldnn_status* status);
+/// @}
+
+/// @addtogroup c_network
+/// @{
+
+/// @brief Builds and allocates executable network based on user-defined @p topology by specified @p engine. This is a shorthand for cldnn_build_program and cldnn_allocate_network.
+/// @param[in] engine The engine which will be used to build the network.
+/// @param[in] topology The user-defined topology on which the network will be based.
+/// @param[in] options The pointer of array of @ref cldnn_build_option which define network build options.
+/// @param[in] options_num Number of elements in the @p options array.
+CLDNN_API cldnn_network cldnn_build_network(cldnn_engine engine, cldnn_topology topology, cldnn_build_option* options, size_t options_num, cldnn_status* status);
+
+/// @brief Allocates memory for a new network which will be able to execute specified @p program.
+/// @param[in] program The program object which holds binaries compiled from some topology and engine. Multiple network objects can share the same program.
+CLDNN_API cldnn_network cldnn_allocate_network(cldnn_program program, cldnn_status* status);
+
+/// @brief Increment reference counter for the network object.
+CLDNN_API void cldnn_retain_network(cldnn_network network, cldnn_status* status);
+
+/// @brief Decrement reference counter for the network object. Deletes object when counter becomes zero.
+CLDNN_API void cldnn_release_network(cldnn_network network, cldnn_status* status);
+
+/// @brief Provides user input data to the network (for @p input_layout primitives).
+/// @param[in] id Primitive @p id of @p input_layout primitive defined in @p topology.
+/// @param[in] mem Memory object with user data which @p layout matches the @p input_layout defined in @p topology.
+/// @details User should set the input data for every @p input_layout primitive defined in @p topology
+/// by calling this function before call to cldnn_execute_network().
+CLDNN_API void cldnn_set_network_input(cldnn_network network, cldnn_primitive_id id, cldnn_memory mem, cldnn_status* status);
+
+/// @brief Sets learning rate for training primitives in network.
+/// @param[in] lr Learning rate.
+CLDNN_API void cldnn_set_learning_rate(cldnn_network network, float lr, cldnn_status* status);
+
+/// @brief Returns learning rate value.
+CLDNN_API float cldnn_get_learning_rate(cldnn_network network, cldnn_status* status);
+
+/// @brief Returns information about particular primitive.
+/// @details Function fills user provided buffer by primitive description.
+/// @param[in] id Primitive @p id of @p input_layout primitive defined in @p topology.
+/// @param[in] info Pointer to user-allocated buffer to store names.
+/// @param[in] size Size (in chars) of the buffer.
+/// @param[out] size_ret Required size (in chars) to store result.
+/// @returns pointer to array of chars with detailed information about particular primitive.
+CLDNN_API void cldnn_get_primitive_info(cldnn_network network, cldnn_primitive_id id, char* info, size_t size, size_t* size_ret, cldnn_status* status);
+
+/// @brief Returns @p engine associated with the @p network.
+CLDNN_API cldnn_engine cldnn_get_network_engine(cldnn_network network, cldnn_status* status);
+
+/// @brief Returns @p program associated with the @p network.
+CLDNN_API cldnn_program cldnn_get_network_program(cldnn_network network, cldnn_status* status);
+
+/// @brief Returns names of network outputs.
+/// @details Function fills user provided buffer by primitive names. Each name is followed by '\0'.
+/// Empty name "\0\0" means end of data.
+/// @param[in] names Pointer to user-allocated buffer to store names.
+/// @param[in] size Size (in chars) of the buffer.
+/// @param[out] size_ret Required size (in chars) to store result.
+CLDNN_API void cldnn_get_network_output_names(cldnn_network network, char* names, size_t size, size_t* size_ret, cldnn_status* status);
+
+/// @brief Returns names of executed primitives.
+/// @details Function fills user provided buffer by primitive names. Each name is followed by '\0'.
+/// Empty name "\0\0" means end of data.
+/// @param[in] names Pointer to user-allocated buffer to store names.
+/// @param[in] size Size (in chars) of the buffer.
+/// @param[out] size_ret Required size (in chars) to store result.
+CLDNN_API void cldnn_get_network_executed_primitive_names(cldnn_network network, char* names, size_t size, size_t* size_ret, cldnn_status* status);
+
+/// @brief Returns names of all primitives in network.
+/// @details Function fills user provided buffer by primitive names. Each name is followed by '\0'.
+/// Empty name "\0\0" means end of data.
+/// @param[in] names Pointer to user-allocated buffer to store names.
+/// @param[in] size Size (in chars) of the buffer.
+/// @param[out] size_ret Required size (in chars) to store result.
+CLDNN_API void cldnn_get_network_all_primitive_names(cldnn_network network, char* names, size_t size, size_t* size_ret, cldnn_status* status);
+
+/// @brief Returns names of all primitives in network before graph optimization.
+/// @details Function fills user provided buffer by primitive names. Each name is followed by '\0'.
+/// Empty name "\0\0" means end of data.
+/// @param[in] names Pointer to user-allocated buffer to store names.
+/// @param[in] size Size (in chars) of the buffer.
+/// @param[out] size_ret Required size (in chars) to store result.
+CLDNN_API void cldnn_get_network_all_primitive_org_names(cldnn_network network, char* names, size_t size, size_t* size_ret, cldnn_status* status);
+
+/// @brief Executes network.
+/// @details User should call cldnn_set_network_input() for every @p input_layout defined in the source @p topology.
+/// Function returns immediately, even if @p dependencies are not set yet.
+/// @param dependencies Pointer to an array of @ref cldnn_event objects to be waited for network execution.
+/// @param deps_num Number of elements in the @p dependencies array.
+CLDNN_API void cldnn_execute_network(cldnn_network network, cldnn_event* dependencies, size_t deps_num, cldnn_status* status);
+
+/// @brief Returns executed network output information.
+/// @details User should call this function after cldnn_execute_network() to get result of network execution.
+/// @param name Output name to get the result.
+/// @returns @ref cldnn_network_output structure with the output information.
+/// To work with the result of this function, user should first wait for cldnn_network_output::event
+/// before getting an access to cldnn_network_output::memory.
+CLDNN_API cldnn_network_output cldnn_get_network_output(cldnn_network network, const char* name, cldnn_status* status);
+
+/// @brief Returns @ref memory corresponding to output with @p name.
+/// @details User can call this function even before calling cldnn_execute_network(), but then content of memory is uninitialized.
+/// @param name Output name to get the result.
+/// @returns @ref cldnn_memory structure with the output information.
+CLDNN_API cldnn_memory cldnn_get_network_output_memory(cldnn_network network, const char* name, cldnn_status* status);
+
+/// @brief Returns @ref event corresponding to output with @p name.
+/// @details User can call this function even before calling cldnn_execute_network(), but then content of memory is uninitialized.
+/// @param name Output name to get the result.
+/// @returns @ref cldnn_event structure with the output information.
+CLDNN_API cldnn_event cldnn_get_network_output_event(cldnn_network network, const char* name, cldnn_status* status);
+/// @}
+
+/// @addtogroup c_memory
+/// @{
+
+/// @brief Allocate memory on @p engine using specified @p layout.
+CLDNN_API cldnn_memory cldnn_allocate_memory(cldnn_engine engine, cldnn_layout layout, cldnn_status* status);
+/// @brief Create memory object attached to the buffer allocated by user.
+/// @note User is responsible for buffer deallocation. Buffer lifetime should be bigger than lifetime of the memory object.
+CLDNN_API cldnn_memory cldnn_attach_memory(cldnn_layout layout, void* pointer, size_t size, cldnn_status* status);
+/// @brief Checks if two memory objects refer to the same underlying buffer.
+CLDNN_API int32_t cldnn_is_the_same_buffer(cldnn_memory mem1, cldnn_memory mem2, cldnn_status* status);
+/// @brief Increment reference counter for the memory object.
+CLDNN_API void cldnn_retain_memory(cldnn_memory memory, cldnn_status* status);
+/// @brief Decrement reference counter for the memory object. Deletes object when counter becomes zero.
+CLDNN_API void cldnn_release_memory(cldnn_memory memory, cldnn_status* status);
+/// @brief Locks memory buffer. Provides direct access to memory data.
+/// @returns Direct pointer to the memory data.
+CLDNN_API void* cldnn_lock_memory(cldnn_memory memory, cldnn_status* status);
+/// @brief Unlocks memory locked by cldnn_lock_memory(cldnn_memory memory, cldnn_status* status).
+CLDNN_API void cldnn_unlock_memory(cldnn_memory memory, cldnn_status* status);
+/// @brief Returns memory layout
+/// @returns @ref cldnn_layout which describes memory.
+CLDNN_API cldnn_layout cldnn_get_memory_layout(cldnn_memory memory, cldnn_status* status);
+/// @brief Returns reference to the engine associated with memory object.
+/// @returns The engine associated with memory object. Or NULL if memory was attached to user-allocated buffer.
+CLDNN_API cldnn_engine cldnn_get_memory_engine(cldnn_memory memory, cldnn_status* status);
+/// @brief converts float(32 bit) to half_t(fp16 bit)
+/// @returns 16bit half_t
+CLDNN_API uint16_t cldnn_float_to_half(float,cldnn_status*);
+/// @brief converts half_t(f16 bit) to float(32 bit)
+/// @returns 32bit float
+CLDNN_API float cldnn_half_to_float(uint16_t, cldnn_status*);
+
+/// @}
+
+/// @addtogroup c_error
+/// @{
+
+/// @brief If a cldnn function returns a status different than CLDNN_SUCCESS, the user can call this function to get more details.
+/// @returns pointer to array of chars with more detailed description of last error.
+/// @note If a sequence of errors occurs, only the description of the last error will be available
+CLDNN_API const char* cldnn_get_last_error_message();
+/// @}
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+
+//primitives
+#ifdef __cplusplus
+#define CLDNN_DECLARE_PRIMITIVE_TYPE_ID(PType) extern "C" CLDNN_API cldnn_primitive_type_id cldnn_##PType##_type_id(cldnn_status* status)
+#else
+#define CLDNN_DECLARE_PRIMITIVE_TYPE_ID(PType) CLDNN_API cldnn_primitive_type_id cldnn_##PType##_type_id(cldnn_status* status)
+#endif
+
+
+#endif /* CLDNN_H */
diff --git a/inference-engine/thirdparty/clDNN/api/C/concatenation.h b/inference-engine/thirdparty/clDNN/api/C/concatenation.h
new file mode 100644
index 000000000..f70510451
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/concatenation.h
@@ -0,0 +1,76 @@
+/*
+// Copyright (c) 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef CONCATENATION_H
+#define CONCATENATION_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum
+{
+ cldnn_concatenation_along_b = 0,
+ cldnn_concatenation_along_f = CLDNN_TENSOR_BATCH_DIM_MAX,
+ cldnn_concatenation_along_x = CLDNN_TENSOR_BATCH_DIM_MAX + CLDNN_TENSOR_FEATURE_DIM_MAX,
+ cldnn_concatenation_along_y = cldnn_concatenation_along_x + 1
+} cldnn_concatenation_axis;
+
+/// @details Concatenation is used to concatenate multiple sources into one destination along specified dimension.
+/// Note that all other dimensions (except the one along which concatenation take place) must have the same value in each source
+/// and each source should have the same format.
+/// @par Algorithm:
+/// \code
+/// int outputIdx = 0
+/// for(i : input)
+/// {
+/// for(f : i.features)
+/// {
+/// output[outputIdx] = f
+/// outputIdx += 1
+/// }
+/// }
+/// \endcode
+/// @par Where:
+/// @li input : data structure holding all source inputs for this primitive
+/// @li output : data structure holding output data for this primitive
+/// @li i.features : number of features in currently processed input
+/// @li outputIdx : index of destination feature
+CLDNN_BEGIN_PRIMITIVE_DESC(concatenation)
+/// @brief Dimension along which concatenation should take place.
+cldnn_concatenation_axis axis;
+CLDNN_END_PRIMITIVE_DESC(concatenation)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(concatenation);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* CONCATENATION_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/convolution.h b/inference-engine/thirdparty/clDNN/api/C/convolution.h
new file mode 100644
index 000000000..4be5c23d3
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/convolution.h
@@ -0,0 +1,80 @@
+/*
+// Copyright (c) 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef CONVOLUTION_H
+#define CONVOLUTION_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Performs forward spatial convolution with weight sharing.
+/// Also supports built-in Relu @CLDNN_PRIMITIVE_DESC{activation} available by setting it in arguments.
+/// @details Parameters are defined in context of "direct" convolution, but actual algorithm is not implied.
+CLDNN_BEGIN_PRIMITIVE_DESC(convolution)
+/// @brief Defines a shift, relative to (0,0) position of the input buffer, where (0,0) point of the convolution window should start calculations.
+cldnn_tensor input_offset;
+/// @brief Defines shift in input buffer between adjacent calculations of output values.
+cldnn_tensor stride;
+/// @brief Defines gaps in the input - dilation rate k=1 is normal convolution, k=2 means skipping one pixel per input, k=4 means skipping 3 pixels.
+/// As an example in one dimension, a filter w of size 3 would compute over input x the following: w[0]*x[0] + w[1]*x[1] + w[2]*x[2] for dilation of 1.
+/// For dilation 2 the filter would instead compute w[0]*x[0] + w[1]*x[2] + w[2]*x[4].
+cldnn_tensor dilation;
+/// @brief Enable Relu activation.
+uint32_t with_activation;
+/// @brief Relu activation slope.
+float activation_negative_slope;
+/// @brief On how many cards split the computation to.
+uint32_t split;
+/// @brief Indicates that the primitive has user-defined output size (non-zero value).
+uint32_t with_output_size;
+/// @brief User-defined output data size of the primitive (w/o padding).
+cldnn_tensor output_size;
+/// @brief Array of primitive ids containing weights data. Size of array should be equivalent to @p split.
+cldnn_primitive_id_arr weights;
+/// @brief Array of primitive ids containing bias data. Size of array should be equivalent to @p split.
+cldnn_primitive_id_arr bias;
+/// @brief List of primitive ids containing weights quantization factors per output feature map.
+cldnn_primitive_id_arr weights_quantization_factors;
+/// @brief List of primitive ids containing output calibration factors per output feature map.
+cldnn_primitive_id_arr output_calibration_factors;
+/// @brief Input quantization factor
+float input_quantization_factor;
+/// @brief Output quantization factor
+float output_quantization_factor;
+
+CLDNN_END_PRIMITIVE_DESC(convolution)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(convolution);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* CONVOLUTION_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/convolution_grad_input.h b/inference-engine/thirdparty/clDNN/api/C/convolution_grad_input.h
new file mode 100644
index 000000000..002e648f0
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/convolution_grad_input.h
@@ -0,0 +1,60 @@
+/*
+// Copyright (c) 2018 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef CONVOLUTION_GRAD_INPUT_H
+#define CONVOLUTION_GRAD_INPUT_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Performs transposed convolution.
+/// @details convolution_grad_input is similar to convolution layer with the weights flipped on the axis and stride and input padding parameters used in opposite sense as in convolution.
+CLDNN_BEGIN_PRIMITIVE_DESC(convolution_grad_input)
+/// @brief Defines a shift, relative to (0,0) position of the input buffer, where (0,0) point of the convolution_grad_input window should start calculations.
+cldnn_tensor input_offset;
+/// @brief Defines the spatial dimensions of stride of adjacent elements in input buffer.
+cldnn_tensor stride;
+/// @brief On how many cards split the computation to.
+uint32_t split;
+/// @brief Indicates that the primitive has user-defined output size (non-zero value).
+uint32_t with_output_size;
+/// @brief User-defined output data size of the primitive (w/o padding).
+cldnn_tensor output_size;
+/// @brief Array of primitive ids containing weights data. Size of array should be equivalent to @p split.
+cldnn_primitive_id_arr weights;
+CLDNN_END_PRIMITIVE_DESC(convolution_grad_input)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(convolution_grad_input);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* CONVOLUTION_GRAD_INPUT_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/convolution_grad_weights.h b/inference-engine/thirdparty/clDNN/api/C/convolution_grad_weights.h
new file mode 100644
index 000000000..aacd8ff89
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/convolution_grad_weights.h
@@ -0,0 +1,69 @@
+/*
+// Copyright (c) 2018 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef CONVOLUTION_GRAD_WEIGHTS_H
+#define CONVOLUTION_GRAD_WEIGHTS_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Performs backward convolution operation for weights and biases.
+/// @details convolution_grad_weights updates weights and bias mutable data for training purposes.
+/// @details Please note that this primitive was not heavily tested and currently only batch=1 is enabled for this primitive.
+CLDNN_BEGIN_PRIMITIVE_DESC(convolution_grad_weights)
+/// @brief Defines a shift, relative to (0,0) position of the input buffer, where (0,0) point of the convolution_grad_weights window should start calculations.
+cldnn_tensor input_offset;
+/// @brief Defines the spatial dimensions of stride of adjacent elements in input buffer.
+cldnn_tensor stride;
+/// @brief Defines gaps in the input - dilation rate k=1 is normal convolution, k=2 means skipping one pixel per input, k=4 means skipping 3 pixels.
+/// As an example in one dimension, a filter w of size 3 would compute over input x the following: w[0]*x[0] + w[1]*x[1] + w[2]*x[2] for dilation of 1.
+/// For dilation 2 the filter would instead compute w[0]*x[0] + w[1]*x[2] + w[2]*x[4].
+cldnn_tensor dilation;
+/// @brief Number of cards (devices) the computation is split across.
+uint32_t split;
+/// @brief Array of primitive ids containing weights data. Size of array should be equivalent to @p split.
+cldnn_primitive_id_arr weights;
+/// @brief Array of primitive ids containing bias data. Size of array should be equivalent to @p split or should be empty (if not using bias).
+cldnn_primitive_id_arr bias;
+/// @brief Primitive id containing convolution gradient data. Used for proper order of gradient calculation. Leave empty if primitive is last in backward pass.
+cldnn_primitive_id conv_grad;
+/// @brief Array of primitive ids containing weights gradient data calculated in previous iteration. Amount of primitives and their memory sizes should be same as weights.
+cldnn_primitive_id_arr prev_weights_grad;
+/// @brief Array of primitive ids containing bias gradient data calculated in previous iteration. Amount of primitives and their memory sizes should be same as biases.
+cldnn_primitive_id_arr prev_bias_grad;
+CLDNN_END_PRIMITIVE_DESC(convolution_grad_weights)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(convolution_grad_weights);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* CONVOLUTION_GRAD_WEIGHTS_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/crop.h b/inference-engine/thirdparty/clDNN/api/C/crop.h
new file mode 100644
index 000000000..fd977f0fd
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/crop.h
@@ -0,0 +1,66 @@
+/*
+// Copyright (c) 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef CROP_H
+#define CROP_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Performs crop operation on input.
+/// @details Crops the input to the shape of reference_input across all dimensions taking into account specified input offsets.
+/// @n
+/// @n\b Examples
+/// @n Crop without offset example:
+/// \image html crop_no_offset.jpg
+/// @n Crop with offset example:
+/// \image html crop_w_offset.jpg
+/// @n
+/// @n\b Requirements
+/// @n - Input, reference and offset layout (order) has to be the same
+/// @n - Input size cannot be greater than reference size in any dimension
+/// @n - All sizes have to have positive numbers
+/// @n - Reference size plus offset cannot exceed input size
+/// @n Breaking any of these conditions will cause an exception to be thrown.
+
+CLDNN_BEGIN_PRIMITIVE_DESC(crop)
+/// @brief Reference input tensor with the required dimensions.
+cldnn_tensor reference_input;
+/// @brief Input offsets.
+cldnn_tensor offsets;
+CLDNN_END_PRIMITIVE_DESC(crop)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(crop);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* CROP_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/custom_gpu_primitive.h b/inference-engine/thirdparty/clDNN/api/C/custom_gpu_primitive.h
new file mode 100644
index 000000000..b97cd5afd
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/custom_gpu_primitive.h
@@ -0,0 +1,70 @@
+/*
+// Copyright (c) 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef CUSTOM_GPU_PRIMITIVE_H
+#define CUSTOM_GPU_PRIMITIVE_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief This primitive executes a custom kernel provided by the application
+/// @details The application is required to provide all relevant details for executing the custom kernel
+/// such as: sources, entry point, work sizes and parameter bindings.
+CLDNN_BEGIN_PRIMITIVE_DESC(custom_gpu_primitive)
+/// @brief Source code for the kernel
+cldnn_primitive_id_arr kernels_code;
+/// @brief The name of the entry point function in the kernel
+cldnn_kernel_entry_point kernel_entry_point;
+/// @brief Argument bindings for the entry point function
+cldnn_kernel_arguments kernel_arguments;
+/// @brief The number of arguments used by the kernel
+int kernel_arguments_num;
+/// @brief The kernel's build options
+cldnn_kernel_build_options build_options;
+/// @brief The output layout declared by the primitive
+cldnn_layout output_layout;
+/// @brief The global working sizes
+cldnn_work_group_sizes gws;
+/// @brief The number of global work sizes
+int gws_num;
+/// @brief The local working sizes
+cldnn_work_group_sizes lws;
+/// @brief The number of local work sizes
+int lws_num;
+
+CLDNN_END_PRIMITIVE_DESC(custom_gpu_primitive)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(custom_gpu_primitive);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* CUSTOM_GPU_PRIMITIVE_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/data.h b/inference-engine/thirdparty/clDNN/api/C/data.h
new file mode 100644
index 000000000..eb48b5594
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/data.h
@@ -0,0 +1,54 @@
+/*
+// Copyright (c) 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef DATA_H
+#define DATA_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Provides input data to topology.
+/// @details This primitive allows to pass data which is known at topology creation (constants).
+/// For example, weights and biases for scoring networks.
+/// @note Passing data at topology creation may improve network performance if data optimization is enabled.
+CLDNN_BEGIN_PRIMITIVE_DESC(data)
+/// @brief Memory object which contains data.
+/// @note If memory is attached by ::cldnn_attach_memory(),
+/// attached buffer should be valid on ::cldnn_build_network() call.
+cldnn_memory mem;
+CLDNN_END_PRIMITIVE_DESC(data)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(data);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* DATA_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/deconvolution.h b/inference-engine/thirdparty/clDNN/api/C/deconvolution.h
new file mode 100644
index 000000000..dd1b8e512
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/deconvolution.h
@@ -0,0 +1,69 @@
+/*
+// Copyright (c) 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef DECONVOLUTION_H
+#define DECONVOLUTION_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Performs transposed convolution.
+/// Also supports built-in Relu @CLDNN_PRIMITIVE_DESC{activation} available by setting it in arguments.
+/// @details Deconvolution is similar to convolution layer with the weights flipped on the axis and stride and input padding parameters used in opposite sense as in convolution.
+CLDNN_BEGIN_PRIMITIVE_DESC(deconvolution)
+/// @brief Defines a shift, relative to (0,0) position of the input buffer, where (0,0) point of the deconvolution window should start calculations.
+cldnn_tensor input_offset;
+/// @brief Defines the spatial dimensions of stride of adjacent elements in input buffer.
+cldnn_tensor stride;
+/// @brief Enables Relu activation.
+uint32_t with_activation;
+/// @brief Relu activation slope.
+float activation_negative_slope;
+/// @brief Number of cards (devices) the computation is split across.
+uint32_t split;
+/// @brief Indicates that the primitive has user-defined output size (non-zero value).
+uint32_t with_output_size;
+/// @brief User-defined output data size of the primitive (w/o padding).
+cldnn_tensor output_size;
+/// @brief Array of primitive ids containing weights data. Size of array should be equivalent to @p split.
+cldnn_primitive_id_arr weights;
+/// @brief Array of primitive ids containing bias data. Size of array should be equivalent to @p split or should be empty (if not using bias).
+cldnn_primitive_id_arr bias;
+/// @brief Indicates that deconvolution is used for convolution backward computation (convolution_grad_input)
+uint32_t gradient;
+CLDNN_END_PRIMITIVE_DESC(deconvolution)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(deconvolution);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* DECONVOLUTION_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/detection_output.h b/inference-engine/thirdparty/clDNN/api/C/detection_output.h
new file mode 100644
index 000000000..38d71d5cb
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/detection_output.h
@@ -0,0 +1,90 @@
+/*
+// Copyright (c) 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef DETECTION_OUTPUT_H
+#define DETECTION_OUTPUT_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Select method for coding the prior-boxes in Detection Output layer ( @CLDNN_PRIMITIVE_DESC{detection_output} ).
+typedef enum /*:int32_t*/
+{
+ cldnn_code_type_corner,
+ cldnn_code_type_center_size,
+ cldnn_code_type_corner_size,
+} cldnn_prior_box_code_type;
+
+/// @brief Generates a list of detections based on location and confidence predictions by doing non maximum suppression.
+/// @details Each row is a 7 dimension vector, which stores: [image_id, label, confidence, xmin, ymin, xmax, ymax].
+/// If number of detections per image is lower than keep_top_k, will write dummy results at the end with image_id=-1.
+CLDNN_BEGIN_PRIMITIVE_DESC(detection_output)
+/// @brief Number of classes to be predicted.
+uint32_t num_classes;
+/// @brief Number of total bounding boxes to be kept per image after NMS step.
+uint32_t keep_top_k;
+/// @brief If not 0, bounding box are shared among different classes.
+uint32_t share_location;
+/// @brief Background label id (-1 if there is no background class).
+int background_label_id;
+/// @brief Threshold for NMS step.
+float nms_threshold;
+/// @brief Maximum number of results to be kept in NMS.
+int top_k;
+/// @brief Used for adaptive NMS.
+float eta;
+/// @brief Type of coding method for bounding box. See #cldnn_prior_box_code_type.
+int32_t code_type;
+/// @brief If not 0, variance is encoded in target; otherwise we need to adjust the predicted offset accordingly.
+uint32_t variance_encoded_in_target;
+/// @brief Only keep detections with confidences larger than this threshold.
+float confidence_threshold;
+/// @brief Number of elements in a single prior description (4 if priors calculated using PriorBox layer, 5 - if Proposal)
+int32_t prior_info_size;
+/// @brief Offset of the box coordinates w.r.t. the beginning of a prior info record
+int32_t prior_coordinates_offset;
+/// @brief If true, priors are normalized to [0; 1] range.
+uint32_t prior_is_normalized;
+/// @brief Width of input image.
+int32_t input_width;
+/// @brief Height of input image.
+int32_t input_height;
+/// @brief Decrease label id to skip background label equal to 0. Can't be used simultaneously with background_label_id.
+int32_t decrease_label_id;
+/// @brief Clip decoded boxes
+int32_t clip;
+CLDNN_END_PRIMITIVE_DESC(detection_output)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(detection_output);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* DETECTION_OUTPUT_H */
diff --git a/inference-engine/thirdparty/clDNN/api/C/eltwise.h b/inference-engine/thirdparty/clDNN/api/C/eltwise.h
new file mode 100644
index 000000000..11e3129f0
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/eltwise.h
@@ -0,0 +1,84 @@
+/*
+// Copyright (c) 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef ELTWISE_H
+#define ELTWISE_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Select mode for eltwise layer ( @CLDNN_PRIMITIVE_DESC{eltwise} ).
+typedef enum /*:int32_t*/
+{
+ /// @brief Eltwise sum.
+ cldnn_eltwise_sum,
+ /// @brief Eltwise subtract.
+ cldnn_eltwise_sub,
+ /// @brief Eltwise max.
+ cldnn_eltwise_max,
+ /// @brief Eltwise product (Hadamard).
+ cldnn_eltwise_prod,
+ /// @brief Eltwise div.
+ cldnn_eltwise_div,
+ /// @brief Eltwise min.
+ cldnn_eltwise_min,
+ /// @brief Eltwise pow.
+ cldnn_eltwise_pow,
+ /// @brief Eltwise mod.
+ cldnn_eltwise_mod
+} cldnn_eltwise_mode;
+
+/// @brief Performs elementwise operations (sum, subtract, max, product, div, min, pow or mod) on two input primitives
+/// Also supports built-in Relu @CLDNN_PRIMITIVE_DESC{activation} available by setting it in arguments.
+/// @note
+/// - both inputs have to have equal sizes in all dimensions
+/// - format of both inputs has to be the same
+CLDNN_BEGIN_PRIMITIVE_DESC(eltwise)
+/// @brief Primitive id containing output quantization factors per output feature map.
+cldnn_primitive_id output_calibration_factors;
+/// @brief Output quantization factor
+float output_quantization_factor;
+/// @brief Eltwise mode. See #cldnn_eltwise_mode.
+int32_t mode; /*cldnn_eltwise_mode*/
+/// @brief Blob-wise coefficient for SUM operation
+cldnn_float_arr coefficients;
+/// @brief Enables Relu activation.
+uint32_t with_activation;
+/// @brief Relu activation slope.
+float activation_negative_slope;
+CLDNN_END_PRIMITIVE_DESC(eltwise)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(eltwise);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* ELTWISE_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/embed.h b/inference-engine/thirdparty/clDNN/api/C/embed.h
new file mode 100644
index 000000000..199289d1f
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/embed.h
@@ -0,0 +1,51 @@
+/*
+// Copyright (c) 2018 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef EMBED_H
+#define EMBED_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+CLDNN_BEGIN_PRIMITIVE_DESC(embed)
+
+/// @brief Primitive id containing weights data.
+cldnn_primitive_id weights;
+/// @brief Primitive id containing bias data.
+cldnn_primitive_id bias;
+
+CLDNN_END_PRIMITIVE_DESC(embed)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(embed);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* EMBED_H */
diff --git a/inference-engine/thirdparty/clDNN/api/C/fully_connected.h b/inference-engine/thirdparty/clDNN/api/C/fully_connected.h
new file mode 100644
index 000000000..8ab083da8
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/fully_connected.h
@@ -0,0 +1,65 @@
+/*
+// Copyright (c) 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef FULLY_CONNECTED_H
+#define FULLY_CONNECTED_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Performs forward fully connected layer (inner product).
+/// Also supports built-in Relu @CLDNN_PRIMITIVE_DESC{activation} available by setting it in arguments.
+CLDNN_BEGIN_PRIMITIVE_DESC(fully_connected)
+/// @brief Enable Relu activation.
+uint32_t with_activation;
+/// @brief Relu activation slope.
+float activation_negative_slope;
+/// @brief Primitive id containing weights data.
+cldnn_primitive_id weights;
+/// @brief Primitive id containing bias data.
+cldnn_primitive_id bias;
+/// @brief Primitive id containing weights quantization factors per output feature map.
+cldnn_primitive_id weights_quantization_factors;
+/// @brief Primitive id containing output quantization factors per output feature map.
+cldnn_primitive_id output_calibration_factors;
+/// @brief Input quantization factor
+float input_quantization_factor;
+/// @brief Output quantization factor
+float output_quantization_factor;
+
+CLDNN_END_PRIMITIVE_DESC(fully_connected)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(fully_connected);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* FULLY_CONNECTED_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/fully_connected_grad_input.h b/inference-engine/thirdparty/clDNN/api/C/fully_connected_grad_input.h
new file mode 100644
index 000000000..d7b55086a
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/fully_connected_grad_input.h
@@ -0,0 +1,49 @@
+/*
+// Copyright (c) 2018 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef FULLY_CONNECTED_GRAD_INPUT_H
+#define FULLY_CONNECTED_GRAD_INPUT_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Performs backward fully connected layer (inner product) for input.
+CLDNN_BEGIN_PRIMITIVE_DESC(fully_connected_grad_input)
+/// @brief Primitive id containing weights data.
+cldnn_primitive_id weights;
+CLDNN_END_PRIMITIVE_DESC(fully_connected_grad_input)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(fully_connected_grad_input);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* FULLY_CONNECTED_GRAD_INPUT_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/fully_connected_grad_weights.h b/inference-engine/thirdparty/clDNN/api/C/fully_connected_grad_weights.h
new file mode 100644
index 000000000..ca576dbca
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/fully_connected_grad_weights.h
@@ -0,0 +1,57 @@
+/*
+// Copyright (c) 2018 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef FULLY_CONNECTED_GRAD_WEIGHTS_H
+#define FULLY_CONNECTED_GRAD_WEIGHTS_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Performs backward fully connected layer (inner product) for weights and biases.
+CLDNN_BEGIN_PRIMITIVE_DESC(fully_connected_grad_weights)
+/// @brief Primitive id containing weights data.
+cldnn_primitive_id weights;
+/// @brief Primitive id containing bias data.
+cldnn_primitive_id bias;
+/// @brief Primitive id containing fully connected gradient data. Used for proper order of gradient calculation. Leave empty if primitive is last in backward pass.
+cldnn_primitive_id fc_grad;
+/// @brief Primitive id containing weight gradient calculated in previous iteration. Memory size should be same as weights.
+cldnn_primitive_id prev_weights_grad;
+/// @brief Primitive id containing bias gradient calculated in previous iteration. Memory size should be same as bias.
+cldnn_primitive_id prev_bias_grad;
+CLDNN_END_PRIMITIVE_DESC(fully_connected_grad_weights)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(fully_connected_grad_weights);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* FULLY_CONNECTED_GRAD_WEIGHTS_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/input_layout.h b/inference-engine/thirdparty/clDNN/api/C/input_layout.h
new file mode 100644
index 000000000..a847557d8
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/input_layout.h
@@ -0,0 +1,54 @@
+/*
+// Copyright (c) 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef INPUT_LAYOUT_H
+#define INPUT_LAYOUT_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Provides input layout for a data to be passed later to network.
+/// @details This primitive allows to define the layout for input data
+/// which will be passed to network before execution.
+/// For example, network input images.
+/// @note User should call network::set_input_data() for every @p input_layout primitive before network execution.
+/// @sa network::set_input_data(), cldnn::data
+CLDNN_BEGIN_PRIMITIVE_DESC(input_layout)
+/// @brief Defines the layout for the data that will be passed to the network.
+cldnn_layout layout;
+CLDNN_END_PRIMITIVE_DESC(input_layout)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(input_layout);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* INPUT_LAYOUT_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/lookup_table.h b/inference-engine/thirdparty/clDNN/api/C/lookup_table.h
new file mode 100644
index 000000000..d29dc93f2
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/lookup_table.h
@@ -0,0 +1,61 @@
+/*
+// Copyright (c) 2018 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef LOOKUP_TABLE_H
+#define LOOKUP_TABLE_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Enum type to specify axis to return values from.
+typedef enum
+{
+ cldnn_lookup_table_batch,
+ cldnn_lookup_table_feature,
+ cldnn_lookup_table_x,
+ cldnn_lookup_table_y,
+ cldnn_lookup_table_xyf
+} cldnn_lookup_table_axis;
+
+/// @brief Returns values from data on which given indices are pointing at.
+CLDNN_BEGIN_PRIMITIVE_DESC(lookup_table)
+/// @brief Axis to return values from. If not set, returns data which index is pointing at in the flattened x, y, f dimensions for each batch.
+cldnn_lookup_table_axis axis;
+/// @brief Indicates that the primitive has user defined axis to return values from.
+uint32_t with_axis;
+CLDNN_END_PRIMITIVE_DESC(lookup_table)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(lookup_table);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* LOOKUP_TABLE_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/lrn.h b/inference-engine/thirdparty/clDNN/api/C/lrn.h
new file mode 100644
index 000000000..1f7dca8bf
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/lrn.h
@@ -0,0 +1,73 @@
+/*
+// Copyright (c) 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef LRN_H
+#define LRN_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum /*:int32_t*/
+{
+ cldnn_lrn_norm_region_across_channel,
+ cldnn_lrn_norm_region_within_channel
+} cldnn_lrn_norm_region;
+
+/// @brief Local response normalization
+/// @details LRN layer as described in chapter 3.3 of "ImageNet Classification with Deep Convolutional
+/// Neural Networks" by Krizhevsky, Sutskever, Hinton. @n See: http://www.cs.toronto.edu/~fritz/absps/imagenet.pdf
+/// @par Algorithm:
+/// b(i,x,y) = a(i,x,y) / (k+alpha*sum(min(N-1, i+n/2); j=max(0,i-n/2); a(j,x,y)^2))
+/// @par Where:
+/// @li b(i,x,y) : value at x, y from i-th feature map after normalization
+/// @li a(i,x,y) : value at x, y from i-th feature map before normalization
+/// @li N : number of feature maps
+/// @li n : size of normalization
+/// @li k, alpha, beta : hyper parameters (equal to 2, 10e-4, 0.75 in paper).
+CLDNN_BEGIN_PRIMITIVE_DESC(lrn)
+/// @brief Size of normalization.
+uint32_t size;
+/// @brief Hyper parameter "k".
+float k;
+/// @brief Hyper parameter "alpha".
+float alpha;
+/// @brief Hyper parameter "beta".
+float beta;
+/// @brief Normalize across or within channel
+cldnn_lrn_norm_region norm_region;
+CLDNN_END_PRIMITIVE_DESC(lrn)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(lrn);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* LRN_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/lstm.h b/inference-engine/thirdparty/clDNN/api/C/lstm.h
new file mode 100644
index 000000000..4abd78118
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/lstm.h
@@ -0,0 +1,136 @@
+/*
+// Copyright (c) 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef LSTM_H
+#define LSTM_H
+
+#include <stdbool.h>
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum /*:int32_t*/
+{
+ cldnn_lstm_offset_order_iofz = 0, // ONNX
+ cldnn_lstm_offset_order_ifoz // Caffe
+} cldnn_lstm_offset_order;
+
+
+/// @brief Performs forward Long Short-Term Memory (LSTM) layer.
+/// @details The current implementation of LSTM supports Peepholes.
+/// it = f(Xt*(Wi^T) + Ht-1*Ri + Pi (.) Ct-1 + Wbi + Rbi)
+/// ft = f(Xt*(Wf^T) + Ht-1*Rf + Pf (.) Ct-1 + Wbf + Rbf)
+/// ct = g(Xt*(Wc^T) + Ht-1*Rc + Wbc + Rbc)
+/// Ct = ft (.) Ct-1 + it (.) ct
+/// ot = f(Xt*(Wo^T) + Ht-1*Ro + Po (.) Ct + Wbo + Rbo)
+/// Ht = ot (.) h(Ct)
+/// Where f = Sigmoid, g = Tanh, and h = Tanh.
+CLDNN_BEGIN_PRIMITIVE_DESC(lstm)
+/// @brief Array of primitive ids containing weight matrices for input, output, forget, and cell gates.
+cldnn_primitive_id weights;
+/// @brief Array of primitive ids containing recurrent weight matrices for input, output, forget, and cell gates.
+cldnn_primitive_id recurrent;
+/// @brief Array of primitive ids containing bias vectors for input, output, forget, and cell gates.
+cldnn_primitive_id bias;
+/// @brief Array of primitive ids containing the initial value of the hidden data (Ht-1).
+cldnn_primitive_id initial_hidden;
+/// @brief Array of primitive ids containing the initial value of the cell state data (Ct-1).
+cldnn_primitive_id initial_cell;
+/// @brief Array of primitive ids containing peephole weight vectors for input, output, and forget gates.
+cldnn_primitive_id peepholes;
+/// @brief Cell clip threshold T. It is applied to the input of activations [-T, T]. No clip is applied if it is not specified.
+float clip;
+/// @brief Couple the input and forget gates if input_forget is 1. Default is 0.
+bool input_forget;
+/// @brief A list of 3 activation functions for the input, output, forget, cell, and hidden.
+cldnn_activation_func activations[3];
+/// @brief Optional scaling values used by some activation functions. The values are consumed in the order of activation functions.
+cldnn_activation_additional_params activation_params[3];
+/// @brief Weights, recurrent weights, and biases order. [iofz] : ONNX, [ifoz] : Caffe
+cldnn_lstm_offset_order offset_order;
+// NOT SUPPORTED YET
+// /// @brief Number of directions default = 1, bidirectional = 2.
+// uint32_t num_directions;
+// /// @brief The sequence output for the hidden. This is not clearly specified in the ONNX definition.
+// uint32_t output_sequence;
+CLDNN_END_PRIMITIVE_DESC(lstm)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(lstm);
+
+
+
+/// @brief LSTM Layer GEMM helper primitive.
+/// @details The current helper primitive performs fused GEMM operations.
+CLDNN_BEGIN_PRIMITIVE_DESC(lstm_gemm)
+/// @brief Array of primitive ids containing weight matrices for input, output, forget, and cell gates.
+cldnn_primitive_id weights;
+/// @brief Array of primitive ids containing recurrent weight matrices for input, output, forget, and cell gates.
+cldnn_primitive_id recurrent;
+/// @brief Array of primitive ids containing bias vectors for input, output, forget, and cell gates.
+cldnn_primitive_id bias;
+/// @brief Array of primitive ids containing the initial value of the hidden data (Ht-1).
+cldnn_primitive_id hidden;
+// NOT SUPPORTED YET
+// /// @brief Number of directions default = 1, bidirectional = 2.
+// uint32_t num_directions;
+CLDNN_END_PRIMITIVE_DESC(lstm_gemm)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(lstm_gemm);
+
+
+
+/// @brief LSTM Layer element-wise helper primitive.
+/// @details The current helper primitive performs fused element-wise operations.
+CLDNN_BEGIN_PRIMITIVE_DESC(lstm_elt)
+/// @brief Array of primitive ids containing the initial value of the cell state data (Ct-1).
+cldnn_primitive_id cell;
+/// @brief Cell clip threshold T. It is applied to the input of activations [-T, T]. No clip is applied if it is not specified.
+float clip;
+/// @brief Couple the input and forget gates if input_forget is 1. Default is 0.
+bool input_forget;
+/// @brief A list of 3 activation functions for the input, output, forget, cell, and hidden.
+cldnn_activation_func activations[3];
+/// @brief Optional scaling values used by some activation functions. The values are consumed in the order of activation functions.
+cldnn_activation_additional_params activation_params[3];
+/// @brief Weights, recurrent weights, and biases order. [iofz] : ONNX, [ifoz] : Caffe
+cldnn_lstm_offset_order offset_order;
+// NOT SUPPORTED YET
+// /// @brief Number of directions default = 1, bidirectional = 2.
+// uint32_t num_directions;
+// /// @brief The sequence output for the hidden. This is not clearly specified in the ONNX definition.
+// uint32_t output_sequence;
+CLDNN_END_PRIMITIVE_DESC(lstm_elt)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(lstm_elt);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* LSTM_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/max_unpooling.h b/inference-engine/thirdparty/clDNN/api/C/max_unpooling.h
new file mode 100644
index 000000000..d6dc0760b
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/max_unpooling.h
@@ -0,0 +1,60 @@
+/*
+// Copyright (c) 2018 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef MAX_UNPOOLING_H
+#define MAX_UNPOOLING_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Performs "max_unpooling" operation.
+/// @details Reverse operation of max pooling, based on the argmax data where indices of each max pooling region are stored.
+CLDNN_BEGIN_PRIMITIVE_DESC(max_unpooling)
+/// @brief Primitive id which contains indices of each max pooling region. Indices must be in flattened bfyx format with no padding. Needs to be fp32 data type.
+cldnn_primitive_id argmax;
+/// @brief Defines a shift, relative to (0,0) position of the input buffer, where (0,0) point of the pooling window should start calculations. Used only for output size computation.
+cldnn_tensor input_offset;
+/// @brief Defines shift in input buffer between adjacent calculations of output values. Used only for output size computation.
+cldnn_tensor stride;
+/// @brief Pooling kernel size. Used only for output size computation.
+cldnn_tensor size;
+/// @brief Indicates that the primitive has user-defined output size (non-zero value).
+uint32_t with_output_size;
+/// @brief User-defined output data size of the primitive (w/o padding).
+cldnn_tensor output_size;
+CLDNN_END_PRIMITIVE_DESC(max_unpooling)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(max_unpooling);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* MAX_UNPOOLING_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/mutable_data.h b/inference-engine/thirdparty/clDNN/api/C/mutable_data.h
new file mode 100644
index 000000000..74af6b7ac
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/mutable_data.h
@@ -0,0 +1,63 @@
+/*
+// Copyright (c) 2018 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef MUTABLE_DATA_H
+#define MUTABLE_DATA_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Enum type to specify function for weights filling.
+typedef enum
+{
+ zero,
+ xavier
+} cldnn_filler_type;
+
+/// @brief Provides mutable data.
+/// @details This primitive allows to pass data which can be written to during training.
+/// For example, weights and biases for scoring networks.
+/// This primitive can be also set as other primitive's output. In this case the underlying buffer will be the same in mutable_data and preceding primitive.
+CLDNN_BEGIN_PRIMITIVE_DESC(mutable_data)
+/// @brief Memory object which contains data.
+/// @note If memory is attached by ::cldnn_attach_memory(),
+/// attached buffer should be valid on ::cldnn_build_network() call.
+cldnn_memory mem;
+/// @brief Specifies function which will be used to fill data.
+cldnn_filler_type fill_type;
+CLDNN_END_PRIMITIVE_DESC(mutable_data)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(mutable_data);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* MUTABLE_DATA_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/mvn.h b/inference-engine/thirdparty/clDNN/api/C/mvn.h
new file mode 100644
index 000000000..b324b1687
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/mvn.h
@@ -0,0 +1,55 @@
+/*
+// Copyright (c) 2018 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef MVN_H
+#define MVN_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Mean Variance Normalization primitive.
+/// @details Normalizes the input to have 0-mean and/or unit (1) variance.
+
+CLDNN_BEGIN_PRIMITIVE_DESC(mvn)
+/// @brief Determines if the normalization is done across or within channels.
+uint32_t across_channels;
+/// @brief Determines if normalize variance is applied.
+uint32_t normalize_variance;
+/// @brief Epsilon for not dividing by zero while normalizing.
+float epsilon;
+CLDNN_END_PRIMITIVE_DESC(mvn)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(mvn);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* MVN_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/normalize.h b/inference-engine/thirdparty/clDNN/api/C/normalize.h
new file mode 100644
index 000000000..14d542b41
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/normalize.h
@@ -0,0 +1,70 @@
+/*
+// Copyright (c) 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef NORMALIZE_H
+#define NORMALIZE_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Normalizes the input using an L2 norm and multiplies the output with scale value.
+/// The scale can be equal for all channels or one scale per channel.
+/// @details The L2 norm is computed as:<br>
+/// Across spatial mode (across_spatial=true)-<br>
+/// norm(i,x,y) = sqrt( &Sigma;( in(f,w,h)^2 ) + epsilon ) where f in range (0,num_of_features), w in range (0,input_width), h in range (0,input_height).<br>
+/// The summation is performed over all the pixels in the batch.<br>
+/// Within spatial mode (across_spatial=false)-<br>
+/// norm(i,x,y) = sqrt( &Sigma;( in(f,x,y)^2 ) + epsilon ) where f in range (0,num_of_features).<br>
+/// The summation is performed over this (x,y) position on all the features.<br>
+/// @par Algorithm:
+/// out(i,x,y) = ( in(i,x,y) / norm(i,x,y) ) * scale(i)
+/// @par Where:
+/// @li out(i,x,y) : value at x, y from i-th feature map after normalization.
+/// @li in(i,x,y) : value at x, y from i-th feature map before normalization.
+/// @li norm(i,x,y) : L2 norm as described above.
+/// @li scale(i) : the scale value of the i-th feature map.
+CLDNN_BEGIN_PRIMITIVE_DESC(normalize)
+/// @brief Scale input primitive id with values needed for scaling after the normalization.
+/// Scale x dimension should be 1 (if all channels have the same scale) or equal to input feature size (one scale per channel).
+/// All other dimensions should be 1.
+cldnn_primitive_id scale_input;
+/// @brief Determines if the normalization is done across or within spatial (see documentation above).
+uint32_t across_spatial;
+/// @brief Epsilon for not dividing by zero while normalizing.
+float epsilon;
+CLDNN_END_PRIMITIVE_DESC(normalize)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(normalize);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* NORMALIZE_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/permute.h b/inference-engine/thirdparty/clDNN/api/C/permute.h
new file mode 100644
index 000000000..e4c7b574a
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/permute.h
@@ -0,0 +1,56 @@
+/*
+// Copyright (c) 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef permute_H
+#define permute_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Permutes data in the memory, with respect to provided order.
+/// @details Permute order is set as vector with positions meaning corresponding to tensor.
+/// Vector values represent dimensions to be permuted in bfyx format. For example: <br>
+/// input_dimensions = tensor{ 5, 3, 6, 3 } <br>
+/// permute_order = { 2, 3, 1, 0 } <br>
+/// output_dimensions = { 6, 3, 3, 5 } <br>
+/// <br>
+/// When permute_order is { 0, 1, 2, 3 } then input_dimensions = output_dimensions
+CLDNN_BEGIN_PRIMITIVE_DESC(permute)
+/// @brief Array of permuted output order in bfyx format.
+cldnn_uint16_t_arr permute_order;
+CLDNN_END_PRIMITIVE_DESC(permute)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(permute);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* permute_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/pooling.h b/inference-engine/thirdparty/clDNN/api/C/pooling.h
new file mode 100644
index 000000000..2a458c4da
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/pooling.h
@@ -0,0 +1,75 @@
+/*
+// Copyright (c) 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef POOLING_H
+#define POOLING_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Select method for Pooling layer ( @CLDNN_PRIMITIVE_DESC{pooling} ).
+typedef enum /*:int32_t*/
+{
+ /// @brief Maximum-pooling method.
+ cldnn_pooling_max,
+ /// @brief Average-pooling method.
+ cldnn_pooling_average,
+ /// @brief Average-pooling method without values which are outside of the input.
+ cldnn_pooling_average_no_padding,
+ /// @brief Maximum-pooling method with additional buffer to store argmax indices.
+ cldnn_pooling_max_with_argmax
+} cldnn_pooling_mode;
+
+/// @brief Performs "pooling" operation which is a form of non-linear down-sampling.
+/// @details Pools the input image by taking the max, average, etc. within regions.
+CLDNN_BEGIN_PRIMITIVE_DESC(pooling)
+/// @brief Primitive id which contains indices of each max pooling region. Indices must be in flattened bfyx format with no padding. Needs to be fp32 data type.
+cldnn_primitive_id argmax;
+/// @brief Pooling method. See #cldnn_pooling_mode.
+int32_t mode;
+/// @brief Defines a shift, relative to (0,0) position of the input buffer, where (0,0) point of the pooling window should start calculations.
+cldnn_tensor input_offset;
+/// @brief Defines shift in input buffer between adjacent calculations of output values.
+cldnn_tensor stride;
+/// @brief Pooling kernel size.
+cldnn_tensor size;
+/// @brief Indicates that the primitive has user-defined output size (non-zero value).
+uint32_t with_output_size;
+/// @brief User-defined output data size of the primitive (w/o padding).
+cldnn_tensor output_size;
+CLDNN_END_PRIMITIVE_DESC(pooling)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(pooling);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* POOLING_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/prior_box.h b/inference-engine/thirdparty/clDNN/api/C/prior_box.h
new file mode 100644
index 000000000..b7b81f4f2
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/prior_box.h
@@ -0,0 +1,72 @@
+/*
+// Copyright (c) 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef PRIOR_BOX_H
+#define PRIOR_BOX_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Generates a set of default bounding boxes with different sizes and aspect ratios.
+/// @details The prior-boxes are shared across all the images in a batch (since they have the same width and height).
+/// First feature stores the mean of each prior coordinate.
+/// Second feature stores the variance of each prior coordinate.
+CLDNN_BEGIN_PRIMITIVE_DESC(prior_box)
+/// @brief Image width and height.
+cldnn_tensor img_size;
+/// @brief Minimum box sizes in pixels.
+cldnn_float_arr min_sizes;
+/// @brief Maximum box sizes in pixels.
+cldnn_float_arr max_sizes;
+/// @brief A list of aspect ratios. Duplicate ratios will be ignored.
+cldnn_float_arr aspect_ratios;
+/// @brief If not 0, will flip each aspect ratio. For example, if there is aspect ratio "r", aspect ratio "1.0/r" will be generated as well.
+uint32_t flip;
+/// @brief If not 0, will clip the prior so that it is within [0, 1].
+uint32_t clip;
+/// @brief Variance for adjusting the prior boxes.
+cldnn_float_arr variance;
+/// @brief Step width.
+float step_width;
+/// @brief Step height.
+float step_height;
+/// @brief Offset to the top left corner of each cell.
+float offset;
+/// @brief If false, only the first min_size is scaled by aspect_ratios.
+uint32_t scale_all_sizes;
+CLDNN_END_PRIMITIVE_DESC(prior_box)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(prior_box);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* PRIOR_BOX_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/proposal.h b/inference-engine/thirdparty/clDNN/api/C/proposal.h
new file mode 100644
index 000000000..23e449780
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/proposal.h
@@ -0,0 +1,56 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef PROPOSAL_H
+#define PROPOSAL_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define CLDNN_ROI_VECTOR_SIZE 5
+
+CLDNN_BEGIN_PRIMITIVE_DESC(proposal)
+ int max_proposals;
+ float iou_threshold;
+ int min_bbox_size;
+ int feature_stride;
+ int pre_nms_topn;
+ int post_nms_topn;
+ cldnn_float_arr ratios;
+ cldnn_float_arr scales;
+CLDNN_END_PRIMITIVE_DESC(proposal)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(proposal);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* PROPOSAL_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/region_yolo.h b/inference-engine/thirdparty/clDNN/api/C/region_yolo.h
new file mode 100644
index 000000000..5bb0786a3
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/region_yolo.h
@@ -0,0 +1,62 @@
+/*
+// Copyright (c) 2018 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef REGION_YOLO_H
+#define REGION_YOLO_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ /// @brief region softmax specific for yolo2 topology
+ /// @details
+ /// @par Algorithm:
+ ///
+ /// @par Where:
+ ///
+ CLDNN_BEGIN_PRIMITIVE_DESC(region_yolo)
+ /// @brief parameter coords
+ uint32_t coords;
+ /// @brief parameter classes
+ uint32_t classes;
+ /// @brief Number of anchors
+ uint32_t num;
+ /// @brief Apply softmax after logistic
+ uint32_t do_softmax;
+ /// @brief Number of really used anchors
+ uint32_t mask_size;
+ CLDNN_END_PRIMITIVE_DESC(region_yolo)
+
+ CLDNN_DECLARE_PRIMITIVE_TYPE_ID(region_yolo);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* REGION_YOLO_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/reorder.h b/inference-engine/thirdparty/clDNN/api/C/reorder.h
new file mode 100644
index 000000000..67c504f35
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/reorder.h
@@ -0,0 +1,60 @@
+/*
+// Copyright (c) 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef REORDER_H
+#define REORDER_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Changes how data is ordered in memory. Value type is not changed & all information is preserved.
+/// @details Corresponding values are bitwise equal before/after reorder.
+/// Also merged with subtraction layer, which can subtract, multiply or divide values based on mean_mode value, while doing reordering.
+/// NOTE THAT THIS WILL SUBTRACT THE SAME VALUES FROM EACH BATCH.
+CLDNN_BEGIN_PRIMITIVE_DESC(reorder)
+/// @brief Requested memory format.
+cldnn_format_type output_format;
+/// @brief Requested memory data type.
+cldnn_data_type output_data_type;
+/// @brief Primitive id to get mean subtract values. Ignored if subtract_per_feature is set.
+cldnn_primitive_id mean_subtract;
+/// @brief Array of mean subtract values.
+cldnn_float_arr subtract_per_feature;
+/// @brief Mode of mean execution
+cldnn_reorder_mean_mode mean_mode;
+CLDNN_END_PRIMITIVE_DESC(reorder)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(reorder);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* REORDER_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/reorg_yolo.h b/inference-engine/thirdparty/clDNN/api/C/reorg_yolo.h
new file mode 100644
index 000000000..90b67712d
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/reorg_yolo.h
@@ -0,0 +1,55 @@
+/*
+// Copyright (c) 2018 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef REORG_YOLO_H
+#define REORG_YOLO_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ /// @brief yolo2 topology specific data reorganization primitive
+ /// @details
+ /// @par Algorithm:
+ ///
+ /// @par Where:
+ ///
+ CLDNN_BEGIN_PRIMITIVE_DESC(reorg_yolo)
+ /// @brief Parameter stride.
+ uint32_t stride;
+
+ CLDNN_END_PRIMITIVE_DESC(reorg_yolo)
+
+ CLDNN_DECLARE_PRIMITIVE_TYPE_ID(reorg_yolo);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* REORG_YOLO_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/reshape.h b/inference-engine/thirdparty/clDNN/api/C/reshape.h
new file mode 100644
index 000000000..59aa0e0b1
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/reshape.h
@@ -0,0 +1,52 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef RESHAPE_H
+#define RESHAPE_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Changes information about the input's layout, effectively creating new memory which shares the underlying buffer
+/// but is interpreted in a different way (different shape).
+/// @note reshape primitive is supposed only to reinterpret the shape of the memory, therefore it is not possible to change
+/// either the data type or the format of the input buffer, and the total number of elements in input and output (excluding paddings) must match.
+/// Please note that there is no guarantee that underlying data will be in proper format if primitive was explicitly added to output list.
+CLDNN_BEGIN_PRIMITIVE_DESC(reshape)
+/// @brief Requested memory shape.
+cldnn_tensor output_shape;
+CLDNN_END_PRIMITIVE_DESC(reshape)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(reshape);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* RESHAPE_H */
diff --git a/inference-engine/thirdparty/clDNN/api/C/roi_pooling.h b/inference-engine/thirdparty/clDNN/api/C/roi_pooling.h
new file mode 100644
index 000000000..846d1ee94
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/roi_pooling.h
@@ -0,0 +1,59 @@
+/*
+// Copyright (c) 2017 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef ROI_POOLING_H
+#define ROI_POOLING_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+CLDNN_BEGIN_PRIMITIVE_DESC(roi_pooling)
+/// @brief Pooling method. See #cldnn_pooling_mode.
+int32_t mode;
+
+/// @brief Output width.
+int pooled_width;
+/// @brief Output height.
+int pooled_height;
+/// @brief Ratio of the coordinates used in RoIs to the width (and height) of the input data.
+float spatial_scale;
+
+/// @brief Group size as defined by PSRoIPooling when > 0; when 0, regular RoIPooling is performed.
+int group_sz;
+CLDNN_END_PRIMITIVE_DESC(roi_pooling)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(roi_pooling);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* ROI_POOLING_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/scale.h b/inference-engine/thirdparty/clDNN/api/C/scale.h
new file mode 100644
index 000000000..26f7b2501
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/scale.h
@@ -0,0 +1,61 @@
+/*
+// Copyright (c) 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef SCALE_H
+#define SCALE_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Performs elementwise product of input and scale_input.
+/// @details Each dimension of the scale input should be equal to the corresponding input dimension, or be 1 so it is broadcast over that dimension.<br>
+/// Input size : 2x3x4x5(BFYX)<br>
+/// Possible scale inputs sizes :<br>
+/// 2x3x4x5 - works the same as(axis == 0 == -4) in caffe<br>
+/// 1x3x4x5 - works the same as(axis == 1 == -3) in caffe<br>
+/// 1x1x4x5 - works the same as(axis == 2 == -2) in caffe<br>
+/// 1x1x1x5 - works the same as(axis == 3 == -1) in caffe<br>
+/// 1x1x1x1 - works the same as empty shape(scalar) in caffe<br>
+/// When scale_input is the same as input, the behavior is the same as @CLDNN_PRIMITIVE_DESC{eltwise} with product operation.<br>
+/// Performs scale over feature when the scale feature size is equal to input feature size.<br>
+/// Performs scale over feature in batch when the scale feature and scale batch sizes are equal to input feature and input batch sizes.<br>
+/// Optionally it can also add provided biases by setting bias_term.<br>
+CLDNN_BEGIN_PRIMITIVE_DESC(scale)
+/// @brief Primitive id containing bias data.
+cldnn_primitive_id bias;
+CLDNN_END_PRIMITIVE_DESC(scale)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(scale);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* SCALE_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/scale_grad_input.h b/inference-engine/thirdparty/clDNN/api/C/scale_grad_input.h
new file mode 100644
index 000000000..73050e770
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/scale_grad_input.h
@@ -0,0 +1,48 @@
+/*
+// Copyright (c) 2018 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef SCALE_GRAD_INPUT_H
+#define SCALE_GRAD_INPUT_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Performs scale primitive backward for input.
+CLDNN_BEGIN_PRIMITIVE_DESC(scale_grad_input)
+
+CLDNN_END_PRIMITIVE_DESC(scale_grad_input)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(scale_grad_input);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* SCALE_GRAD_INPUT_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/scale_grad_weights.h b/inference-engine/thirdparty/clDNN/api/C/scale_grad_weights.h
new file mode 100644
index 000000000..a50fe5cf7
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/scale_grad_weights.h
@@ -0,0 +1,57 @@
+/*
+// Copyright (c) 2018 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef SCALE_GRAD_WEIGHTS_H
+#define SCALE_GRAD_WEIGHTS_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Performs scale layer backward for scale_input and biases.
+CLDNN_BEGIN_PRIMITIVE_DESC(scale_grad_weights)
+/// @brief Scale input primitive id.
+cldnn_primitive_id scale_input;
+/// @brief Primitive id containing bias data.
+cldnn_primitive_id bias;
+/// @brief Primitive id containing scale gradient data calculated in previous iteration.
+cldnn_primitive_id prev_scale_grad;
+/// @brief Primitive id containing bias gradient data calculated in previous iteration.
+cldnn_primitive_id prev_bias_grad;
+/// @brief Primitive id which uses weights and biases updated in this primitive.
+cldnn_primitive_id scale_grad;
+CLDNN_END_PRIMITIVE_DESC(scale_grad_weights)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(scale_grad_weights);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* SCALE_GRAD_WEIGHTS_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/softmax.h b/inference-engine/thirdparty/clDNN/api/C/softmax.h
new file mode 100644
index 000000000..bf5e45d11
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/softmax.h
@@ -0,0 +1,72 @@
+/*
+// Copyright (c) 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef SOFTMAX_H
+#define SOFTMAX_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Enum type to specify softmax's normalization scope (see cldnn_softmax_desc::dimension).
+typedef enum
+{
+ cldnn_softmax_normalize_f,
+ cldnn_softmax_normalize_x,
+ cldnn_softmax_normalize_y,
+ cldnn_softmax_normalize_fyx,
+} cldnn_softmax_dimension;
+
+/// @brief Normalizes results so they sum to 1. The scope of normalization is defined by a member @p dimension.
+/// @details
+/// @par Algorithm:
+/// b = e^a/sum(N-1; j=0; e^j)
+/// @par Where:
+/// @li N : number of values to normalize
+/// @li b : value after normalization
+/// @li a : value before normalization
+CLDNN_BEGIN_PRIMITIVE_DESC(softmax)
+/// @brief Defines a scope of a single softmax normalization.
+/// @details
+/// Being given a 4-dimensional input, which consists of b,f,y,x dimensions, softmax normalizes data which are divided into multiple independent sets.
+/// Specific behavior is determined by this parameter, as follows:
+/// - when set to @link cldnn_softmax_dimension cldnn_softmax_normalize_x @endlink each input row is normalized independently,
+/// - when set to @link cldnn_softmax_dimension cldnn_softmax_normalize_y @endlink each input column is normalized independently,
+/// - when set to @link cldnn_softmax_dimension cldnn_softmax_normalize_f @endlink each in-depth vector of input is normalized independently,
+/// - when set to @link cldnn_softmax_dimension cldnn_softmax_normalize_fyx @endlink each 3d image within input is normalized independently,
+cldnn_softmax_dimension dimension;
+CLDNN_END_PRIMITIVE_DESC(softmax)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(softmax);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* SOFTMAX_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/softmax_loss_grad.h b/inference-engine/thirdparty/clDNN/api/C/softmax_loss_grad.h
new file mode 100644
index 000000000..76c7ab26b
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/softmax_loss_grad.h
@@ -0,0 +1,49 @@
+/*
+// Copyright (c) 2018 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef SOFTMAX_LOSS_GRAD_H
+#define SOFTMAX_LOSS_GRAD_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Backward pass for Softmax log loss.
+/// @details The output values are the same as input_prob, except for the correct one based on the label which is subtracted by 1.
+CLDNN_BEGIN_PRIMITIVE_DESC(softmax_loss_grad)
+
+CLDNN_END_PRIMITIVE_DESC(softmax_loss_grad)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(softmax_loss_grad);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* SOFTMAX_LOSS_GRAD_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/split.h b/inference-engine/thirdparty/clDNN/api/C/split.h
new file mode 100644
index 000000000..190fe29c9
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/split.h
@@ -0,0 +1,71 @@
+/*
+// Copyright (c) 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef SPLIT_H
+#define SPLIT_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Performs split operation on input.
+/// @details splits the input data into n parts, for each user provides name and offsets.
+/// @n User cannot use split primitive directly.
+/// @n It is needed to refer to the output ids with the name "<split_prim_id>:<split_output_id>".
+/// @n
+/// @n\b Assumptions
+/// @n - offsets1 < offsets2 < offsets3 < ...
+/// @n - size[n] = offsets[n+1] - offsets[n];
+/// @n - last element: size[n] = split_input.size - offsets[n];
+/// @n - no buffer overlapping, as the output size is calculated using offset and input size
+/// @n - split primitive id cannot be used by any other primitive (user needs to use output_ids only)
+/// @n Breaking any of these conditions will cause an exception to be thrown.
+/// @n
+/// @n\b Example:
+/// @n Splitting output to 2 parts by the features:
+/// @n input_size = { 2, 4, 3, 5 };
+/// @n split_id = "split";
+/// @n output_ids_offsets[0] = { "out0", { 0,0,0,0 } };
+/// @n output_ids_offsets[1] = { "out1", { 0,2,0,0 } };
+/// @n After split there would be 2 primitives: "split:out0" and "split:out1" which contain 2 feature maps (lower and upper)
+
+CLDNN_BEGIN_PRIMITIVE_DESC(split)
+/// @brief List of output_ids.
+cldnn_primitive_id_arr output_ids;
+/// @brief Array of tensors with offsets.
+cldnn_tensor_arr output_offsets;
+CLDNN_END_PRIMITIVE_DESC(split)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(split);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* SPLIT_H */
+
diff --git a/inference-engine/thirdparty/clDNN/api/C/upsampling.h b/inference-engine/thirdparty/clDNN/api/C/upsampling.h
new file mode 100644
index 000000000..a9a3e28ea
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/api/C/upsampling.h
@@ -0,0 +1,67 @@
+/*
+// Copyright (c) 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+///////////////////////////////////////////////////////////////////////////////////////////////////
+#ifndef upsampling_H
+#define upsampling_H
+
+#include "cldnn.h"
+/// @addtogroup c_api C API
+/// @{
+/// @addtogroup c_topology Network Topology
+/// @{
+/// @addtogroup c_primitives Primitives
+/// @{
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/// @brief Sample mode for upsampling layer ( @CLDNN_PRIMITIVE_DESC{upsampling} ).
+typedef enum /*:int32_t*/
+{
+ /// @brief upsampling nearest neighbor.
+ cldnn_upsampling_nearest,
+ /// @brief upsampling bilinear.
+ cldnn_upsampling_bilinear,
+} cldnn_upsampling_sample_type;
+
+/// @brief Performs nearest neighbor/bilinear upsampling
+/// Also supports built-in Relu @ref activation available by setting it in arguments.
+CLDNN_BEGIN_PRIMITIVE_DESC(upsampling)
+/// @brief Upsampling scale.
+uint32_t scale;
+/// @brief Input filter. Only used by bilinear sample_type.
+uint32_t num_filter;
+/// @brief Upsampling method (nearest neighbor/bilinear).
+int32_t sample_type; /*cldnn_upsampling_sample_type*/
+/// @brief Enables Relu activation.
+uint32_t with_activation;
+/// @brief Relu activation slope.
+float activation_negative_slope;
+CLDNN_END_PRIMITIVE_DESC(upsampling)
+
+CLDNN_DECLARE_PRIMITIVE_TYPE_ID(upsampling);
+
+#ifdef __cplusplus
+}
+#endif
+
+/// @}
+/// @}
+/// @}
+#endif /* upsampling_H */
+