author    Alexey Suhov <alexey.suhov@intel.com>  2019-10-04 19:26:43 +0300
committer Alexey Suhov <alexey.suhov@intel.com>  2019-10-04 19:26:43 +0300
commit    0923303e0201c5b59386ab146d0e30b2ef79272d (patch)
tree      dd4d200dd17f110de7ce141063e726a6bb2dd9b2 /inference-engine/thirdparty/clDNN/src/include
parent    ba6e22b1b5ee4cbefcc30e8d9493cddb0bb3dfdf (diff)
Publishing 2019 R3 content
Diffstat (limited to 'inference-engine/thirdparty/clDNN/src/include')
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/activation_grad_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/activation_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/api_impl.h | 125
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/apply_adam_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/arg_max_min_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/average_unpooling_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/batch_norm_grad_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/batch_norm_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/binary_convolution_inst.h | 46
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/border_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/broadcast_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/concatenation_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/condition_inst.h | 8
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/contract_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/convolution_grad_weights_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/convolution_inst.h | 70
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/crop_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/custom_gpu_primitive_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/data_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/deconvolution_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/deformable_convolution_inst.h | 4
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/depth_to_space_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/detection_output_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/eltwise_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/embed_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/engine_impl.h | 11
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/error_handler.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/event_impl.h | 23
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/fully_connected_grad_input_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/fully_connected_grad_weights_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/fully_connected_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/fused_conv_bn_scale_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/fused_conv_eltwise_inst.h | 6
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/gather_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/gather_tree_inst.h | 49
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/gemm_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/generic_layer.h | 35
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/generic_layer.hpp | 18
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/index_select_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/input_layout_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/internal_primitive.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/internal_primitive_type_base.h | 7
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/kernel_selector_helper.h | 90
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/layout_optimizer.h | 187
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/lookup_table_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/lrn_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/lstm_dynamic_input_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/lstm_dynamic_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/lstm_dynamic_timeloop_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/lstm_elt_inst.h | 6
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/lstm_gemm_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/lstm_inst.h | 10
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/max_unpooling_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/memory_impl.h | 18
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/memory_pool.h | 5
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/meta_utils.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/mutable_data_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/mvn_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/network_impl.h | 5
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/normalize_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/one_hot_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/pass_manager.h | 54
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/permute_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/pooling_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/primitive_inst.h | 14
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/primitive_type.h | 36
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/primitive_type_base.h | 9
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/prior_box_inst.h | 4
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/program_helpers.h | 14
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/program_impl.h | 14
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/program_node.h | 91
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/proposal_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/pyramid_roi_align_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/quantize_inst.h | 10
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/reduce_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/region_yolo_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/reorder_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/reorg_yolo_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/reshape_inst.h | 10
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/reverse_sequence_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/roi_pooling_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/scale_grad_input_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/scale_grad_weights_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/scale_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/select_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/shuffle_channels_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/sliding_window_utils.h | 4
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/softmax_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/softmax_loss_grad_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/split_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/strided_slice_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/tile_inst.h | 2
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/to_string_utils.h | 13
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/topology_impl.h | 9
-rw-r--r--  inference-engine/thirdparty/clDNN/src/include/upsampling_inst.h | 2
95 files changed, 502 insertions, 629 deletions
diff --git a/inference-engine/thirdparty/clDNN/src/include/activation_grad_inst.h b/inference-engine/thirdparty/clDNN/src/include/activation_grad_inst.h
index 993a7f62e..adc349025 100644
--- a/inference-engine/thirdparty/clDNN/src/include/activation_grad_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/activation_grad_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/activation_grad.hpp"
+#include "api/activation_grad.hpp"
#include "primitive_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/activation_inst.h b/inference-engine/thirdparty/clDNN/src/include/activation_inst.h
index 53e9f3459..d5f5e4085 100644
--- a/inference-engine/thirdparty/clDNN/src/include/activation_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/activation_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/activation.hpp"
+#include "api/activation.hpp"
#include "primitive_inst.h"
#include <memory>
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/api_impl.h b/inference-engine/thirdparty/clDNN/src/include/api_impl.h
deleted file mode 100644
index 456a24b4c..000000000
--- a/inference-engine/thirdparty/clDNN/src/include/api_impl.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
-// Copyright (c) 2016 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-*/
-
-///////////////////////////////////////////////////////////////////////////////////////////////////
-#pragma once
-
-#include "api/C/cldnn.h"
-#include "api/CPP/cldnn_defs.h"
-
-#include <functional>
-#include <stdexcept>
-#include <string>
-
-#define API_CAST(api_type, impl_type) \
- inline api_type api_cast(impl_type* value) { return reinterpret_cast<api_type>(value); } \
- inline impl_type* api_cast(api_type value) { return reinterpret_cast<impl_type*>(value); }
-
-namespace cldnn {
-struct last_err {
- /// @breif Sets the message of last error
- void set_last_error_message(const std::string& msg) { _msg = msg; }
-
- void set_last_exception(const std::exception& ex) { _msg = ex.what(); }
-
- /// @breif Gets the message of last error
- const std::string& get_last_error_message() { return _msg; }
- static last_err& instance();
-
-private:
- std::string _msg;
- last_err() : _msg("Operation succeed") {}
-};
-
-// float <--> half convertors
-float half_to_float(uint16_t value);
-uint16_t float_to_half(float value);
-} // namespace cldnn
-
-template <typename T>
-T exception_handler(cldnn_status default_error,
- cldnn_status* status,
- const T& default_result,
- std::function<T()> func) {
- // NOTE for implementer: status should not be modified after successful func() call
- try {
- if (status)
- *status = CLDNN_SUCCESS;
- return func();
- } catch (const cldnn::error& err) {
- if (status)
- *status = err.status();
- cldnn::last_err::instance().set_last_exception(err);
-
-#ifndef NDEBUG
- static_cast<void>(default_result);
- throw;
-#endif
- } catch (const std::exception& err) {
- if (status)
- *status = default_error;
- cldnn::last_err::instance().set_last_exception(err);
-
-#ifndef NDEBUG
- static_cast<void>(default_result);
- throw;
-#endif
- } catch (...) {
- if (status)
- *status = default_error;
- cldnn::last_err::instance().set_last_error_message("error unknown");
-
-#ifndef NDEBUG
- static_cast<void>(default_result);
- throw;
-#endif
- }
-
-#ifdef NDEBUG
- return default_result;
-#endif
-}
-
-inline void exception_handler(cldnn_status default_error, cldnn_status* status, std::function<void()> func) {
- // NOTE for implementer: status should not be modified after successful func() call
- try {
- if (status)
- *status = CLDNN_SUCCESS;
- func();
- } catch (const cldnn::error& err) {
- if (status)
- *status = err.status();
- cldnn::last_err::instance().set_last_exception(err);
-#ifndef NDEBUG
- throw;
-#endif
- } catch (const std::exception& err) {
- if (status)
- *status = default_error;
- cldnn::last_err::instance().set_last_exception(err);
-
-#ifndef NDEBUG
- throw;
-#endif
- } catch (...) {
- if (status)
- *status = default_error;
- cldnn::last_err::instance().set_last_error_message("error unknown");
-#ifndef NDEBUG
- throw;
-#endif
- }
-}
diff --git a/inference-engine/thirdparty/clDNN/src/include/apply_adam_inst.h b/inference-engine/thirdparty/clDNN/src/include/apply_adam_inst.h
index 50bd523e0..c754b4e41 100644
--- a/inference-engine/thirdparty/clDNN/src/include/apply_adam_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/apply_adam_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/apply_adam.hpp"
+#include "api/apply_adam.hpp"
#include "primitive_inst.h"
#include <string>
#include <memory>
diff --git a/inference-engine/thirdparty/clDNN/src/include/arg_max_min_inst.h b/inference-engine/thirdparty/clDNN/src/include/arg_max_min_inst.h
index e181bb860..939d22113 100644
--- a/inference-engine/thirdparty/clDNN/src/include/arg_max_min_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/arg_max_min_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/arg_max_min.hpp"
+#include "api/arg_max_min.hpp"
#include "primitive_inst.h"
#include <memory>
diff --git a/inference-engine/thirdparty/clDNN/src/include/average_unpooling_inst.h b/inference-engine/thirdparty/clDNN/src/include/average_unpooling_inst.h
index 4a600b514..be91e855b 100644
--- a/inference-engine/thirdparty/clDNN/src/include/average_unpooling_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/average_unpooling_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/average_unpooling.hpp"
+#include "api/average_unpooling.hpp"
#include "primitive_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/batch_norm_grad_inst.h b/inference-engine/thirdparty/clDNN/src/include/batch_norm_grad_inst.h
index 5eb6564c7..0d2c617b0 100644
--- a/inference-engine/thirdparty/clDNN/src/include/batch_norm_grad_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/batch_norm_grad_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/batch_norm_grad.hpp"
+#include "api/batch_norm_grad.hpp"
#include "primitive_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/batch_norm_inst.h b/inference-engine/thirdparty/clDNN/src/include/batch_norm_inst.h
index c0d265e41..485131eba 100644
--- a/inference-engine/thirdparty/clDNN/src/include/batch_norm_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/batch_norm_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/batch_norm.hpp"
+#include "api/batch_norm.hpp"
#include "primitive_inst.h"
#include "mutable_data_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/binary_convolution_inst.h b/inference-engine/thirdparty/clDNN/src/include/binary_convolution_inst.h
index ad12bfd9a..027499c55 100644
--- a/inference-engine/thirdparty/clDNN/src/include/binary_convolution_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/binary_convolution_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/binary_convolution.hpp"
+#include "api/binary_convolution.hpp"
#include "primitive_inst.h"
#include <memory>
@@ -25,14 +25,6 @@
namespace cldnn {
-struct fused_primitive_desc {
- std::shared_ptr<const primitive> prim;
- size_t dep_start_idx;
- std::vector<primitive_id> deps;
- cldnn_activation_func_t activation;
- cldnn_activation_additional_params activation_params;
-};
-
template <>
struct typed_program_node<binary_convolution> : public typed_program_node_base<binary_convolution> {
using parent = typed_program_node_base<binary_convolution>;
@@ -63,39 +55,9 @@ public:
bool bias_term() const { return false; }
- void add_fused_primitive(const program_node* p) {
- fused_primitive_desc local_desc;
- local_desc.prim = p->get_primitive();
- local_desc.dep_start_idx = this->get_dependencies().size();
- local_desc.activation = cldnn_activation_func_t::activation_none;
- if (p->get_fused_activation_func() != cldnn_activation_func_t::activation_none) {
- local_desc.activation = p->get_fused_activation_func();
- local_desc.activation_params = p->get_fused_activation_params();
- }
-
- for (size_t i = 1; i < p->get_dependencies().size(); i++) {
- auto& dep = p->get_dependency(i);
- this->dependencies.push_back(&dep);
- local_desc.deps.push_back(dep.id());
- dep.users.push_back(this);
- }
- fused_prims.push_back(local_desc);
- }
-
- const std::vector<fused_primitive_desc>& get_fused_primitives() const { return fused_prims; }
-
- size_t get_fused_inputs_count() const {
- size_t count = 0;
- for (auto& fp : get_fused_primitives()) {
- count += fp.deps.size();
- }
- return count;
- }
-
private:
int32_t split;
bool depthwise_sep_opt;
- std::vector<fused_primitive_desc> fused_prims;
};
using binary_convolution_node = typed_program_node<binary_convolution>;
@@ -118,12 +80,6 @@ public:
return dep_memory(1 + index);
}
-
- memory_impl& fused_memory(size_t dep_id) const { return dep_memory(1 + node.get_split() + dep_id); }
-
- bool has_fused_primitives() const { return !node.get_fused_primitives().empty(); }
-
- size_t get_fused_mem_count() const { return node.get_fused_inputs_count(); }
};
using binary_convolution_inst = typed_primitive_inst<binary_convolution>;
diff --git a/inference-engine/thirdparty/clDNN/src/include/border_inst.h b/inference-engine/thirdparty/clDNN/src/include/border_inst.h
index 4c39a4a07..5d750f621 100644
--- a/inference-engine/thirdparty/clDNN/src/include/border_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/border_inst.h
@@ -15,7 +15,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include <api/CPP/border.hpp>
+#include <api/border.hpp>
#include "primitive_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/broadcast_inst.h b/inference-engine/thirdparty/clDNN/src/include/broadcast_inst.h
index c1cfd97a0..aa9cd1cd0 100644
--- a/inference-engine/thirdparty/clDNN/src/include/broadcast_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/broadcast_inst.h
@@ -15,7 +15,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include <api/CPP/broadcast.hpp>
+#include <api/broadcast.hpp>
#include "primitive_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/concatenation_inst.h b/inference-engine/thirdparty/clDNN/src/include/concatenation_inst.h
index 9d2c54474..9899a3e12 100644
--- a/inference-engine/thirdparty/clDNN/src/include/concatenation_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/concatenation_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/concatenation.hpp"
+#include "api/concatenation.hpp"
#include "primitive_inst.h"
#include <string>
#include <memory>
diff --git a/inference-engine/thirdparty/clDNN/src/include/condition_inst.h b/inference-engine/thirdparty/clDNN/src/include/condition_inst.h
index 5bff4f777..c03931c8f 100644
--- a/inference-engine/thirdparty/clDNN/src/include/condition_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/condition_inst.h
@@ -15,7 +15,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include <api/CPP/condition.hpp>
+#include <api/condition.hpp>
#include "network_impl.h"
#include "primitive_inst.h"
@@ -49,7 +49,7 @@ private:
void add_or_change_input_layout(const program_node& node) {
auto layout = node.get_dependency(0).get_output_layout();
auto input_id = node.as<condition>().result_id();
- if (_program == (program_impl::ptr) nullptr) { // if first run, create input_layout
+ if (_topology.get_primitives().count(input_id) == 0) {
_topology.add(std::make_shared<input_layout>(input_id, layout));
for (auto& prim : _topology.get_primitives()) {
for (auto& inp : prim.second->input) {
@@ -68,8 +68,8 @@ public:
typed_program_node(std::shared_ptr<primitive> prim, program_impl& prog)
: parent(prim, prog),
- _branch_true(*api_cast(this->get_primitive()->topology_true.get())),
- _branch_false(*api_cast(this->get_primitive()->topology_false.get())) {}
+ _branch_true(*this->get_primitive()->topology_true.get()),
+ _branch_false(*this->get_primitive()->topology_false.get()) {}
program_node& input() const { return get_dependency(0); }
program_node& compare() const { return get_dependency(1); }
diff --git a/inference-engine/thirdparty/clDNN/src/include/contract_inst.h b/inference-engine/thirdparty/clDNN/src/include/contract_inst.h
index 29e4c793a..08ff773e1 100644
--- a/inference-engine/thirdparty/clDNN/src/include/contract_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/contract_inst.h
@@ -15,7 +15,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include <api/CPP/contract.hpp>
+#include <api/contract.hpp>
#include "primitive_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/convolution_grad_weights_inst.h b/inference-engine/thirdparty/clDNN/src/include/convolution_grad_weights_inst.h
index bda5093cd..698ad1f8d 100644
--- a/inference-engine/thirdparty/clDNN/src/include/convolution_grad_weights_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/convolution_grad_weights_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/convolution_grad_weights.hpp"
+#include "api/convolution_grad_weights.hpp"
#include "primitive_inst.h"
#include <string>
#include <memory>
diff --git a/inference-engine/thirdparty/clDNN/src/include/convolution_inst.h b/inference-engine/thirdparty/clDNN/src/include/convolution_inst.h
index 6922b9089..d71f0a8bd 100644
--- a/inference-engine/thirdparty/clDNN/src/include/convolution_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/convolution_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/convolution.hpp"
+#include "api/convolution.hpp"
#include "primitive_inst.h"
#include <memory>
@@ -30,15 +30,6 @@ struct typed_program_node<convolution> : public typed_program_node_base<convolut
using parent = typed_program_node_base<convolution>;
public:
- struct fused_primitive_desc {
- std::shared_ptr<const primitive> prim;
- size_t dep_start_idx;
- std::vector<primitive_id> deps;
- cldnn_activation_func_t activation;
- cldnn_activation_additional_params activation_params;
- };
-
-
typed_program_node(std::shared_ptr<primitive> prim, program_impl& prog)
: parent(prim, prog),
split(this->get_primitive()->split()),
@@ -89,7 +80,6 @@ public:
if (static_cast<int32_t>(idx) >= this->get_split())
throw std::range_error("quantization factor offset too big");
-
return get_dependency(1 + (1 + 1 * bias_term()) * this->get_split() + idx + get_trans_dep_offset());
}
@@ -108,57 +98,12 @@ public:
get_trans_dep_offset());
}
- program_node& fused_eltwise(size_t idx = 0) const {
- if (static_cast<int32_t>(idx) >= this->get_split())
- throw std::range_error("eltwise offset too big");
-
- int index = 1 + this->get_split()
- + (bias_term() ? this->get_split() : 0)
- + (weights_quantization_term() ? this->get_split() : 0)
- + (output_calibration_term() ? this->get_split() : 0);
- return get_dependency(static_cast<size_t>(index));
- }
-
- void add_fused_primitive(const program_node *p) {
- fused_primitive_desc local_desc;
- local_desc.prim = p->get_primitive();
- local_desc.dep_start_idx = this->get_dependencies().size();
- local_desc.activation = cldnn_activation_func_t::activation_none;
- if (p->get_fused_activation_func() != cldnn_activation_func_t::activation_none) {
- local_desc.activation = p->get_fused_activation_func();
- local_desc.activation_params = p->get_fused_activation_params();
- }
-
- for (size_t i = 0; i < p->get_dependencies().size(); i++) {
- auto& dep = p->get_dependency(i);
- if (dep.id() == this->id())
- continue;
-
- this->dependencies.push_back(&dep);
- local_desc.deps.push_back(dep.id());
- dep.users.push_back(this);
- }
- fused_prims.push_back(local_desc);
- }
-
- const std::vector<fused_primitive_desc>& get_fused_primitives() const {
- return fused_prims;
- }
-
bool bias_term() const { return get_primitive()->bias.size() > 0; }
bool weights_quantization_term() const { return get_primitive()->weights_quantization_factors.size() > 0; }
bool output_calibration_term() const { return get_primitive()->output_calibration_factors.size() > 0; }
- size_t get_fused_inputs_count() const {
- size_t count = 0;
- for (auto& fp : get_fused_primitives()) {
- count += fp.deps.size();
- }
- return count;
- }
-
float get_input_qf() const { return input_qf; }
float get_output_qf() const { return output_qf; }
@@ -171,7 +116,6 @@ private:
uint32_t groups;
uint32_t deformable_groups;
bool deformable_mode;
- std::vector<fused_primitive_desc> fused_prims;
};
using convolution_node = typed_program_node<convolution>;
@@ -235,23 +179,11 @@ public:
}
}
- memory_impl& fused_memory(size_t dep_id) const {
- int index = 1 + node.get_split()
- + (bias_term() ? node.get_split() : 0)
- + (weights_quantization_factors_term() ? node.get_split() : 0)
- + (output_calibration_factors_term() ? node.get_split() : 0);
- return dep_memory(index + dep_id);
- }
-
bool bias_term() const { return node.bias_term(); }
bool weights_quantization_factors_term() const { return node.weights_quantization_term(); }
bool output_calibration_factors_term() const { return node.output_calibration_term(); }
-
- bool has_fused_primitives() const { return !node.get_fused_primitives().empty(); }
-
- size_t get_fused_mem_count() const { return node.get_fused_inputs_count(); }
};
using convolution_inst = typed_primitive_inst<convolution>;
diff --git a/inference-engine/thirdparty/clDNN/src/include/crop_inst.h b/inference-engine/thirdparty/clDNN/src/include/crop_inst.h
index f57320b3b..75369a82e 100644
--- a/inference-engine/thirdparty/clDNN/src/include/crop_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/crop_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/crop.hpp"
+#include "api/crop.hpp"
#include "primitive_inst.h"
#include <string>
#include <memory>
diff --git a/inference-engine/thirdparty/clDNN/src/include/custom_gpu_primitive_inst.h b/inference-engine/thirdparty/clDNN/src/include/custom_gpu_primitive_inst.h
index cb6d7a8ad..e964281a8 100644
--- a/inference-engine/thirdparty/clDNN/src/include/custom_gpu_primitive_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/custom_gpu_primitive_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/custom_gpu_primitive.hpp"
+#include "api/custom_gpu_primitive.hpp"
#include "primitive_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/data_inst.h b/inference-engine/thirdparty/clDNN/src/include/data_inst.h
index 2aee4f1fb..782e41d5e 100644
--- a/inference-engine/thirdparty/clDNN/src/include/data_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/data_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/data.hpp"
+#include "api/data.hpp"
#include "primitive_inst.h"
#include <string>
#include <memory>
diff --git a/inference-engine/thirdparty/clDNN/src/include/deconvolution_inst.h b/inference-engine/thirdparty/clDNN/src/include/deconvolution_inst.h
index f968654b1..e06c8454b 100644
--- a/inference-engine/thirdparty/clDNN/src/include/deconvolution_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/deconvolution_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/deconvolution.hpp"
+#include "api/deconvolution.hpp"
#include "primitive_inst.h"
#include <string>
#include <memory>
diff --git a/inference-engine/thirdparty/clDNN/src/include/deformable_convolution_inst.h b/inference-engine/thirdparty/clDNN/src/include/deformable_convolution_inst.h
index ff744684f..a622e5191 100644
--- a/inference-engine/thirdparty/clDNN/src/include/deformable_convolution_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/deformable_convolution_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/convolution.hpp"
+#include "api/convolution.hpp"
#include "primitive_inst.h"
#include <memory>
@@ -112,8 +112,6 @@ public:
using deformable_conv_inst = typed_primitive_inst<deformable_conv>;
-
-
template <>
struct typed_program_node<deformable_interp> : public typed_program_node_base<deformable_interp> {
using parent = typed_program_node_base<deformable_interp>;
diff --git a/inference-engine/thirdparty/clDNN/src/include/depth_to_space_inst.h b/inference-engine/thirdparty/clDNN/src/include/depth_to_space_inst.h
index 297dc0327..835222c9f 100644
--- a/inference-engine/thirdparty/clDNN/src/include/depth_to_space_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/depth_to_space_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/depth_to_space.hpp"
+#include "api/depth_to_space.hpp"
#include "primitive_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/detection_output_inst.h b/inference-engine/thirdparty/clDNN/src/include/detection_output_inst.h
index 3503e0e05..9e495e0d9 100644
--- a/inference-engine/thirdparty/clDNN/src/include/detection_output_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/detection_output_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/detection_output.hpp"
+#include "api/detection_output.hpp"
#include "primitive_inst.h"
#include "topology_impl.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/eltwise_inst.h b/inference-engine/thirdparty/clDNN/src/include/eltwise_inst.h
index 773e7e6cd..c2d020496 100644
--- a/inference-engine/thirdparty/clDNN/src/include/eltwise_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/eltwise_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/eltwise.hpp"
+#include "api/eltwise.hpp"
#include "primitive_inst.h"
#include <memory>
#include "topology_impl.h"
diff --git a/inference-engine/thirdparty/clDNN/src/include/embed_inst.h b/inference-engine/thirdparty/clDNN/src/include/embed_inst.h
index 2878ce2eb..e5ae3bb39 100644
--- a/inference-engine/thirdparty/clDNN/src/include/embed_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/embed_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/embed.hpp"
+#include "api/embed.hpp"
#include "primitive_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/engine_impl.h b/inference-engine/thirdparty/clDNN/src/include/engine_impl.h
index 039ad8767..52df865ea 100644
--- a/inference-engine/thirdparty/clDNN/src/include/engine_impl.h
+++ b/inference-engine/thirdparty/clDNN/src/include/engine_impl.h
@@ -16,8 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/memory.hpp"
-#include "api_impl.h"
+#include "api/memory.hpp"
#include "event_impl.h"
#include "refcounted_obj.h"
#include "implementation_map.h"
@@ -53,14 +52,14 @@ public:
explicit engine_impl(const engine_configuration& conf);
~engine_impl();
engine_types type() const { return engine_types::ocl; }
- refcounted_obj_ptr<memory_impl> allocate_memory(layout layout, uint16_t stream_id);
- refcounted_obj_ptr<memory_impl> allocate_memory(layout layout,
+ refcounted_obj_ptr<memory_impl> allocate_memory(const layout& layout, uint16_t stream_id);
+ refcounted_obj_ptr<memory_impl> allocate_memory(const layout& layout,
primitive_id,
uint32_t,
std::set<primitive_id>,
uint16_t stream_id,
bool reusable = true);
- refcounted_obj_ptr<memory_impl> reinterpret_buffer(const memory_impl& memory, layout new_layout);
+ refcounted_obj_ptr<memory_impl> reinterpret_buffer(const memory_impl& memory, const layout& new_layout);
bool is_the_same_buffer(const memory_impl& mem1, const memory_impl& mem2);
refcounted_obj_ptr<event_impl> create_user_event(uint16_t stream_id, bool set = false);
@@ -134,5 +133,3 @@ private:
memory_pool _memory_pool;
};
} // namespace cldnn
-
-API_CAST(::cldnn_engine, cldnn::engine_impl)
diff --git a/inference-engine/thirdparty/clDNN/src/include/error_handler.h b/inference-engine/thirdparty/clDNN/src/include/error_handler.h
index e8a0a401a..517b9c0d4 100644
--- a/inference-engine/thirdparty/clDNN/src/include/error_handler.h
+++ b/inference-engine/thirdparty/clDNN/src/include/error_handler.h
@@ -21,7 +21,7 @@
#include <array>
#include <algorithm>
#include <type_traits>
-#include "api/CPP/layout.hpp"
+#include "api/layout.hpp"
#include <string>
#include <utility>
diff --git a/inference-engine/thirdparty/clDNN/src/include/event_impl.h b/inference-engine/thirdparty/clDNN/src/include/event_impl.h
index a340a1af0..3bfe644bc 100644
--- a/inference-engine/thirdparty/clDNN/src/include/event_impl.h
+++ b/inference-engine/thirdparty/clDNN/src/include/event_impl.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api_impl.h"
+#include "api/event.hpp"
#include "refcounted_obj.h"
#include <list>
@@ -33,18 +33,23 @@ public:
void wait();
bool is_set();
virtual bool is_valid() const { return _attached; }
- virtual void reset() { _attached = false; }
+ virtual void reset() {
+ _attached = false;
+ _set = false;
+ _profiling_captured = false;
+ _profiling_info.clear();
+ }
// returns true if handler has been successfully added
- bool add_event_handler(cldnn_event_handler handler, void* data);
+ bool add_event_handler(event_handler handler, void* data);
- const std::list<cldnn_profiling_interval>& get_profiling_info();
+ const std::list<instrumentation::profiling_interval>& get_profiling_info();
private:
std::mutex _handlers_mutex;
- std::list<std::pair<cldnn_event_handler, void*>> _handlers;
+ std::list<std::pair<event_handler, void*>> _handlers;
bool _profiling_captured = false;
- std::list<cldnn_profiling_interval> _profiling_info;
+ std::list<instrumentation::profiling_interval> _profiling_info;
protected:
bool _set = false;
@@ -54,11 +59,11 @@ protected:
virtual void wait_impl() = 0;
virtual bool is_set_impl() = 0;
- virtual bool add_event_handler_impl(cldnn_event_handler, void*) { return true; }
+ virtual bool add_event_handler_impl(event_handler, void*) { return true; }
// returns whether profiling info has been captures successfully and there's no need to call this impl a second time
// when user requests to get profling info
- virtual bool get_profiling_info_impl(std::list<cldnn_profiling_interval>&) { return true; }
+ virtual bool get_profiling_info_impl(std::list<instrumentation::profiling_interval>&) { return true; }
};
struct user_event : virtual public event_impl {
@@ -78,5 +83,3 @@ private:
};
} // namespace cldnn
-
-API_CAST(::cldnn_event, cldnn::event_impl)
diff --git a/inference-engine/thirdparty/clDNN/src/include/fully_connected_grad_input_inst.h b/inference-engine/thirdparty/clDNN/src/include/fully_connected_grad_input_inst.h
index 309e7b249..e015f853b 100644
--- a/inference-engine/thirdparty/clDNN/src/include/fully_connected_grad_input_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/fully_connected_grad_input_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/fully_connected_grad_input.hpp"
+#include "api/fully_connected_grad_input.hpp"
#include "primitive_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/fully_connected_grad_weights_inst.h b/inference-engine/thirdparty/clDNN/src/include/fully_connected_grad_weights_inst.h
index ee4384e64..9b63ea64e 100644
--- a/inference-engine/thirdparty/clDNN/src/include/fully_connected_grad_weights_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/fully_connected_grad_weights_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/fully_connected_grad_weights.hpp"
+#include "api/fully_connected_grad_weights.hpp"
#include "primitive_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/fully_connected_inst.h b/inference-engine/thirdparty/clDNN/src/include/fully_connected_inst.h
index bfeaaab5e..8162664f8 100644
--- a/inference-engine/thirdparty/clDNN/src/include/fully_connected_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/fully_connected_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/fully_connected.hpp"
+#include "api/fully_connected.hpp"
#include "primitive_inst.h"
#include <string>
#include <memory>
diff --git a/inference-engine/thirdparty/clDNN/src/include/fused_conv_bn_scale_inst.h b/inference-engine/thirdparty/clDNN/src/include/fused_conv_bn_scale_inst.h
index 473701984..460f02c56 100644
--- a/inference-engine/thirdparty/clDNN/src/include/fused_conv_bn_scale_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/fused_conv_bn_scale_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api_extension/CPP/fused_conv_bn_scale.hpp"
+#include "api_extension/fused_conv_bn_scale.hpp"
#include "primitive_inst.h"
#include <memory>
diff --git a/inference-engine/thirdparty/clDNN/src/include/fused_conv_eltwise_inst.h b/inference-engine/thirdparty/clDNN/src/include/fused_conv_eltwise_inst.h
index ede85cb8e..f7fe3afac 100644
--- a/inference-engine/thirdparty/clDNN/src/include/fused_conv_eltwise_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/fused_conv_eltwise_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api_extension/CPP/fused_conv_eltwise.hpp"
+#include "api_extension/fused_conv_eltwise.hpp"
#include "primitive_inst.h"
#include <memory>
@@ -39,9 +39,9 @@ public:
if (get_primitive()->eltw.with_activation) {
auto slope = get_primitive()->eltw.activation_negative_slope;
if (slope == 0.f) {
- this->set_fused_activation(activation_relu, {});
+ this->add_fused_activation(activation_func::relu, {});
} else {
- this->set_fused_activation(activation_relu_negative_slope, { slope, 0.f });
+ this->add_fused_activation(activation_func::relu_negative_slope, { slope, 0.f });
}
}
}
diff --git a/inference-engine/thirdparty/clDNN/src/include/gather_inst.h b/inference-engine/thirdparty/clDNN/src/include/gather_inst.h
index 8b7c6fb49..e54a0a6a5 100644
--- a/inference-engine/thirdparty/clDNN/src/include/gather_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/gather_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/gather.hpp"
+#include "api/gather.hpp"
#include "primitive_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/gather_tree_inst.h b/inference-engine/thirdparty/clDNN/src/include/gather_tree_inst.h
new file mode 100644
index 000000000..64caaac2e
--- /dev/null
+++ b/inference-engine/thirdparty/clDNN/src/include/gather_tree_inst.h
@@ -0,0 +1,49 @@
+// Copyright (c) 2019 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <api/gather_tree.hpp>
+
+#include "primitive_inst.h"
+#include <string>
+#include <memory>
+
+namespace cldnn {
+template <>
+struct typed_program_node<gather_tree> : typed_program_node_base<gather_tree> {
+private:
+ using parent = typed_program_node_base<gather_tree>;
+public:
+ using parent::parent;
+ typed_program_node(const std::shared_ptr<gather_tree> prim, program_impl& prog) : parent(prim, prog) {
+ }
+ program_node& input() const { return get_dependency(0); }
+};
+
+using gather_tree_node = typed_program_node<gather_tree>;
+
+template <>
+class typed_primitive_inst<gather_tree> : public typed_primitive_inst_base<gather_tree> {
+ using parent = typed_primitive_inst_base<gather_tree>;
+
+public:
+ static layout calc_output_layout(gather_tree_node const& node);
+ static std::string to_string(gather_tree_node const& node);
+ typed_primitive_inst(network_impl& network, gather_tree_node const& node);
+};
+
+using gather_tree_inst = typed_primitive_inst<gather_tree>;
+
+} // namespace cldnn
diff --git a/inference-engine/thirdparty/clDNN/src/include/gemm_inst.h b/inference-engine/thirdparty/clDNN/src/include/gemm_inst.h
index ff2aea283..1db6ee319 100644
--- a/inference-engine/thirdparty/clDNN/src/include/gemm_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/gemm_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/gemm.hpp"
+#include "api/gemm.hpp"
#include "primitive_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/generic_layer.h b/inference-engine/thirdparty/clDNN/src/include/generic_layer.h
deleted file mode 100644
index cadb4797e..000000000
--- a/inference-engine/thirdparty/clDNN/src/include/generic_layer.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
-// Copyright (c) 2016 Intel Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-*/
-
-///////////////////////////////////////////////////////////////////////////////////////////////////
-#pragma once
-
-#include "api/C/cldnn.h"
-
-namespace cldnn {
-/// @brief Changes how data is ordered in memory. Value type is not changed & all information is preserved.
-/// @details Corresponding values are bitwise equal before/after reorder.
-/// Also merged with subtraction layer, which can subtract values while doing reordering.
-CLDNN_BEGIN_PRIMITIVE_DESC(generic_layer)
-/// @brief Requested memory layout.
-cldnn_layout output_layout;
-const void* generic_params;
-
-CLDNN_END_PRIMITIVE_DESC(generic_layer)
-
-CLDNN_DECLARE_PRIMITIVE_TYPE_ID(generic_layer);
-
-} // namespace cldnn
\ No newline at end of file
diff --git a/inference-engine/thirdparty/clDNN/src/include/generic_layer.hpp b/inference-engine/thirdparty/clDNN/src/include/generic_layer.hpp
index dbd92a9b9..47f830531 100644
--- a/inference-engine/thirdparty/clDNN/src/include/generic_layer.hpp
+++ b/inference-engine/thirdparty/clDNN/src/include/generic_layer.hpp
@@ -16,9 +16,8 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "generic_layer.h"
-#include "api/CPP/primitive.hpp"
-#include "api/CPP/memory.hpp"
+#include "api/primitive.hpp"
+#include "api/memory.hpp"
#include "kernel_selector_helper.h"
#include <vector>
@@ -35,7 +34,7 @@ namespace cldnn {
/// @details Corresponding values are bitwise equal before/after reorder.
/// Also merged with subtraction layer, which can subtract values while doing reordering.
/// NOTE THAT THIS WILL SUBTRACT THE SAME VALUES FROM EACH BATCH.
-struct generic_layer : public primitive_base<generic_layer, CLDNN_PRIMITIVE_DESC(generic_layer)> {
+struct generic_layer : public primitive_base<generic_layer> {
CLDNN_DECLARE_PRIMITIVE(generic_layer)
/// @brief Constructs generic_layer primitive which takes mean subtract values from another primitive.
@@ -50,23 +49,12 @@ struct generic_layer : public primitive_base<generic_layer, CLDNN_PRIMITIVE_DESC
const padding& output_padding = padding())
: primitive_base(id, {input}, output_padding), output_layout(output_layout), generic_params(generic_params) {}
- /// @brief Constructs a copy from basic C API @CLDNN_PRIMITIVE_DESC{generic_layer}
- generic_layer(const dto* dto)
- : primitive_base(dto),
- output_layout(dto->output_layout),
- generic_params(*static_cast<const kernel_selector::generic_kernel_params* const>(dto->generic_params)) {}
-
/// @brief Requested memory layout.
layout output_layout;
const kernel_selector::generic_kernel_params generic_params;
protected:
std::vector<std::reference_wrapper<const primitive_id>> get_dependencies() const override { return {}; }
-
- void update_dto(dto& dto) const override {
- dto.output_layout = output_layout;
- dto.generic_params = &generic_params;
- }
};
/// @}
/// @}
diff --git a/inference-engine/thirdparty/clDNN/src/include/index_select_inst.h b/inference-engine/thirdparty/clDNN/src/include/index_select_inst.h
index 42b796c41..dbb2b6157 100644
--- a/inference-engine/thirdparty/clDNN/src/include/index_select_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/index_select_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/index_select.hpp"
+#include "api/index_select.hpp"
#include "primitive_inst.h"
#include <memory>
diff --git a/inference-engine/thirdparty/clDNN/src/include/input_layout_inst.h b/inference-engine/thirdparty/clDNN/src/include/input_layout_inst.h
index 324a1a647..bf33de6f5 100644
--- a/inference-engine/thirdparty/clDNN/src/include/input_layout_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/input_layout_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/input_layout.hpp"
+#include "api/input_layout.hpp"
#include "primitive_inst.h"
#include <string>
#include <memory>
diff --git a/inference-engine/thirdparty/clDNN/src/include/internal_primitive.h b/inference-engine/thirdparty/clDNN/src/include/internal_primitive.h
index 36dff33b8..f815e0628 100644
--- a/inference-engine/thirdparty/clDNN/src/include/internal_primitive.h
+++ b/inference-engine/thirdparty/clDNN/src/include/internal_primitive.h
@@ -15,7 +15,7 @@
*/
#pragma once
-#include "api/CPP/primitive.hpp"
+#include "api/primitive.hpp"
#include "primitive_type.h"
diff --git a/inference-engine/thirdparty/clDNN/src/include/internal_primitive_type_base.h b/inference-engine/thirdparty/clDNN/src/include/internal_primitive_type_base.h
index cd799ecd4..605ab5bf7 100644
--- a/inference-engine/thirdparty/clDNN/src/include/internal_primitive_type_base.h
+++ b/inference-engine/thirdparty/clDNN/src/include/internal_primitive_type_base.h
@@ -26,15 +26,10 @@
namespace cldnn {
template <class PType>
-struct internal_primitive_type_base : public ::cldnn_primitive_type {
+struct internal_primitive_type_base : public primitive_type {
static_assert(meta::is_internal_primitive<PType>::value,
"Primitive type passed to internal_primitive_type_base should derive from internal_primitive");
- [[noreturn]] std::shared_ptr<primitive> from_dto(const CLDNN_PRIMITIVE_DESC(primitive) *) const override {
- throw std::runtime_error(
- "Trying to create an internal primitive from dto - internal primitives are intransferable by design");
- }
-
[[noreturn]] std::shared_ptr<cldnn::program_node> create_node(program_impl&,
const std::shared_ptr<primitive>) const override {
throw std::runtime_error(
diff --git a/inference-engine/thirdparty/clDNN/src/include/kernel_selector_helper.h b/inference-engine/thirdparty/clDNN/src/include/kernel_selector_helper.h
index 5dfadf6a5..785fdce72 100644
--- a/inference-engine/thirdparty/clDNN/src/include/kernel_selector_helper.h
+++ b/inference-engine/thirdparty/clDNN/src/include/kernel_selector_helper.h
@@ -14,8 +14,12 @@
#pragma once
-#include "api/C/cldnn.h"
-#include "api/CPP/tensor.hpp"
+#include "api/cldnn.hpp"
+#include "api/tensor.hpp"
+#include "api/eltwise.hpp"
+#include "api/scale.hpp"
+#include "api/quantize.hpp"
+#include "api/activation.hpp"
#include "kernel_selector_params.h"
#include "kernel_selector_common.h"
@@ -23,6 +27,8 @@
#include <cstdint>
#include <string>
+#include <vector>
+#include <memory>
using namespace cldnn;
@@ -101,9 +107,9 @@ std::string to_host_version(const cldnn::version_t& version);
kernel_selector::data_tensor convert_data_tensor(const layout& l, uint32_t split = 1, const tensor view_offset = tensor {});
kernel_selector::weights_tensor convert_weights_tensor(const layout& l);
layout from_weights_tensor(const kernel_selector::weights_tensor& t);
-kernel_selector::activation_function get_kernel_selector_activation_param(cldnn_activation_func activation_func);
+kernel_selector::activation_function get_kernel_selector_activation_param(activation_func activation_func);
kernel_selector::activation_function get_kernel_selector_activation_grad_param(
- cldnn_activation_grad_func activation_grad_func);
+ activation_grad_func activation_grad_func);
template <typename T = std::uint32_t>
kernel_selector::dim_tensor<T> convert_dim_vector(const tensor& t) {
@@ -117,28 +123,37 @@ kernel_selector::dim_tensor<T> convert_dim_vector(const tensor& t) {
}
template <typename p_type>
-inline void convert_activation_func_params(const p_type primitive, kernel_selector::base_activation_params& params) {
+inline void convert_activation_func_params(const p_type primitive, std::vector<kernel_selector::base_activation_params>& params) {
const float negative_slope = primitive->activation_negative_slope;
if (negative_slope != 0.0f) {
- params.m = negative_slope;
- params.function = kernel_selector::activation_function::RELU_NEGATIVE_SLOPE;
+ params.emplace_back(kernel_selector::activation_function::RELU_NEGATIVE_SLOPE, negative_slope, 0.0f);
} else {
- params.function = kernel_selector::activation_function::RELU;
+ params.emplace_back(kernel_selector::activation_function::RELU, 0.0f, 0.0f);
}
}
template <typename arg_t>
-inline void convert_fused_activation_func_params(const arg_t& arg, kernel_selector::base_activation_params& params) {
- params.m = arg.get_fused_activation_params().a;
- params.n = arg.get_fused_activation_params().b;
- params.function = get_kernel_selector_activation_param(arg.get_fused_activation_func());
+inline void convert_fused_activation_func_params(const arg_t& arg, std::vector<kernel_selector::base_activation_params>& params) {
+ for (size_t i = 0; i < arg.get_fused_activations_funcs().size(); i++) {
+ params.emplace_back(get_kernel_selector_activation_param(arg.get_fused_activations_funcs()[i]),
+ arg.get_fused_activations_params()[i].a,
+ arg.get_fused_activations_params()[i].b);
+ }
+}
+
+template <typename p_type>
+inline void convert_new_activation_func(const p_type primitive, std::vector<kernel_selector::base_activation_params>& params) {
+ params.insert(params.begin(), {get_kernel_selector_activation_param(primitive->activation_function),
+ primitive->additional_params.a,
+ primitive->additional_params.b});
}
template <typename p_type>
-inline void convert_new_activation_func(const p_type primitive, kernel_selector::base_activation_params& params) {
- params.function = get_kernel_selector_activation_param(primitive->activation_func);
- params.m = primitive->additional_params.a;
- params.n = primitive->additional_params.b;
+inline void convert_new_activation_grad_func(const p_type primitive, std::vector<kernel_selector::base_activation_params>& params) {
+ params.insert(params.begin(), {get_kernel_selector_activation_grad_param(primitive->activation_grad_function),
+ primitive->additional_params.a,
+ primitive->additional_params.b,
+ true});
}
void set_params(const program_node& node, kernel_selector::params& params);
@@ -157,7 +172,44 @@ inline params_t get_default_params(const arg_t& arg, uint32_t split = 1) {
params.layerID = arg.id();
- convert_fused_activation_func_params(arg, params.activation);
+ convert_fused_activation_func_params(arg, params.activations);
+ size_t op_id = 0;
+ for (auto& fused_prim : arg.get_fused_primitives()) {
+ using op_type = kernel_selector::base_params::fused_operation_desc::Type;
+ kernel_selector::base_params::fused_operation_desc desc;
+ if (fused_prim.prim->type == eltwise::type_id()) {
+ desc.type = op_type::ELTWISE;
+ } else if (fused_prim.prim->type == scale::type_id()) {
+ desc.type = op_type::SCALE;
+ } else if (fused_prim.prim->type == quantize::type_id()) {
+ desc.type = op_type::QUANTIZE;
+ } else if (fused_prim.prim->type == activation::type_id()) {
+ desc.type = op_type::ACTIVATION;
+ std::shared_ptr<const primitive> p = fused_prim.prim;
+ auto activation_prim = std::static_pointer_cast<const activation>(p);
+ desc.activation.m = activation_prim->additional_params.a;
+ desc.activation.n = activation_prim->additional_params.b;
+ desc.activation.function = get_kernel_selector_activation_param(activation_prim->activation_function);
+ } else {
+ throw std::runtime_error("Invalid fused primitive type in " + arg.id() + " node");
+ }
+
+ desc.dep_idx_start = fused_prim.dep_start_idx;
+ desc.dep_size = fused_prim.deps.size();
+ desc.op_id = op_id++;
+ desc.output_tensor = convert_data_tensor(fused_prim.output_layout);
+
+ for (size_t i = desc.dep_idx_start; i < desc.dep_idx_start + desc.dep_size; i++) {
+ desc.tensors.push_back(convert_data_tensor(arg.get_dependency(i).get_output_layout()));
+ }
+
+ if (fused_prim.activation != activation_func::none) {
+ desc.activation.m = fused_prim.activation_params.a;
+ desc.activation.n = fused_prim.activation_params.b;
+ desc.activation.function = get_kernel_selector_activation_param(fused_prim.activation);
+ }
+ params.fused_ops.push_back(desc);
+ }
return params;
}
@@ -186,8 +238,8 @@ inline params_t get_weights_bias_default_params(const arg_t& arg, uint32_t split
params.bias.push_back(convert_data_tensor(layout(bias_layout.data_type,
bias_layout.format,
{bias_layout.size.batch[0],
- bias_layout.size.feature[0],
- bias_layout.size.spatial[0] / static_cast<int>(groups),
+ bias_layout.size.feature[0] / static_cast<int>(groups),
+ bias_layout.size.spatial[0],
bias_layout.size.spatial[1]}))
.FlattenFeatureAndSpatials());
}
diff --git a/inference-engine/thirdparty/clDNN/src/include/layout_optimizer.h b/inference-engine/thirdparty/clDNN/src/include/layout_optimizer.h
index 27f8ab5ba..b381e68eb 100644
--- a/inference-engine/thirdparty/clDNN/src/include/layout_optimizer.h
+++ b/inference-engine/thirdparty/clDNN/src/include/layout_optimizer.h
@@ -53,20 +53,51 @@ class primitive_inst;
// it is the programmer's responsibility to choose between 'get_reorder', which creates a reorder to the best format
// for the given primitive (or nullptr if it is already optimal); the user shall insert it into their own topology.
// (note: layout_optimizer has internal caching mechanism, so if there's already reorder added for given (mem,format)
-// pair during 'get_reorder' call, it will be reused);
-// or 'add_weights_for_optimization' which, beside creating the reorder, adds both primitives (data and reorder) to its
-// internal network which allows later to call 'optimize' and get already reordered data to be exchanged in target
-// topology.
+// pair during 'get_reorder' call, it will be reused).
+
+class reorder_factory {
+public:
+ // pair.first is the reorder (may be nullptr if no reorder is needed); pair.second tells whether the returned reorder was cached
+ // (no need to add it to 'outputs' etc.); for pair.first == nullptr, pair.second == true
+ std::pair<std::shared_ptr<reorder>, bool> get_reorder(primitive_id src_id, layout in_layout, layout out_layout);
+
+ std::vector<std::pair<std::shared_ptr<primitive>, bool>> get_weights_reorder(
+ primitive_id input_id,
+ const layout& old_layout,
+ const kernel_selector::weights_reorder_params& reorder_params);
+
+private:
+ struct cache_key {
+ primitive_id data_source;
+ layout expected_layout;
+
+ friend bool operator==(cache_key const& lhs, cache_key const& rhs) {
+ return lhs.data_source == rhs.data_source && lhs.expected_layout == rhs.expected_layout;
+ }
+
+ friend bool operator!=(cache_key const& lhs, cache_key const& rhs) { return !(lhs == rhs); }
+
+ friend bool operator<(cache_key const& lhs, cache_key const& rhs) {
+ if (lhs.data_source != rhs.data_source)
+ return (lhs.data_source < rhs.data_source);
+ return lhs.expected_layout < rhs.expected_layout;
+ }
+ };
+
+ std::map<cache_key, std::shared_ptr<reorder>> _cached_reorders;
+ std::map<cache_key, std::shared_ptr<generic_layer>> _cached_generic_reorders;
+};
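
A standalone sketch of the get_reorder contract documented above, assuming nothing beyond the std library; fake_reorder and reorder_cache are hypothetical stand-ins for cldnn::reorder and reorder_factory, and the layout is reduced to a string for brevity.

    #include <iostream>
    #include <map>
    #include <memory>
    #include <string>
    #include <utility>

    // Hypothetical stand-ins for cldnn::reorder and the (id, layout) cache key.
    struct fake_reorder { std::string from, to; };
    using cache_key = std::pair<std::string, std::string>;  // (source id, target layout)

    struct reorder_cache {
        std::map<cache_key, std::shared_ptr<fake_reorder>> cached;

        // Mirrors the documented contract: .first may be nullptr (no reorder
        // needed), .second tells whether the returned reorder came from cache.
        std::pair<std::shared_ptr<fake_reorder>, bool>
        get_reorder(const std::string& id, const std::string& in, const std::string& out) {
            if (in == out)
                return {nullptr, true};              // already optimal
            cache_key key{id, out};
            auto it = cached.find(key);
            if (it != cached.end())
                return {it->second, true};           // reused, no need to add it again
            auto r = std::make_shared<fake_reorder>(fake_reorder{in, out});
            cached[key] = r;
            return {r, false};                       // newly created, caller must add it
        }
    };

    int main() {
        reorder_cache rc;
        auto a = rc.get_reorder("conv1_weights", "bfyx", "bfyx_f16");
        auto b = rc.get_reorder("conv1_weights", "bfyx", "bfyx_f16");
        std::cout << std::boolalpha << a.second << ' ' << b.second << '\n';  // false true
    }

The design keeps one cache entry per (source id, target layout) pair, so repeated requests for the same conversion hand back the already-created primitive instead of growing the topology.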
+
class layout_optimizer {
public:
- enum class data_type { weights, bias, input };
enum class optimization_attributes_type {
splitted_convolution,
group_convolution,
deformable_convolution,
bfyx_only_layer,
only_fsv32_layers,
- bfyx_f16_network
+ bfyx_f16_network,
+ bfzyx_f16_network
};
struct optimization_attributes {
@@ -76,6 +107,7 @@ public:
int32_t bfyx_only_layer = 0;
int32_t only_fsv32_layers = 0;
int32_t bfyx_f16_network = 0;
+ int32_t bfzyx_f16_network = 0;
};
private:
@@ -83,142 +115,79 @@ private:
// TODO: Remove once we will get full support for input/output padding in all primitive implementations.
bool _output_size_handling_enabled;
- struct cache_key {
- primitive_id data_source;
- layout expected_layout;
-
- friend bool operator==(cache_key const& lhs, cache_key const& rhs) {
- return lhs.data_source == rhs.data_source && lhs.expected_layout == rhs.expected_layout;
- }
-
- friend bool operator!=(cache_key const& lhs, cache_key const& rhs) { return !(lhs == rhs); }
-
- friend bool operator<(cache_key const& lhs, cache_key const& rhs) {
- if (lhs.data_source != rhs.data_source)
- return (lhs.data_source < rhs.data_source);
- return lhs.expected_layout < rhs.expected_layout;
- }
- };
-
- std::map<cache_key, std::shared_ptr<reorder>> _cached_reorders;
- std::map<cache_key, std::shared_ptr<generic_layer>> _cached_generic_layers;
-
layout get_expected_layout(layout const& current_layout,
- data_type type,
convolution_node const& node,
layout const& output_or_weights_layout);
layout get_expected_layout(layout const& current_layout,
- data_type type,
deconvolution_node const& node,
layout const& output_or_weights_layout);
layout get_expected_layout(layout const& current_layout,
- data_type type,
- fully_connected_node const& node,
- layout const& output_or_weights_layout);
- layout get_expected_layout(layout const& current_layout,
- data_type type,
detection_output_node const& node,
layout const& output_or_weights_layout);
layout get_expected_layout(layout const& current_layout,
- data_type type,
- embed_node const& node,
- layout const& output_or_weights_layout);
- layout get_expected_layout(layout const& current_layout,
- data_type type,
- lstm_gemm_node const& node,
- layout const& output_or_weights_layout);
- layout get_expected_layout(layout const& current_layout,
- data_type type,
binary_convolution_node const& node,
layout const& output_or_weights_layout);
bool convolution_bfyx_opt(const layout& output_layout,
const layout& weights_layout,
std::shared_ptr<const convolution> conv);
- bool convolution_byxf_opt(const layout& output_layout,
+ bool convolution_byxf_opt(const layout& input_layout,
+ const layout& output_layout,
const layout& weights_layout,
std::shared_ptr<const convolution> conv);
bool convolution_bfyx_f16_opt(const layout& output_layout,
const layout& weights_layout,
std::shared_ptr<const convolution> conv);
+ bool convolution_bfzyx_f16_opt(const layout& output_layout,
+ const layout& weights_layout,
+ std::shared_ptr<const convolution> conv);
+ bool deconvolution_bfzyx_f16_opt(const layout& output_layout,
+ const layout& weights_layout,
+ std::shared_ptr<const deconvolution> conv);
bool users_for_convolution_byxf_opt(program_node const& node, uint32_t depth);
- bool deps_depth_in_same_format(program_node const& node, const cldnn::format format, uint32_t depth);
-
- // pair.first is reorder (may be nullptr if reorder is not needed), pair.second tells if returned reorder was cached
- // (no need to add it to 'ouputs' etc.) for pair.first == nullptr, pair.second == true
- std::pair<std::shared_ptr<cldnn::reorder>, bool> create_reorder_if_needed(const layout& current_layout,
- const cldnn::primitive_id& memid,
- layout const& expected_layout);
-
- std::pair<std::shared_ptr<cldnn::generic_layer>, bool> create_reorder_from_given_source(
- const cldnn::primitive_id& memid,
- layout const& expected_layout,
- const kernel_selector::weights_reorder_params& reorder_params);
+ bool deps_for_convolution_byxf_opt(program_node const& node, uint32_t depth);
public:
explicit layout_optimizer(bool output_size_handling_enabled = true);
- // this method creates reorder for data, which is currently in 'data_layout' format, to best format in context of
- // 'user' primitive. data is used by 'user' in a way described by 'type' (i.e. weights/bias/input). id shall be
- // primitive_id of data's source (used as reorder's input and for cache checks). user_layout is optional parameter
- // (required for weights and bias, optional for input) which tells what kind of output 'user'
- // is supposed to compute - it's used for example to decide if weights shall be converted to fp16.
- //
- // if 'data_layout' is already optimal, nullptr is returned
- // currently optimizations are supported only for convolution and fully-connected.
- //
- // returns a pair<reorder,bool> - where pair.first is a pointer to the reorder primitive and pair.second tells if
- // it's been reused from cache, pair.second == false means this is a newly created primitive and probably needs to be
- // added to topology etc.
- template <class T>
- auto get_reorder(layout const& data_layout,
- primitive_id const& id,
- data_type type,
- T& node,
- layout const& user_layout) ->
- typename std::enable_if<meta::is_any_of<T,
- convolution_node,
- fully_connected_node,
- deconvolution_node,
- detection_output_node,
- embed_node,
- lstm_gemm_node,
- binary_convolution_node>::value,
- meta::deduce_ret_type_t<decltype(&layout_optimizer::create_reorder_if_needed)>>::type {
- auto expected_layout = get_expected_layout(data_layout, type, node, user_layout);
- return create_reorder_if_needed(data_layout, id, expected_layout);
- }
+ format get_preferred_format(program_node& node) {
+ format expected = format::any;
+ auto output_layout = node.get_output_layout();
+
+ if (node.is_type<convolution>()) {
+ auto& conv_node = node.as<convolution>();
+ auto weights_layout = conv_node.weights(0).get_output_layout();
+ expected = get_expected_layout(output_layout, conv_node, weights_layout).format;
+ } else if (node.is_type<binary_convolution>()) {
+ auto& bconv_node = node.as<binary_convolution>();
+ auto weights_layout = bconv_node.weights(0).get_output_layout();
+ expected = get_expected_layout(output_layout, bconv_node, weights_layout).format;
+ } else if (node.is_type<detection_output>()) {
+ expected = get_expected_layout(
+ output_layout,
+ node.as<detection_output>(),
+ layout{ data_types::f32, format::bfyx, tensor{} }).format;
+ } else if (node.is_type<reorder>() || node.is_type<input_layout>()) {
+ expected = node.get_output_layout().format;
+ } else if (node.is_type<deconvolution>()) {
+ auto& deconv_node = node.as<deconvolution>();
+ auto weights_layout = deconv_node.weights(0).get_output_layout();
+ expected = get_expected_layout(output_layout, deconv_node, weights_layout).format;
+ }
- // case for unsupported 'user' primitives
- template <class T>
- auto get_reorder(layout const& data_layout,
- primitive_id const& id,
- data_type type,
- T& node,
- layout const& user_layout) ->
- typename std::enable_if<!meta::is_any_of<T,
- convolution_node,
- fully_connected_node,
- deconvolution_node,
- detection_output_node,
- embed_node,
- lstm_gemm_node,
- binary_convolution_node>::value,
- meta::deduce_ret_type_t<decltype(&layout_optimizer::create_reorder_if_needed)>>::type {
- static_assert(meta::always_false<T>::value,
- "Layout optimization for given primitive type is currently unsupported!");
- return meta::deduce_ret_type_t<decltype(&layout_optimizer::create_reorder_if_needed)>();
+ return expected;
}
- std::vector<std::pair<std::shared_ptr<primitive>, bool>> get_generic_layer(
- const kernel_selector::weights_reorder_params& reorder_params,
- primitive_id input_id,
- const layout& old_layout,
- data_type type);
+ bool is_format_supported(program_node& node, format::type fmt);
+
+ // Returns whether reorder between "prev" with format fmt_prev and "next" with format fmt_next
+ // can be fused into next.
+ bool can_fuse_reorder(program_node& prev, program_node& next, format fmt_prev, format fmt_next);
void set_optimization_attribute(optimization_attributes_type attribute, int32_t val);
optimization_attributes get_optimization_attributes() { return _optimization_attributes; }
bool is_format_optimized(const convolution_node& node, const format& format);
+ bool is_format_optimized(const deconvolution_node& node, const format& format);
};
} // namespace cldnn
diff --git a/inference-engine/thirdparty/clDNN/src/include/lookup_table_inst.h b/inference-engine/thirdparty/clDNN/src/include/lookup_table_inst.h
index 486fa3071..6e07af20a 100644
--- a/inference-engine/thirdparty/clDNN/src/include/lookup_table_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/lookup_table_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/lookup_table.hpp"
+#include "api/lookup_table.hpp"
#include "primitive_inst.h"
#include <memory>
diff --git a/inference-engine/thirdparty/clDNN/src/include/lrn_inst.h b/inference-engine/thirdparty/clDNN/src/include/lrn_inst.h
index 35e082b3a..f43315535 100644
--- a/inference-engine/thirdparty/clDNN/src/include/lrn_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/lrn_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/lrn.hpp"
+#include "api/lrn.hpp"
#include "primitive_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/lstm_dynamic_input_inst.h b/inference-engine/thirdparty/clDNN/src/include/lstm_dynamic_input_inst.h
index 4ed70cc98..6b8cddb1f 100644
--- a/inference-engine/thirdparty/clDNN/src/include/lstm_dynamic_input_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/lstm_dynamic_input_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api_extension/CPP/lstm_dynamic_input.hpp"
+#include "api_extension/lstm_dynamic_input.hpp"
#include "primitive_inst.h"
#include "error_handler.h"
#include <memory>
diff --git a/inference-engine/thirdparty/clDNN/src/include/lstm_dynamic_inst.h b/inference-engine/thirdparty/clDNN/src/include/lstm_dynamic_inst.h
index 9c2445f95..fac24bcbc 100644
--- a/inference-engine/thirdparty/clDNN/src/include/lstm_dynamic_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/lstm_dynamic_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/lstm_dynamic.hpp"
+#include "api/lstm_dynamic.hpp"
#include "primitive_inst.h"
#include "error_handler.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/lstm_dynamic_timeloop_inst.h b/inference-engine/thirdparty/clDNN/src/include/lstm_dynamic_timeloop_inst.h
index 22a4ce6b3..f5d13e309 100644
--- a/inference-engine/thirdparty/clDNN/src/include/lstm_dynamic_timeloop_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/lstm_dynamic_timeloop_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api_extension/CPP/lstm_dynamic_timeloop.hpp"
+#include "api_extension/lstm_dynamic_timeloop.hpp"
#include "primitive_inst.h"
#include "error_handler.h"
#include <memory>
diff --git a/inference-engine/thirdparty/clDNN/src/include/lstm_elt_inst.h b/inference-engine/thirdparty/clDNN/src/include/lstm_elt_inst.h
index 20aed4649..5be00727c 100644
--- a/inference-engine/thirdparty/clDNN/src/include/lstm_elt_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/lstm_elt_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/lstm.hpp"
+#include "api/lstm.hpp"
#include "primitive_inst.h"
#include <string>
@@ -31,7 +31,7 @@ public:
program_node& input() const { return get_dependency(0); }
program_node& cell() const { return get_dependency(1); }
bool cell_term() const { return !get_primitive()->cell.empty(); }
- int32_t offset_order() const { return get_primitive()->offset_order; }
+ lstm_weights_order offset_order() const { return get_primitive()->offset_order; }
float clip() const {
float clip_val = get_primitive()->clip;
if (clip_val < 0)
@@ -57,7 +57,7 @@ public:
memory_impl& cell_memory() const { return dep_memory(1); }
bool cell_term() const { return !argument.cell.empty(); }
- int32_t offset_order() const { return argument.offset_order; }
+ lstm_weights_order offset_order() const { return argument.offset_order; }
float clip() const {
float clip_val = argument.clip;
if (clip_val < 0)
diff --git a/inference-engine/thirdparty/clDNN/src/include/lstm_gemm_inst.h b/inference-engine/thirdparty/clDNN/src/include/lstm_gemm_inst.h
index 9054262b3..aff8437eb 100644
--- a/inference-engine/thirdparty/clDNN/src/include/lstm_gemm_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/lstm_gemm_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/lstm.hpp"
+#include "api/lstm.hpp"
#include "primitive_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/lstm_inst.h b/inference-engine/thirdparty/clDNN/src/include/lstm_inst.h
index 36a4bf4a4..95f9f348b 100644
--- a/inference-engine/thirdparty/clDNN/src/include/lstm_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/lstm_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/lstm.hpp"
+#include "api/lstm.hpp"
#include "primitive_inst.h"
#include <string>
#include <vector>
@@ -43,8 +43,8 @@ public:
bool peepholes_term() const { return !get_primitive()->peepholes.empty(); }
bool initial_hidden_term() const { return !get_primitive()->initial_hidden.empty(); }
bool initial_cell_term() const { return !get_primitive()->initial_cell.empty(); }
- std::vector<cldnn_activation_func> activations() const { return get_primitive()->activations; }
- std::vector<cldnn_activation_additional_params> activation_params() const {
+ std::vector<activation_func> activations() const { return get_primitive()->activations; }
+ std::vector<activation_additional_params> activation_params() const {
return get_primitive()->activation_params;
}
size_t sequence_len() const { return get_primitive()->input.size(); }
@@ -75,8 +75,8 @@ public:
bool peepholes_term() const { return !argument.peepholes.empty(); }
bool initial_hidden_term() const { return !argument.initial_hidden.empty(); }
bool initial_cell_term() const { return !argument.initial_cell.empty(); }
- std::vector<cldnn_activation_func> activations() const { return argument.activations; }
- std::vector<cldnn_activation_additional_params> activation_params() const { return argument.activation_params; }
+ std::vector<activation_func> activations() const { return argument.activations; }
+ std::vector<activation_additional_params> activation_params() const { return argument.activation_params; }
};
using lstm_inst = typed_primitive_inst<lstm>;
diff --git a/inference-engine/thirdparty/clDNN/src/include/max_unpooling_inst.h b/inference-engine/thirdparty/clDNN/src/include/max_unpooling_inst.h
index 5b34910d2..4e06c0dd0 100644
--- a/inference-engine/thirdparty/clDNN/src/include/max_unpooling_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/max_unpooling_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/max_unpooling.hpp"
+#include "api/max_unpooling.hpp"
#include "primitive_inst.h"
#include <string>
#include <memory>
diff --git a/inference-engine/thirdparty/clDNN/src/include/memory_impl.h b/inference-engine/thirdparty/clDNN/src/include/memory_impl.h
index e20200166..5ff6499ff 100644
--- a/inference-engine/thirdparty/clDNN/src/include/memory_impl.h
+++ b/inference-engine/thirdparty/clDNN/src/include/memory_impl.h
@@ -16,27 +16,26 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/memory.hpp"
+#include "api/memory.hpp"
-#include "api_impl.h"
#include "engine_impl.h"
#include "refcounted_obj.h"
namespace cldnn {
struct memory_impl : refcounted_obj<memory_impl> {
- memory_impl(const engine_impl::ptr& engine, layout layout, uint16_t stream_id, bool reused = false)
- : _engine(engine), _layout(layout), _stream_id(stream_id), _reused(reused) {}
+ memory_impl(const engine_impl::ptr& engine, const layout& layout, uint16_t stream_id, bool reused = false)
+ : _engine(engine), _layout(layout), _stream_id(stream_id), _reused(reused), _bytes_count(_layout.bytes_count()) {}
virtual ~memory_impl() {
if (_engine != (engine_impl::ptr) nullptr && !_reused) {
- _engine->get_memory_pool().subtract_memory_used(_layout.bytes_count());
+ _engine->get_memory_pool().subtract_memory_used(_bytes_count);
}
}
virtual void* lock() = 0;
virtual void unlock() = 0;
virtual void fill(unsigned char pattern, event_impl::ptr ev) = 0;
- size_t size() const { return _layout.bytes_count(); }
+ size_t size() const { return _bytes_count; }
virtual bool is_allocated_by(const engine_impl& engine) const { return &engine == _engine.get(); }
const refcounted_obj_ptr<engine_impl>& get_engine() const { return _engine; }
const layout& get_layout() const { return _layout; }
@@ -49,10 +48,13 @@ protected:
private:
bool _reused;
+ // cached layout byte count; needed because the format traits static map may be destroyed
+ // before the memory_impl destructor runs when the engine is static
+ size_t _bytes_count;
};
struct simple_attached_memory : memory_impl {
- simple_attached_memory(layout layout, void* pointer, uint16_t stream_id)
+ simple_attached_memory(const layout& layout, void* pointer, uint16_t stream_id)
: memory_impl((engine_impl::ptr) nullptr, layout, stream_id), _pointer(pointer) {}
void* lock() override { return _pointer; }
@@ -92,5 +94,3 @@ private:
};
} // namespace cldnn
-
-API_CAST(::cldnn_memory, cldnn::memory_impl)
diff --git a/inference-engine/thirdparty/clDNN/src/include/memory_pool.h b/inference-engine/thirdparty/clDNN/src/include/memory_pool.h
index 890a5a5ce..d915b3780 100644
--- a/inference-engine/thirdparty/clDNN/src/include/memory_pool.h
+++ b/inference-engine/thirdparty/clDNN/src/include/memory_pool.h
@@ -16,9 +16,8 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/layout.hpp"
-#include "api/CPP/primitive.hpp"
-#include "api_impl.h"
+#include "api/layout.hpp"
+#include "api/primitive.hpp"
#include "refcounted_obj.h"
diff --git a/inference-engine/thirdparty/clDNN/src/include/meta_utils.h b/inference-engine/thirdparty/clDNN/src/include/meta_utils.h
index 3eb0efdff..8de3f6fd2 100644
--- a/inference-engine/thirdparty/clDNN/src/include/meta_utils.h
+++ b/inference-engine/thirdparty/clDNN/src/include/meta_utils.h
@@ -16,7 +16,7 @@
#pragma once
#include <type_traits>
-#include "api/CPP/meta_utils.hpp"
+#include "api/meta_utils.hpp"
#include "internal_primitive.h"
namespace cldnn {
diff --git a/inference-engine/thirdparty/clDNN/src/include/mutable_data_inst.h b/inference-engine/thirdparty/clDNN/src/include/mutable_data_inst.h
index 46f1f939d..6b57b1a8d 100644
--- a/inference-engine/thirdparty/clDNN/src/include/mutable_data_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/mutable_data_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/mutable_data.hpp"
+#include "api/mutable_data.hpp"
#include "primitive_inst.h"
#include <string>
#include <memory>
diff --git a/inference-engine/thirdparty/clDNN/src/include/mvn_inst.h b/inference-engine/thirdparty/clDNN/src/include/mvn_inst.h
index c197f565e..6c8ec3747 100644
--- a/inference-engine/thirdparty/clDNN/src/include/mvn_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/mvn_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/mvn.hpp"
+#include "api/mvn.hpp"
#include "primitive_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/network_impl.h b/inference-engine/thirdparty/clDNN/src/include/network_impl.h
index 420643147..0b99503be 100644
--- a/inference-engine/thirdparty/clDNN/src/include/network_impl.h
+++ b/inference-engine/thirdparty/clDNN/src/include/network_impl.h
@@ -17,9 +17,8 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/network.hpp"
+#include "api/network.hpp"
-#include "api_impl.h"
#include "engine_impl.h"
#include "event_impl.h"
#include "program_impl.h"
@@ -111,5 +110,3 @@ private:
void check_names();
};
} // namespace cldnn
-
-API_CAST(::cldnn_network, cldnn::network_impl)
diff --git a/inference-engine/thirdparty/clDNN/src/include/normalize_inst.h b/inference-engine/thirdparty/clDNN/src/include/normalize_inst.h
index 6135b4274..4e64f4b2b 100644
--- a/inference-engine/thirdparty/clDNN/src/include/normalize_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/normalize_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/normalize.hpp"
+#include "api/normalize.hpp"
#include "primitive_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/one_hot_inst.h b/inference-engine/thirdparty/clDNN/src/include/one_hot_inst.h
index 9c4daed2a..49fc93d18 100644
--- a/inference-engine/thirdparty/clDNN/src/include/one_hot_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/one_hot_inst.h
@@ -15,7 +15,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include <api/CPP/one_hot.hpp>
+#include <api/one_hot.hpp>
#include "primitive_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/pass_manager.h b/inference-engine/thirdparty/clDNN/src/include/pass_manager.h
index e404ec9b0..2bdf5f1d6 100644
--- a/inference-engine/thirdparty/clDNN/src/include/pass_manager.h
+++ b/inference-engine/thirdparty/clDNN/src/include/pass_manager.h
@@ -162,14 +162,13 @@ private:
void run(program_impl& p) override;
};
-class prepare_binarization : public base_pass {
+class prepare_quantization : public base_pass {
public:
- prepare_binarization() : base_pass("prepare_binarization") {}
+ prepare_quantization() : base_pass("prepare_quantization") {}
private:
void run(program_impl& p) override;
- void prepare_packed_quantize(program_impl& p, program_node& node);
- void prepare_fusing(program_impl& p, program_node& node);
+ void prepare_packed_quantize(program_impl& p);
};
class prepare_conv_eltw_fusing : public base_pass {
@@ -220,21 +219,23 @@ public:
private:
void run(program_impl& p) override;
- void fuse_skip_layers(program_impl& p, program_node* node);
- void fuse_conv_bn_scale(program_impl& p, program_node* node);
+ void fuse_reorders(program_impl& p);
+ void fuse_activations(program_impl& p);
+ void fuse_skip_layers(program_impl& p);
+ void fuse_simple_primitives(program_impl &p);
layout_optimizer& _lo;
};
class pre_optimize_bias : public base_pass {
public:
- explicit pre_optimize_bias(layout_optimizer& lo_ref);
+ explicit pre_optimize_bias(reorder_factory& rf_ref);
private:
void run(program_impl& p) override;
- virtual void run(program_impl& p, layout_optimizer& lo);
+ virtual void run(program_impl& p, reorder_factory& rf);
template <typename T>
- void optimize_bias(T& node, layout_optimizer& lo, program_impl& p);
- layout_optimizer& _lo;
+ void optimize_bias(T& node, reorder_factory& rf, program_impl& p);
+ reorder_factory& _rf;
};
class prepare_padding : public base_pass {
@@ -258,14 +259,26 @@ private:
class post_optimize_weights : public base_pass {
public:
- explicit post_optimize_weights(layout_optimizer& lo_ref);
+ explicit post_optimize_weights(reorder_factory& rf_ref);
private:
+ struct weights_bias_offset {
+ size_t weights_offset;
+ size_t bias_offset;
+
+ // When using this ctor, the weights offset is added to the bias offset
+ weights_bias_offset(const size_t w_offset, const size_t b_offset)
+ : weights_offset(w_offset)
+ , bias_offset(weights_offset + b_offset)
+ {}
+ };
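
A minimal sketch of the constructor semantics above (illustration only): the bias block is assumed to start right after the weights block in the dependency list.

    #include <cassert>
    #include <cstddef>

    // Same arithmetic as the ctor above: bias_offset = w_offset + b_offset.
    struct weights_bias_offset {
        std::size_t weights_offset;
        std::size_t bias_offset;
        weights_bias_offset(std::size_t w_offset, std::size_t b_offset)
            : weights_offset(w_offset), bias_offset(weights_offset + b_offset) {}
    };

    int main() {
        // Hypothetical node whose weights start at dependency index 1 with one weights input:
        weights_bias_offset off(1, 1);
        assert(off.weights_offset == 1 && off.bias_offset == 2);
    }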
+
void run(program_impl& p) override;
- virtual void run(program_impl& p, layout_optimizer& lo);
- template <typename T>
- void optimize_weights(T& node, layout_optimizer& lo, program_impl& p);
- layout_optimizer& _lo;
+ template<typename T>
+ weights_bias_offset get_weights_bias_offset(const T& node);
+ template<typename T>
+ void optimize_weights(T& node, program_impl& p);
+ reorder_factory& _rf;
};
class propagate_constants : public base_pass {
@@ -288,21 +301,24 @@ private:
class remove_redundant_reorders : public base_pass {
public:
- explicit remove_redundant_reorders(bool bfyx_to_bfyx_f16_opt = false);
+ explicit remove_redundant_reorders(layout_optimizer& lo_ref, bool enable_reorder_fusing = false, bool update_implementations = false);
void run(program_impl& p) override;
private:
- bool bfyx_to_bfyx_f16_opt;
+ layout_optimizer& lo;
+ bool enable_reorder_fusing;
+ bool update_implementations;
};
class reorder_inputs : public base_pass {
public:
- explicit reorder_inputs(layout_optimizer& lo_ref);
+ reorder_inputs(layout_optimizer& lo_ref, reorder_factory& rf_ref);
private:
void run(program_impl& p) override;
- virtual void run(program_impl& p, layout_optimizer& lo);
+ virtual void run(program_impl& p, layout_optimizer& lo, reorder_factory& rf);
layout_optimizer& _lo;
+ reorder_factory& _rf;
};
class trim_to_outputs : public base_pass {
diff --git a/inference-engine/thirdparty/clDNN/src/include/permute_inst.h b/inference-engine/thirdparty/clDNN/src/include/permute_inst.h
index 064f4d9f0..8ad0b371b 100644
--- a/inference-engine/thirdparty/clDNN/src/include/permute_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/permute_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/permute.hpp"
+#include "api/permute.hpp"
#include "primitive_inst.h"
#include <string>
#include <memory>
diff --git a/inference-engine/thirdparty/clDNN/src/include/pooling_inst.h b/inference-engine/thirdparty/clDNN/src/include/pooling_inst.h
index 7796978f9..39fb338a0 100644
--- a/inference-engine/thirdparty/clDNN/src/include/pooling_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/pooling_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/pooling.hpp"
+#include "api/pooling.hpp"
#include "primitive_inst.h"
#include <string>
#include <memory>
diff --git a/inference-engine/thirdparty/clDNN/src/include/primitive_inst.h b/inference-engine/thirdparty/clDNN/src/include/primitive_inst.h
index d91a9104d..2e5185208 100644
--- a/inference-engine/thirdparty/clDNN/src/include/primitive_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/primitive_inst.h
@@ -17,8 +17,8 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/primitive.hpp"
-#include "api/CPP/concatenation.hpp"
+#include "api/primitive.hpp"
+#include "api/concatenation.hpp"
#include "event_impl.h"
#include "memory_impl.h"
@@ -86,7 +86,7 @@ public:
memory_impl& dep_memory(size_t index) const { return dependencies().at(index)->output_memory(); }
memory_impl& output_memory() const { return *_output; }
- size_t inputs_memory_count() const { return _node.get_primitive()->input.size(); }
+ size_t inputs_memory_count() const { return _node.get_primitive()->input_size(); }
primitive_type_id type() const { return _node.type(); }
primitive_id id() const { return _node.id(); }
primitive_id org_id() const { return _node.get_org_primitive_id(); }
@@ -115,6 +115,14 @@ public:
void build_deps();
+ memory_impl& fused_memory(size_t dep_id) const {
+ return dep_memory(get_fused_mem_offset() + dep_id);
+ }
+
+ bool has_fused_primitives() const { return !_node.get_fused_primitives().empty(); }
+ size_t get_fused_mem_count() const { return _node.get_fused_inputs_count(); }
+ size_t get_fused_mem_offset() const { return _node.get_fused_primitives()[0].dep_start_idx; }
+
protected:
primitive_inst(network_impl& network, program_node const& node, bool allocate_memory);
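
Illustration only, not taken from the clDNN sources: the fused-memory accessors added above treat the dependency/memory list as "regular inputs first, fused inputs appended at the end", so fused_memory(k) resolves to dep_memory(offset + k), where offset is the first fused primitive's dep_start_idx. A plain-vector sketch of that indexing, with hypothetical names:

    #include <cstddef>
    #include <iostream>
    #include <string>
    #include <vector>

    // Hypothetical flattening of the idea above: memories[0..offset) belong to the
    // primitive itself, memories[offset..) were appended for fused primitives.
    struct fake_inst {
        std::vector<std::string> memories;
        std::size_t fused_mem_offset;

        const std::string& dep_memory(std::size_t i) const { return memories.at(i); }
        const std::string& fused_memory(std::size_t k) const {
            return dep_memory(fused_mem_offset + k);   // same indexing as above
        }
    };

    int main() {
        fake_inst inst{{"input", "weights", "bias", "fused_scale", "fused_eltwise"}, 3};
        std::cout << inst.fused_memory(0) << ' ' << inst.fused_memory(1) << '\n';
        // prints: fused_scale fused_eltwise
    }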
diff --git a/inference-engine/thirdparty/clDNN/src/include/primitive_type.h b/inference-engine/thirdparty/clDNN/src/include/primitive_type.h
index 3089c5cda..072b157f0 100644
--- a/inference-engine/thirdparty/clDNN/src/include/primitive_type.h
+++ b/inference-engine/thirdparty/clDNN/src/include/primitive_type.h
@@ -16,9 +16,9 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/memory.hpp"
-#include "api/CPP/primitive.hpp"
-#include "api/CPP/program.hpp"
+#include "api/memory.hpp"
+#include "api/primitive.hpp"
+#include "api/program.hpp"
#include "topology_impl.h"
@@ -32,22 +32,22 @@ struct program_node;
struct primitive_impl;
class primitive_inst;
struct program_impl;
-} // namespace cldnn
-struct cldnn_primitive_type {
- virtual ~cldnn_primitive_type() = default;
- virtual std::shared_ptr<cldnn::primitive> from_dto(const CLDNN_PRIMITIVE_DESC(primitive) * dto) const = 0;
- virtual std::shared_ptr<cldnn::program_node> create_node(cldnn::program_impl& program,
- const std::shared_ptr<cldnn::primitive> prim) const = 0;
- virtual std::shared_ptr<cldnn::primitive_inst> create_instance(cldnn::network_impl& network,
- const cldnn::program_node& node) const = 0;
- virtual std::unique_ptr<cldnn::primitive_impl> choose_impl(cldnn::engine_impl& engine,
- const cldnn::program_node& node) const = 0;
- virtual bool does_an_implementation_exist(cldnn::engine_impl& engine, const cldnn::program_node& node) const = 0;
- virtual bool does_possible_implementation_exist(cldnn::engine_impl& engine,
- const cldnn::program_node& node) const = 0;
- virtual cldnn::layout calc_output_layout(const cldnn::program_node& node) const = 0;
- virtual std::string to_string(const cldnn::program_node& node) const = 0;
+struct primitive_type {
+ virtual ~primitive_type() = default;
+
+ virtual std::shared_ptr<program_node> create_node(program_impl& program,
+ const std::shared_ptr<primitive> prim) const = 0;
+ virtual std::shared_ptr<primitive_inst> create_instance(network_impl& network,
+ const program_node& node) const = 0;
+ virtual std::unique_ptr<primitive_impl> choose_impl(engine_impl& engine,
+ const program_node& node) const = 0;
+ virtual bool does_an_implementation_exist(engine_impl& engine, const program_node& node) const = 0;
+ virtual bool does_possible_implementation_exist(engine_impl& engine,
+ const program_node& node) const = 0;
+ virtual layout calc_output_layout(const program_node& node) const = 0;
+ virtual std::string to_string(const program_node& node) const = 0;
virtual bool is_internal_type() const { return false; }
};
+} // namespace cldnn
diff --git a/inference-engine/thirdparty/clDNN/src/include/primitive_type_base.h b/inference-engine/thirdparty/clDNN/src/include/primitive_type_base.h
index d7e464d7c..0b9033a77 100644
--- a/inference-engine/thirdparty/clDNN/src/include/primitive_type_base.h
+++ b/inference-engine/thirdparty/clDNN/src/include/primitive_type_base.h
@@ -27,17 +27,10 @@
namespace cldnn {
template <class PType>
-struct primitive_type_base : ::cldnn_primitive_type {
+struct primitive_type_base : primitive_type {
static_assert(meta::is_api_primitive<PType>::value,
"Primitive type passed to primitive_type_base should derive from cldnn::primitive");
- std::shared_ptr<primitive> from_dto(const CLDNN_PRIMITIVE_DESC(primitive) * dto) const override {
- if (dto->type != this)
- throw std::invalid_argument("primitive_type_base::from_dto: primitive type mismatch");
-
- return std::make_shared<PType>(as_dto<PType>(dto));
- }
-
std::shared_ptr<cldnn::program_node> create_node(program_impl& program,
const std::shared_ptr<primitive> prim) const override {
if (prim->type != this)
diff --git a/inference-engine/thirdparty/clDNN/src/include/prior_box_inst.h b/inference-engine/thirdparty/clDNN/src/include/prior_box_inst.h
index 9adf088a8..d77b11d70 100644
--- a/inference-engine/thirdparty/clDNN/src/include/prior_box_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/prior_box_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/prior_box.hpp"
+#include "api/prior_box.hpp"
#include "primitive_inst.h"
#include <string>
#include <memory>
@@ -32,7 +32,7 @@ struct typed_program_node<prior_box> : typed_program_node_base<prior_box> {
program_node& input() const { return get_dependency(0); }
void calc_result();
- memory_impl& get_result_buffer() const { return *result; }
+ memory_impl::ptr get_result_buffer() const { return result; }
private:
memory_impl::ptr result;
diff --git a/inference-engine/thirdparty/clDNN/src/include/program_helpers.h b/inference-engine/thirdparty/clDNN/src/include/program_helpers.h
index ac38579aa..92775139f 100644
--- a/inference-engine/thirdparty/clDNN/src/include/program_helpers.h
+++ b/inference-engine/thirdparty/clDNN/src/include/program_helpers.h
@@ -65,8 +65,14 @@ struct program_helpers {
// helper function which creates a single-element array if given anything
// other than std::vector.
- // std::vector case -> does not wrap, returns t as-is
- static const primitive::fixed_size_vector_ref& wrap_if_single(primitive::fixed_size_vector_ref const& t) {
+ // std::vector case -> does not wrap
+ template <typename T>
+ static std::vector<T>& wrap_if_single(std::vector<T>& t) {
+ return t;
+ }
+
+ template <typename T>
+ static const std::vector<T>& wrap_if_single(const std::vector<T>& t) {
return t;
}
@@ -103,10 +109,10 @@ struct program_helpers {
}
static void merge_buffers(engine_impl& engine,
program_node& node,
- layout target_layout,
+ const layout& target_layout,
size_t begin_offset,
size_t end_offset);
static layout get_weights_layout(typed_program_node<cldnn::data>& data_node, int32_t split);
static std::pair<bool, bool> are_layouts_identical(layout const& l1, layout const& l2);
};
-} // namespace cldnn
\ No newline at end of file
+} // namespace cldnn
diff --git a/inference-engine/thirdparty/clDNN/src/include/program_impl.h b/inference-engine/thirdparty/clDNN/src/include/program_impl.h
index 9af2a2aa8..e19547d96 100644
--- a/inference-engine/thirdparty/clDNN/src/include/program_impl.h
+++ b/inference-engine/thirdparty/clDNN/src/include/program_impl.h
@@ -18,7 +18,7 @@
#pragma once
-#include "api/CPP/program.hpp"
+#include "api/program.hpp"
#include "refcounted_obj.h"
#include "engine_impl.h"
@@ -49,7 +49,7 @@ struct program_impl : public refcounted_obj<program_impl> {
friend class prepare_padding; // to be removed when possible
friend class propagate_constants; // to be removed when possible
friend class prepare_primitive_fusing; // to be removed when possible
- friend class prepare_binarization; // to be removed when possible
+ friend class prepare_quantization; // to be removed when possible
friend class prepare_conv_eltw_fusing; // to be removed when possible
friend class reorder_inputs; // to be removed when possible
friend class remove_redundant_reorders; // to be removed when possible
@@ -60,9 +60,13 @@ public:
public:
typedef std::list<program_node*> list_of_nodes;
typedef list_of_nodes::const_iterator const_iterator;
+ typedef list_of_nodes::const_reverse_iterator const_reverse_iterator;
typedef list_of_nodes::iterator node_iterator;
+ typedef list_of_nodes::reverse_iterator node_reverse_iterator;
const_iterator begin() const { return _processing_order.begin(); }
const_iterator end() const { return _processing_order.end(); }
+ const_reverse_iterator rbegin() const { return _processing_order.rbegin(); }
+ const_reverse_iterator rend() const { return _processing_order.rend(); }
void calc_processing_order_visit(program_node* node);
void calc_processing_order(program_impl& p);
@@ -191,6 +195,9 @@ public:
// returns if 'node' has been extracted and removed successfully
bool extract_and_remove(program_node& node);
+ // Fuses two nodes into fused_node and removes peer_node from graph
+ void fuse_nodes(program_node& fused_node, program_node& peer_node);
+
// returns if 'node' has been removed
bool remove_if_dangling(program_node& node);
@@ -242,6 +249,7 @@ private:
void build_program(bool is_internal);
void init_graph();
void set_options();
+ void set_layout_optimizer_attributes(layout_optimizer& lo);
void apply_opt_pass(base_pass& pass);
@@ -301,5 +309,3 @@ private:
};
} // namespace cldnn
-
-API_CAST(::cldnn_program, cldnn::program_impl)
diff --git a/inference-engine/thirdparty/clDNN/src/include/program_node.h b/inference-engine/thirdparty/clDNN/src/include/program_node.h
index f87dfa006..43cbb770d 100644
--- a/inference-engine/thirdparty/clDNN/src/include/program_node.h
+++ b/inference-engine/thirdparty/clDNN/src/include/program_node.h
@@ -18,7 +18,8 @@
#include <set>
#include <array>
-#include "api/CPP/primitive.hpp"
+#include "api/primitive.hpp"
+#include "api/activation.hpp"
#include "internal_primitive.h"
#include "meta_utils.h"
@@ -32,6 +33,7 @@ namespace cldnn {
struct program_impl;
class reorder_inputs;
class graph_initializations;
+class prepare_quantization;
template <class T>
struct typed_program_node;
@@ -42,6 +44,16 @@ struct internal_primitive_type_base;
class json_composite;
class xml_composite;
+
+struct fused_primitive_desc {
+ std::shared_ptr<const primitive> prim;
+ size_t dep_start_idx;
+ std::vector<primitive_id> deps;
+ activation_func activation;
+ activation_additional_params activation_params;
+ layout output_layout = layout(data_types::f32, format::bfyx, tensor());
+};
+
/*
Base class for all primitives which wraps API class and extends it to be used
in graph context.
@@ -58,7 +70,7 @@ struct program_node {
friend class compile_graph; // to be removed when possible
friend class graph_initializations; // to be removed when possible
friend class prepare_primitive_fusing; // to be removed when possible
- friend class prepare_binarization; // to be removed when possible
+ friend class prepare_quantization; // to be removed when possible
friend class prepare_conv_eltw_fusing; // to be removed when possible
friend class prepare_conv_eltw_read_write_opt; // to be removed when possible
friend class propagate_constants; // to be removed when possible
@@ -153,7 +165,7 @@ public:
// sets cached output layout to an arbitrary value; invalidates users if the new layout differs from the previous one and @p
// invalidate_users_if_changed is set to true; returns whether the output layout has changed
- bool set_output_layout(layout new_layout, bool invalidate_users_if_changed = true);
+ bool set_output_layout(layout& new_layout, bool invalidate_users_if_changed = true);
// forces recalculation of the cached output layout; invalidates users if the new layout is different from the previous one and
// @p invalidate_users_if_changed is set to true; returns whether the output layout has changed
@@ -182,16 +194,31 @@ public:
bool is_marked(uint8_t val) const { return user_mark == val; }
uint8_t get_user_mark() const { return user_mark; }
- void set_fused_activation(cldnn_activation_func activation_func,
- cldnn_activation_additional_params additional_params) {
- fused_activation.activation_func = activation_func;
- fused_activation.additional_params = additional_params;
+ void add_fused_activation(activation_func activation_func,
+ activation_additional_params additional_params) {
+ fused_activations.emplace_back(activation_func, additional_params);
}
- cldnn_activation_func get_fused_activation_func() const { return fused_activation.activation_func; }
+ std::vector<activation_func> get_fused_activations_funcs() const {
+ std::vector<activation_func> funcs;
+ std::transform(fused_activations.begin(),
+ fused_activations.end(),
+ std::back_inserter(funcs),
+ [](fused_activation_params const& p) { return p.func; });
+ return funcs;
+ }
- cldnn_activation_additional_params get_fused_activation_params() const {
- return fused_activation.additional_params;
+ std::vector<activation_additional_params> get_fused_activations_params() const {
+ std::vector<activation_additional_params> params;
+ std::transform(fused_activations.begin(),
+ fused_activations.end(),
+ std::back_inserter(params),
+ [](fused_activation_params const& p) { return p.params; });
+ return params;
+ }
+
+ void copy_fused_activation(const program_node& rhs) {
+ fused_activations = rhs.fused_activations;
}
// check/set if the node can be optimized out (removed from the network)
@@ -257,6 +284,33 @@ public:
return reused_memory_color;
}
+ virtual void add_fused_primitive(fused_primitive_desc& desc) {
+ fused_prims.push_back(desc);
+ }
+
+ virtual void add_fused_primitives(std::vector<fused_primitive_desc> descs) {
+ fused_prims.insert(fused_prims.end(), descs.begin(), descs.end());
+ }
+
+ const std::vector<fused_primitive_desc>& get_fused_primitives() const { return fused_prims; }
+
+ size_t get_fused_inputs_count() const {
+ size_t count = 0;
+ for (auto& fp : get_fused_primitives()) {
+ count += fp.deps.size();
+ }
+ return count;
+ }
+
+ bool has_fused_primitives() const { return !get_fused_primitives().empty(); }
+
+ layout get_fused_output_layout() const {
+ auto fused_prims = get_fused_primitives();
+ if (fused_prims.empty())
+ return layout(data_types::f32, format::bfyx, tensor());
+ return fused_prims.back().output_layout;
+ }
+
protected:
std::shared_ptr<primitive> desc;
program_impl& myprog;
@@ -279,7 +333,7 @@ protected:
uint8_t user_mark = 0;
bool optimized = false;
bool share_buffer = true;
- std::array<bool, CLDNN_TENSOR_DIM_MAX> _support_padding_in_axis = {}; // zero-initialization
+ std::array<bool, tensor_dim_max> _support_padding_in_axis = {}; // zero-initialization
mutable bool has_reused_memory = false;
mutable uint32_t reused_memory_color = 0;
@@ -287,12 +341,18 @@ protected:
const primitive_id org_id;
struct fused_activation_params {
- cldnn_activation_func activation_func = activation_none;
- cldnn_activation_additional_params additional_params = {0.0f, 0.0f};
- };
+ activation_func func = activation_func::none;
+ activation_additional_params params = {0.0f, 0.0f};
- fused_activation_params fused_activation;
+ fused_activation_params() {}
+
+ fused_activation_params(activation_func _func, activation_additional_params _params) :
+ func(_func),
+ params(_params) {}
+ };
+ std::vector<fused_activation_params> fused_activations;
+ std::vector<fused_primitive_desc> fused_prims;
void invalidate_users() const;
};
@@ -303,6 +363,7 @@ struct api_typed_program_node_base : public program_node {
"PType should name a non-const, non-volatile type derived from cldnn::primitive but not from "
"cldnn::internal_primitive");
friend class cldnn::graph_initializations;
+ friend class cldnn::prepare_quantization;
friend struct cldnn::program_impl;
friend class cldnn::reorder_inputs;
diff --git a/inference-engine/thirdparty/clDNN/src/include/proposal_inst.h b/inference-engine/thirdparty/clDNN/src/include/proposal_inst.h
index a8c8d3b62..0ffb3ac73 100644
--- a/inference-engine/thirdparty/clDNN/src/include/proposal_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/proposal_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/proposal.hpp"
+#include "api/proposal.hpp"
#include "primitive_inst.h"
#include <string>
#include <vector>
diff --git a/inference-engine/thirdparty/clDNN/src/include/pyramid_roi_align_inst.h b/inference-engine/thirdparty/clDNN/src/include/pyramid_roi_align_inst.h
index 9da44c2a7..66b602028 100644
--- a/inference-engine/thirdparty/clDNN/src/include/pyramid_roi_align_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/pyramid_roi_align_inst.h
@@ -13,7 +13,7 @@
// limitations under the License.
#pragma once
-#include "api/CPP/pyramid_roi_align.hpp"
+#include "api/pyramid_roi_align.hpp"
#include "primitive_inst.h"
#include <memory>
diff --git a/inference-engine/thirdparty/clDNN/src/include/quantize_inst.h b/inference-engine/thirdparty/clDNN/src/include/quantize_inst.h
index 75522ecec..250b6eeec 100644
--- a/inference-engine/thirdparty/clDNN/src/include/quantize_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/quantize_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/quantize.hpp"
+#include "api/quantize.hpp"
#include "primitive_inst.h"
#include <string>
@@ -31,11 +31,13 @@ public:
program_node& input(size_t index = 0) const { return get_dependency(index); }
size_t inputs_count() const { return get_dependencies().size(); }
- void set_packed_binary_output(bool _packed_binary_output) { packed_binary_output = _packed_binary_output; }
- bool get_packed_binary_output() const { return packed_binary_output; }
+ void set_output_data_type(data_types dt) { out_dt = dt; dt_changed = true; }
+ data_types get_output_data_type() const { return out_dt; }
+ bool has_custom_out_dt() const { return dt_changed; }
private:
- bool packed_binary_output = false;
+ data_types out_dt;
+ bool dt_changed = false;
};
using quantize_node = typed_program_node<quantize>;
diff --git a/inference-engine/thirdparty/clDNN/src/include/reduce_inst.h b/inference-engine/thirdparty/clDNN/src/include/reduce_inst.h
index 7aa4ad613..9963505fb 100644
--- a/inference-engine/thirdparty/clDNN/src/include/reduce_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/reduce_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/reduce.hpp"
+#include "api/reduce.hpp"
#include "primitive_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/region_yolo_inst.h b/inference-engine/thirdparty/clDNN/src/include/region_yolo_inst.h
index ff94ccae9..0a285b2f1 100644
--- a/inference-engine/thirdparty/clDNN/src/include/region_yolo_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/region_yolo_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/region_yolo.hpp"
+#include "api/region_yolo.hpp"
#include "primitive_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/reorder_inst.h b/inference-engine/thirdparty/clDNN/src/include/reorder_inst.h
index fa5caabc3..e4824d0ce 100644
--- a/inference-engine/thirdparty/clDNN/src/include/reorder_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/reorder_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/reorder.hpp"
+#include "api/reorder.hpp"
#include "primitive_inst.h"
#include <string>
#include <memory>
diff --git a/inference-engine/thirdparty/clDNN/src/include/reorg_yolo_inst.h b/inference-engine/thirdparty/clDNN/src/include/reorg_yolo_inst.h
index 1fd9e98f8..4f8528645 100644
--- a/inference-engine/thirdparty/clDNN/src/include/reorg_yolo_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/reorg_yolo_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/reorg_yolo.hpp"
+#include "api/reorg_yolo.hpp"
#include "primitive_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/reshape_inst.h b/inference-engine/thirdparty/clDNN/src/include/reshape_inst.h
index 751276dc6..2ee1e45c5 100644
--- a/inference-engine/thirdparty/clDNN/src/include/reshape_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/reshape_inst.h
@@ -16,8 +16,9 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/reshape.hpp"
+#include "api/reshape.hpp"
#include "primitive_inst.h"
+#include "error_handler.h"
#include <string>
#include <memory>
@@ -33,10 +34,13 @@ struct typed_program_node<reshape> : public typed_program_node_base<reshape> {
public:
using parent::parent;
- program_node& input() const { return get_dependency(0); }
+ program_node& input() const {
+ CLDNN_ERROR_LESS_THAN(id(), "the number of dependencies", dependencies.size(), "1", 1, "ERROR: the node has no input");
+ return get_dependency(0);
+ }
bool is_in_place() const {
- if (this->is_output() || this->get_fused_activation_func() != activation_none)
+ if (this->is_output() || !this->get_fused_activations_funcs().empty())
return false;
return (!this->get_output_layout().data_padding && !input().get_output_layout(false).data_padding);
}
diff --git a/inference-engine/thirdparty/clDNN/src/include/reverse_sequence_inst.h b/inference-engine/thirdparty/clDNN/src/include/reverse_sequence_inst.h
index 01411468d..631b59165 100644
--- a/inference-engine/thirdparty/clDNN/src/include/reverse_sequence_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/reverse_sequence_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/reverse_sequence.hpp"
+#include "api/reverse_sequence.hpp"
#include "primitive_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/roi_pooling_inst.h b/inference-engine/thirdparty/clDNN/src/include/roi_pooling_inst.h
index 70d3d7e00..b323d3710 100644
--- a/inference-engine/thirdparty/clDNN/src/include/roi_pooling_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/roi_pooling_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/roi_pooling.hpp"
+#include "api/roi_pooling.hpp"
#include "primitive_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/scale_grad_input_inst.h b/inference-engine/thirdparty/clDNN/src/include/scale_grad_input_inst.h
index 0d15828ed..29815debb 100644
--- a/inference-engine/thirdparty/clDNN/src/include/scale_grad_input_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/scale_grad_input_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/scale_grad_input.hpp"
+#include "api/scale_grad_input.hpp"
#include "primitive_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/scale_grad_weights_inst.h b/inference-engine/thirdparty/clDNN/src/include/scale_grad_weights_inst.h
index b7f22f8f2..ecef6d1d5 100644
--- a/inference-engine/thirdparty/clDNN/src/include/scale_grad_weights_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/scale_grad_weights_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/scale_grad_weights.hpp"
+#include "api/scale_grad_weights.hpp"
#include "primitive_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/scale_inst.h b/inference-engine/thirdparty/clDNN/src/include/scale_inst.h
index fa12b18c4..6b7f28c6c 100644
--- a/inference-engine/thirdparty/clDNN/src/include/scale_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/scale_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/scale.hpp"
+#include "api/scale.hpp"
#include "primitive_inst.h"
#include <string>
#include <memory>
diff --git a/inference-engine/thirdparty/clDNN/src/include/select_inst.h b/inference-engine/thirdparty/clDNN/src/include/select_inst.h
index 2046c8ac9..ffddcffed 100644
--- a/inference-engine/thirdparty/clDNN/src/include/select_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/select_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include <api/CPP/select.hpp>
+#include <api/select.hpp>
#include "primitive_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/shuffle_channels_inst.h b/inference-engine/thirdparty/clDNN/src/include/shuffle_channels_inst.h
index 0fd82fad0..168c4ec38 100644
--- a/inference-engine/thirdparty/clDNN/src/include/shuffle_channels_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/shuffle_channels_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/shuffle_channels.hpp"
+#include "api/shuffle_channels.hpp"
#include "primitive_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/sliding_window_utils.h b/inference-engine/thirdparty/clDNN/src/include/sliding_window_utils.h
index bb89fc2d1..8c93cc99f 100644
--- a/inference-engine/thirdparty/clDNN/src/include/sliding_window_utils.h
+++ b/inference-engine/thirdparty/clDNN/src/include/sliding_window_utils.h
@@ -14,8 +14,8 @@
#pragma once
-#include <api/CPP/layout.hpp>
-#include <api/CPP/tensor.hpp>
+#include <api/layout.hpp>
+#include <api/tensor.hpp>
#include <algorithm>
#include <cassert>
diff --git a/inference-engine/thirdparty/clDNN/src/include/softmax_inst.h b/inference-engine/thirdparty/clDNN/src/include/softmax_inst.h
index 1b4fc39d6..1b362936d 100644
--- a/inference-engine/thirdparty/clDNN/src/include/softmax_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/softmax_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/softmax.hpp"
+#include "api/softmax.hpp"
#include "primitive_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/softmax_loss_grad_inst.h b/inference-engine/thirdparty/clDNN/src/include/softmax_loss_grad_inst.h
index a764efca2..88cc8d172 100644
--- a/inference-engine/thirdparty/clDNN/src/include/softmax_loss_grad_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/softmax_loss_grad_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/softmax_loss_grad.hpp"
+#include "api/softmax_loss_grad.hpp"
#include "primitive_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/split_inst.h b/inference-engine/thirdparty/clDNN/src/include/split_inst.h
index 0d6012309..97add445b 100644
--- a/inference-engine/thirdparty/clDNN/src/include/split_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/split_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/split.hpp"
+#include "api/split.hpp"
#include "primitive_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/strided_slice_inst.h b/inference-engine/thirdparty/clDNN/src/include/strided_slice_inst.h
index 1aedff1a2..16a2243c0 100644
--- a/inference-engine/thirdparty/clDNN/src/include/strided_slice_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/strided_slice_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/strided_slice.hpp"
+#include "api/strided_slice.hpp"
#include "primitive_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/tile_inst.h b/inference-engine/thirdparty/clDNN/src/include/tile_inst.h
index 7751c23cf..5bc64ff48 100644
--- a/inference-engine/thirdparty/clDNN/src/include/tile_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/tile_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/tile.hpp"
+#include "api/tile.hpp"
#include "primitive_inst.h"
#include <string>
diff --git a/inference-engine/thirdparty/clDNN/src/include/to_string_utils.h b/inference-engine/thirdparty/clDNN/src/include/to_string_utils.h
index 52a575252..f865a0dd4 100644
--- a/inference-engine/thirdparty/clDNN/src/include/to_string_utils.h
+++ b/inference-engine/thirdparty/clDNN/src/include/to_string_utils.h
@@ -15,9 +15,9 @@
*/
#pragma once
#include <string>
-#include "api/CPP/tensor.hpp"
-#include "api/CPP/layout.hpp"
-#include "api/CPP/primitive.hpp"
+#include "api/tensor.hpp"
+#include "api/layout.hpp"
+#include "api/primitive.hpp"
#include <memory>
namespace cldnn {
@@ -93,6 +93,8 @@ inline std::string fmt_to_str(format fmt) {
return "bfwzyx";
case format::fs_b_yx_fsv32:
return "fs_b_yx_fsv32";
+ case format::bfzyx_f16:
+ return "bfzyx_f16";
case format::winograd_2x3_s1_weights:
return "winograd_2x3_s1_weights";
@@ -134,7 +136,10 @@ inline std::string fmt_to_str(format fmt) {
return "os_is_y_x8_osv8_isv4";
case format::os_is_yx_osv32_isv32p:
return "os_is_yx_osv32_isv32p";
-
+ case format::o_i_zyx_i16_o16:
+ return "o_i_zyx_i16_o16";
+ case format::i_o_zyx_o16_i16:
+ return "i_o_zyx_o16_i16";
default:
return "unknown (" + std::to_string(fmt.value) + ")";
}
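
fmt_to_str() gains names for the newly handled blocked formats (bfzyx_f16, o_i_zyx_i16_o16, i_o_zyx_o16_i16), so they no longer fall through to the "unknown (<value>)" branch. A hedged sketch of how such a helper is typically used for debug output; the surrounding program is illustrative and not part of this commit:

// Illustrative only: print a format's name while debugging.
// to_string_utils.h is the internal header modified in this hunk.
#include <iostream>
#include "to_string_utils.h"

int main() {
    cldnn::format fmt = cldnn::format::bfzyx_f16;
    // After this change the new case is matched instead of the default branch.
    std::cout << cldnn::fmt_to_str(fmt) << std::endl;  // prints "bfzyx_f16"
    return 0;
}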
diff --git a/inference-engine/thirdparty/clDNN/src/include/topology_impl.h b/inference-engine/thirdparty/clDNN/src/include/topology_impl.h
index 41ca28bb3..c8b465cc4 100644
--- a/inference-engine/thirdparty/clDNN/src/include/topology_impl.h
+++ b/inference-engine/thirdparty/clDNN/src/include/topology_impl.h
@@ -16,9 +16,8 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/primitive.hpp"
-#include "api/CPP/input_layout.hpp"
-#include "api_impl.h"
+#include "api/primitive.hpp"
+#include "api/input_layout.hpp"
#include "refcounted_obj.h"
#include <map>
@@ -55,7 +54,7 @@ public:
}
}
- void change_input_layout(const primitive_id& id, layout new_layout) {
+ void change_input_layout(const primitive_id& id, const layout& new_layout) {
auto& inp_layout = this->at(id);
if (inp_layout->type != input_layout::type_id()) {
throw std::runtime_error("Primitive: " + id + " is not input_layout.");
@@ -76,5 +75,3 @@ private:
topology_map _primitives;
};
} // namespace cldnn
-
-API_CAST(::cldnn_topology, cldnn::topology_impl)
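
change_input_layout() now takes the new layout by const reference instead of by value, and the API_CAST(::cldnn_topology, cldnn::topology_impl) mapping is removed along with the api_impl.h include, consistent with the api/CPP -> api header reorganization elsewhere in this commit. A hedged sketch of a call site after the signature change; the topology variable and primitive id are illustrative:

// Illustrative only: re-binding an input_layout primitive to a new shape.
// topo is assumed to be a cldnn::topology_impl&; the layout is now passed by
// const reference, so no temporary copy is made at the call boundary.
cldnn::layout new_in(cldnn::data_types::f32, cldnn::format::bfyx,
                     cldnn::tensor(1, 3, 224, 224));
topo.change_input_layout("input", new_in);  // throws if "input" is not an input_layout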
diff --git a/inference-engine/thirdparty/clDNN/src/include/upsampling_inst.h b/inference-engine/thirdparty/clDNN/src/include/upsampling_inst.h
index 86ea8af43..bc19b5c93 100644
--- a/inference-engine/thirdparty/clDNN/src/include/upsampling_inst.h
+++ b/inference-engine/thirdparty/clDNN/src/include/upsampling_inst.h
@@ -16,7 +16,7 @@
///////////////////////////////////////////////////////////////////////////////////////////////////
#pragma once
-#include "api/CPP/upsampling.hpp"
+#include "api/upsampling.hpp"
#include "primitive_inst.h"
#include <memory>
#include "topology_impl.h"