// File: inference-engine/src/inference_engine/ie_util_internal.hpp
// Copyright (C) 2018 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//

#ifndef IE_UTIL_HPP
#define IE_UTIL_HPP

#include <vector>
#include <functional>
#include <deque>
#include <unordered_set>
#include <utility>
#include <string>
#include <ostream>

#include <cpp/ie_cnn_network.h>
#include <cnn_network_impl.hpp>

namespace InferenceEngine {

/**
 * @brief Simple helper function to check whether an element is present in a container.
 * The container must provide an STL-compliant find() member function.
 *
 * @param container - Container to check
 * @param element - Element to look for
 *
 * @return true if the element is present in the container
 */
template<typename C, typename T>
bool contains(const C& container, const T& element) {
    return container.find(element) != container.end();
}
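
// Usage sketch (illustrative only): contains() works with any container that
// exposes an STL-style find(), e.g. std::set or the keys of std::unordered_map.
//
//     std::set<std::string> names{"conv1", "relu1"};
//     bool hasConv = InferenceEngine::contains(names, std::string{"conv1"});  // true
//     bool hasPool = InferenceEngine::contains(names, std::string{"pool1"});  // false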

/**
 * @brief Splits the graph into subgraphs using the provided splitter object
 *
 * @param network - Source network
 * @param splitter - Splitter functor; takes two adjacent layers and must return true
 * if the layers must go to different subgraphs
 *
 * @return List of subgraphs
 */
INFERENCE_ENGINE_API_CPP(std::vector<std::vector<InferenceEngine::CNNLayerPtr>>)
groupSubgraphs(InferenceEngine::ICNNNetwork& network,
               std::function<bool(const InferenceEngine::CNNLayerPtr&,
                                  const InferenceEngine::CNNLayerPtr&)> splitter);
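
// Usage sketch (illustrative; 'network' is a hypothetical ICNNNetwork reference,
// and the type-based policy below is only one possible splitter):
//
//     auto subgraphs = InferenceEngine::groupSubgraphs(network,
//         [](const InferenceEngine::CNNLayerPtr& lhs, const InferenceEngine::CNNLayerPtr& rhs) {
//             // Adjacent layers of different types go to different subgraphs.
//             return lhs->type != rhs->type;
//         });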

/**
 * @brief Creates a copy of a data object, unconnected to any graph
 * @param source - Source data object
 * @return Shared pointer to the new data object
 */
INFERENCE_ENGINE_API_CPP(DataPtr) cloneData(const Data& source);

/**
 * @brief Creates a copy of a layer object, unconnected to any graph
 * @param source - Source layer object
 * @return Shared pointer to the new layer object
 */
INFERENCE_ENGINE_API_CPP(CNNLayerPtr) clonelayer(const CNNLayer& source);
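
// Usage sketch (illustrative; 'srcData' and 'srcLayer' are hypothetical
// DataPtr/CNNLayerPtr values obtained elsewhere):
//
//     InferenceEngine::DataPtr dataCopy = InferenceEngine::cloneData(*srcData);         // no graph links
//     InferenceEngine::CNNLayerPtr layerCopy = InferenceEngine::clonelayer(*srcLayer);  // not attached to any network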

/**
 * @brief Clones a selected set of nodes into a separate network;
 * only connections between the passed nodes are duplicated
 *
 * @param layers - Layers to clone; all must belong to the same network
 * @param layerCloner - Layer cloning functor
 *
 * @return Cloned network
 */
INFERENCE_ENGINE_API_CPP(InferenceEngine::details::CNNNetworkImplPtr)
cloneNet(const std::vector<InferenceEngine::CNNLayerPtr>& layers,
         std::function<CNNLayerPtr(const CNNLayer&)> layerCloner = clonelayer);
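
// Usage sketch (illustrative; 'selected' is a hypothetical, caller-built list of
// layers from a single network, and collectLayersSomehow() is a placeholder):
//
//     std::vector<InferenceEngine::CNNLayerPtr> selected = collectLayersSomehow();
//     auto subnet = InferenceEngine::cloneNet(selected);  // uses clonelayer by default
//     auto custom = InferenceEngine::cloneNet(selected,
//         [](const InferenceEngine::CNNLayer& src) { return InferenceEngine::clonelayer(src); });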

/**
 * @brief Clones the whole network. All layers and data objects are cloned.
 *
 * Blobs inside layers are reused rather than copied.
 */
INFERENCE_ENGINE_API_CPP(InferenceEngine::details::CNNNetworkImplPtr)
cloneNet(const InferenceEngine::ICNNNetwork &network);
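
// Usage sketch (illustrative; 'network' is a hypothetical ICNNNetwork reference):
// layers and data objects of the copy are new, while the blobs stay shared with
// the source, as noted above.
//
//     InferenceEngine::details::CNNNetworkImplPtr copy = InferenceEngine::cloneNet(network);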

namespace traverse {

/**
 * @brief Expansion helper for traverse(): appends the layers that follow the given
 * layer in the graph to the deque
 */
INFERENCE_ENGINE_API_CPP(void)
forward(const InferenceEngine::CNNLayerPtr& layer, std::deque<InferenceEngine::CNNLayerPtr>& layers);

/**
 * @brief Expansion helper for traverse(): appends the layers that precede the given
 * layer in the graph to the deque
 */
INFERENCE_ENGINE_API_CPP(void)
backward(const InferenceEngine::CNNLayerPtr& layer, std::deque<InferenceEngine::CNNLayerPtr>& layers);

/**
 * @brief Generic traversal over layers starting from the given inputs.
 * Every layer is visited at most once; 'apply' is invoked on each visited layer,
 * and 'expand' (forward by default) supplies the next layers to visit.
 */
template<class T>
void traverse(T& inputs,
              std::function<void(InferenceEngine::CNNLayerPtr& layer)> apply,
              std::function<void(const InferenceEngine::CNNLayerPtr& layer, std::deque<InferenceEngine::CNNLayerPtr>& layers)> expand = forward) {
    std::unordered_set<InferenceEngine::CNNLayerPtr> visitedObjects;
    std::deque<InferenceEngine::CNNLayerPtr>         layersToCheck;

    layersToCheck.insert(layersToCheck.end(), inputs.begin(), inputs.end());

    while (!layersToCheck.empty()) {
        auto& layer = layersToCheck.front();
        if (visitedObjects.insert(layer).second) {
            apply(layer);
            expand(layer, layersToCheck);
        }
        layersToCheck.pop_front();
    }
}
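
// Usage sketch (illustrative; 'startLayers' is a hypothetical std::vector of
// CNNLayerPtr to start from). Passing 'backward' instead of the default
// 'forward' walks towards the network inputs instead of the outputs.
//
//     InferenceEngine::traverse::traverse(startLayers,
//         [](InferenceEngine::CNNLayerPtr& layer) { /* e.g. record layer->name */ },
//         InferenceEngine::traverse::backward);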

INFERENCE_ENGINE_API_CPP(void)
traverse(InferenceEngine::ICNNNetwork& network,
         std::function<void(InferenceEngine::CNNLayerPtr& layer)> apply,
         std::function<void(const InferenceEngine::CNNLayerPtr& layer,
         std::deque<InferenceEngine::CNNLayerPtr>& layers)> expand = forward);
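
// Usage sketch (illustrative; 'network' is a hypothetical ICNNNetwork reference):
// visit every layer of the network once, presumably starting from its input
// layers with the default 'forward' expansion.
//
//     InferenceEngine::traverse::traverse(network,
//         [](InferenceEngine::CNNLayerPtr& layer) { /* e.g. count or log layers */ });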

}  // namespace traverse

using ordered_properties = std::vector<std::pair<std::string, std::string>>;
using printer_callback = std::function<void(const InferenceEngine::CNNLayerPtr,
                                            ordered_properties &,
                                            ordered_properties &)>;

/**
 * @brief Visualizes the network in GraphViz (.dot) format and writes it to the output stream
 *
 * @param network - Network to visualize
 * @param out - Output stream the graph is written to
 * @param layer_cb - Callback function that is called for every printed layer node
 */
INFERENCE_ENGINE_API_CPP(void) saveGraphToDot(InferenceEngine::ICNNNetwork &network, std::ostream &out, printer_callback layer_cb = nullptr);
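
// Usage sketch (illustrative; the exact roles of the two ordered_properties
// arguments, printed properties vs. GraphViz node attributes, are assumed here;
// std::ofstream requires <fstream>):
//
//     std::ofstream dot("network.dot");
//     InferenceEngine::saveGraphToDot(network, dot,
//         [](const InferenceEngine::CNNLayerPtr layer,
//            ordered_properties& printed, ordered_properties& node) {
//             if (layer->type == "Convolution")
//                 node.push_back({"fillcolor", "lightblue"});  // assumed to map to node attributes
//         });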

/**
 * @brief Returns root data objects, i.e. objects produced by Input or Const layers
 *
 * @param network - Network to process
 *
 * @return Set of root data objects
 */
INFERENCE_ENGINE_API_CPP(std::unordered_set<DataPtr>)
getRootDataObjects(ICNNNetwork &network);
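
// Usage sketch (illustrative; 'network' is a hypothetical ICNNNetwork reference):
// iterate over the data objects produced by Input/Const layers, e.g. to seed a
// forward traversal.
//
//     for (const InferenceEngine::DataPtr& data : InferenceEngine::getRootDataObjects(network)) {
//         // inspect the data object, e.g. its name and dimensions
//     }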

}  // namespace InferenceEngine

#endif  // IE_UTIL_HPP