// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018-2019, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

#ifndef __OPENCV_DNN_OP_INF_ENGINE_HPP__
#define __OPENCV_DNN_OP_INF_ENGINE_HPP__

#include "opencv2/core/cvdef.h"
#include "opencv2/core/cvstd.hpp"
#include "opencv2/dnn.hpp"

#include "opencv2/core/async.hpp"
#include "opencv2/core/detail/async_promise.hpp"

#include "opencv2/dnn/utils/inference_engine.hpp"

#ifdef HAVE_INF_ENGINE

#define INF_ENGINE_RELEASE_2018R5 2018050000
#define INF_ENGINE_RELEASE_2019R1 2019010000
#define INF_ENGINE_RELEASE_2019R2 2019020000
#define INF_ENGINE_RELEASE_2019R3 2019030000
#define INF_ENGINE_RELEASE_2020_1 2020010000
#define INF_ENGINE_RELEASE_2020_2 2020020000
#define INF_ENGINE_RELEASE_2020_3 2020030000
#define INF_ENGINE_RELEASE_2020_4 2020040000
#define INF_ENGINE_RELEASE_2021_1 2021010000
#define INF_ENGINE_RELEASE_2021_2 2021020000
#define INF_ENGINE_RELEASE_2021_3 2021030000
#define INF_ENGINE_RELEASE_2021_4 2021040000
#ifndef INF_ENGINE_RELEASE
#warning("IE version has not been provided via command-line. Using 2021.4 by default")
#define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2021_4
#endif

#define INF_ENGINE_VER_MAJOR_GT(ver) (((INF_ENGINE_RELEASE) / 10000) > ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_GE(ver) (((INF_ENGINE_RELEASE) / 10000) >= ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_LT(ver) (((INF_ENGINE_RELEASE) / 10000) < ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_LE(ver) (((INF_ENGINE_RELEASE) / 10000) <= ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_EQ(ver) (((INF_ENGINE_RELEASE) / 10000) == ((ver) / 10000))
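
// A minimal sketch of how these comparison macros are typically used to
// guard version-specific code paths (any release constant above works):
//
//     #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2020_1)
//     // code path for OpenVINO 2020.1 and newer
//     #else
//     // fallback for older releases
//     #endif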

#if defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsuggest-override"
#endif

#if defined(HAVE_DNN_IE_NN_BUILDER_2019) || INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2020_4)
//#define INFERENCE_ENGINE_DEPRECATED  // turn off deprecation warnings from IE
// There is currently no way to suppress warnings coming only from IE, so we are forced to suppress warnings globally.
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif
#ifdef _MSC_VER
#pragma warning(disable: 4996)  // was declared deprecated
#endif
#endif

#if defined(__GNUC__) && INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_1)
#pragma GCC visibility push(default)
#endif

#include <inference_engine.hpp>

#ifdef HAVE_DNN_IE_NN_BUILDER_2019
#include <ie_builders.hpp>
#endif

#if defined(__GNUC__) && INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_1)
#pragma GCC visibility pop
#endif

#if defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic pop
#endif

#endif  // HAVE_INF_ENGINE

namespace cv { namespace dnn {

#ifdef HAVE_INF_ENGINE

Backend& getInferenceEngineBackendTypeParam();

Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob);

void infEngineBlobsToMats(const std::vector<InferenceEngine::Blob::Ptr>& blobs,
                          std::vector<Mat>& mats);
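
// A minimal usage sketch: converting an IE blob back to a cv::Mat. The blob
// is assumed to be already allocated and filled (e.g. an output of an infer
// request); the names here are illustrative only:
//
//     InferenceEngine::Blob::Ptr outBlob = inferRequest.GetBlob("output");
//     cv::Mat outMat = infEngineBlobToMat(outBlob);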

#ifdef HAVE_DNN_IE_NN_BUILDER_2019

class InfEngineBackendNet
{
public:
    InfEngineBackendNet();

    InfEngineBackendNet(InferenceEngine::CNNNetwork& net);

    void addLayer(InferenceEngine::Builder::Layer& layer);

    void addOutput(const std::string& name);

    void connect(const std::vector<Ptr<BackendWrapper> >& inputs,
                 const std::vector<Ptr<BackendWrapper> >& outputs,
                 const std::string& layerName);

    bool isInitialized();

    void init(Target targetId);

    void forward(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
                 bool isAsync);

    void initPlugin(InferenceEngine::CNNNetwork& net);

    void addBlobs(const std::vector<cv::Ptr<BackendWrapper> >& ptrs);

    void reset();

private:
    InferenceEngine::Builder::Network netBuilder;

    InferenceEngine::ExecutableNetwork netExec;
    InferenceEngine::BlobMap allBlobs;
    std::string device_name;
#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
    InferenceEngine::InferenceEnginePluginPtr enginePtr;
    InferenceEngine::InferencePlugin plugin;
#else
    bool isInit = false;
#endif

    struct InfEngineReqWrapper
    {
        InfEngineReqWrapper() : isReady(true) {}

        void makePromises(const std::vector<Ptr<BackendWrapper> >& outs);

        InferenceEngine::InferRequest req;
        std::vector<cv::AsyncPromise> outProms;
        std::vector<std::string> outsNames;
        bool isReady;
    };

    std::vector<Ptr<InfEngineReqWrapper> > infRequests;

    InferenceEngine::CNNNetwork cnn;
    bool hasNetOwner;

    std::map<std::string, int> layers;
    std::vector<std::string> requestedOutputs;

    std::set<std::pair<int, int> > unconnectedPorts;
};
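
// A sketched lifecycle of InfEngineBackendNet, based on the declarations
// above (the exact call order inside OpenCV's DNN backend may differ):
//
//     InfEngineBackendNet ieNet(cnnNetwork);
//     ieNet.init(DNN_TARGET_CPU);               // build and load the network
//     ieNet.addBlobs(blobWrappers);             // register I/O blobs
//     ieNet.forward(outWrappers, /*isAsync=*/false);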

class InfEngineBackendNode : public BackendNode
{
public:
    InfEngineBackendNode(const InferenceEngine::Builder::Layer& layer);

    InfEngineBackendNode(Ptr<Layer>& layer, std::vector<Mat*>& inputs,
                         std::vector<Mat>& outputs, std::vector<Mat>& internals);

    void connect(std::vector<Ptr<BackendWrapper> >& inputs,
                 std::vector<Ptr<BackendWrapper> >& outputs);

    // Inference Engine network object that provides access to the outputs of this layer.
    InferenceEngine::Builder::Layer layer;
    Ptr<InfEngineBackendNet> net;
    // CPU fallback in case of an unsupported Inference Engine layer.
    Ptr<dnn::Layer> cvLayer;
};

class InfEngineBackendWrapper : public BackendWrapper
{
public:
    InfEngineBackendWrapper(int targetId, const Mat& m);

    InfEngineBackendWrapper(Ptr<BackendWrapper> wrapper);

    ~InfEngineBackendWrapper();

    static Ptr<BackendWrapper> create(Ptr<BackendWrapper> wrapper);

    virtual void copyToHost() CV_OVERRIDE;

    virtual void setHostDirty() CV_OVERRIDE;

    InferenceEngine::DataPtr dataPtr;
    InferenceEngine::Blob::Ptr blob;
    AsyncArray futureMat;
};

InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, InferenceEngine::Layout layout = InferenceEngine::Layout::ANY);

InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, const std::vector<size_t>& shape, InferenceEngine::Layout layout);
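
// A minimal sketch of wrapping a Mat into an IE blob (assumes a dense
// CV_32F Mat; the layout argument defaults to ANY in the first overload):
//
//     cv::Mat m(1, 3, CV_32F);
//     InferenceEngine::Blob::Ptr blob = wrapToInfEngineBlob(m);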

InferenceEngine::DataPtr infEngineDataNode(const Ptr<BackendWrapper>& ptr);

// Converts an Inference Engine blob with FP32 precision to FP16 precision.
// Allocates memory for a new blob.
InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob);
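
// Sketch: producing an FP16 copy of an FP32 blob (new memory is allocated,
// per the note above):
//
//     InferenceEngine::Blob::Ptr fp32Blob = wrapToInfEngineBlob(m);
//     InferenceEngine::Blob::Ptr fp16Blob = convertFp16(fp32Blob);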

void addConstantData(const std::string& name, InferenceEngine::Blob::Ptr data, InferenceEngine::Builder::Layer& l);

// This is a fake class to run networks from Model Optimizer. Objects of this
// class simulate responses of layers that are imported by OpenCV and supported by
// Inference Engine. The main difference is that they do not perform a forward pass.
class InfEngineBackendLayer : public Layer
{
public:
    InfEngineBackendLayer(const InferenceEngine::CNNNetwork &t_net_) : t_net(t_net_) {}

    virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
                                 const int requiredOutputs,
                                 std::vector<MatShape> &outputs,
                                 std::vector<MatShape> &internals) const CV_OVERRIDE;

    virtual void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs,
                         OutputArrayOfArrays internals) CV_OVERRIDE;

    virtual bool supportBackend(int backendId) CV_OVERRIDE;

private:
    InferenceEngine::CNNNetwork t_net;
};


class InfEngineExtension : public InferenceEngine::IExtension
{
public:
#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_2)
    virtual void SetLogCallback(InferenceEngine::IErrorListener&) noexcept {}
#endif
    virtual void Unload() noexcept {}
    virtual void Release() noexcept {}
    virtual void GetVersion(const InferenceEngine::Version*&) const noexcept {}

    virtual InferenceEngine::StatusCode getPrimitiveTypes(char**&, unsigned int&,
                                                          InferenceEngine::ResponseDesc*) noexcept
    {
        return InferenceEngine::StatusCode::OK;
    }

    InferenceEngine::StatusCode getFactoryFor(InferenceEngine::ILayerImplFactory*& factory,
                                              const InferenceEngine::CNNLayer* cnnLayer,
                                              InferenceEngine::ResponseDesc* resp) noexcept;
};

#endif  // HAVE_DNN_IE_NN_BUILDER_2019


CV__DNN_INLINE_NS_BEGIN

bool isMyriadX();

bool isArmComputePlugin();

CV__DNN_INLINE_NS_END


InferenceEngine::Core& getCore(const std::string& id);

template<typename T = size_t>
static inline std::vector<T> getShape(const Mat& mat)
{
    std::vector<T> result(mat.dims);
    for (int i = 0; i < mat.dims; i++)
        result[i] = (T)mat.size[i];
    return result;
}
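
// Example: for a 4D NCHW-shaped Mat this yields {1, 3, 224, 224}:
//
//     int sz[] = {1, 3, 224, 224};
//     cv::Mat m(4, sz, CV_32F);
//     std::vector<size_t> shape = getShape(m);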


#endif  // HAVE_INF_ENGINE

bool haveInfEngine();

void forwardInfEngine(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
                      Ptr<BackendNode>& node, bool isAsync);

}}  // namespace dnn, namespace cv

#endif  // __OPENCV_DNN_OP_INF_ENGINE_HPP__