// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2017, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

#include "precomp.hpp"
#include <opencv2/dnn/shape_utils.hpp>
#include "op_halide.hpp"

#ifdef HAVE_HALIDE
#include <HalideRuntimeOpenCL.h>
#endif  // HAVE_HALIDE

namespace cv
{
namespace dnn
{

#ifdef HAVE_HALIDE
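// Converts an OpenCV shape (outermost dimension first, e.g. NCHW) into the
// dimension order used for Halide buffers, where dimension 0 is the innermost
// (fastest varying) one. 2D and 4D shapes map to the canonical {w, h, c, n}
// layout; shapes of any other rank are simply reversed.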
static MatShape getBufferShape(const MatShape& shape)
{
    if (shape.size() == 2 || shape.size() == 4)
    {
        int w, h, c, n;
        getCanonicalSize(shape, &w, &h, &c, &n);
        return {w, h, c, n};
    }
    else
    {
        MatShape bufferShape(shape);
        std::reverse(bufferShape.begin(), bufferShape.end());
        return bufferShape;
    }
}

static MatShape getBufferShape(const MatSize& size)
{
    return getBufferShape(shape(size));
}

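// Wraps Mat data into a Halide buffer without copying: the buffer aliases the
// Mat memory and is marked host dirty so Halide knows the freshest data lives
// on the CPU side.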
Halide::Buffer<float> wrapToHalideBuffer(const Mat& mat)
{
    return wrapToHalideBuffer(mat, getBufferShape(mat.size));
}

Halide::Buffer<float> wrapToHalideBuffer(const Mat& mat,
                                         const std::vector<int>& sizes)
{
    Halide::Buffer<float> buffer((float*)mat.data, sizes);
    buffer.set_host_dirty();  // Indicate that data is on CPU.
    return buffer;
}

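// Extract the Halide buffers stored inside backend wrappers (single wrapper
// and whole-vector forms).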
Halide::Buffer<> halideBuffer(const Ptr<BackendWrapper>& ptr)
{
    CV_Assert(!ptr.empty());
    return ptr.dynamicCast<HalideBackendWrapper>()->buffer;
}

std::vector<Halide::Buffer<> > halideBuffers(const std::vector<Ptr<BackendWrapper> >& ptrs)
{
    std::vector<Halide::Buffer<> > vec;
    vec.reserve(ptrs.size());
    for (const Ptr<BackendWrapper>& ptr : ptrs)
    {
        vec.push_back(halideBuffer(ptr));
    }
    return vec;
}

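// Reads the canonical width/height/channels/batch extents from a 4D Halide buffer.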
void getCanonicalSize(const Halide::Buffer<>& buffer, int* width, int* height,
                      int* channels, int* batch)
{
    CV_Assert(buffer.dimensions() == 4);
    *width = buffer.extent(0);
    *height = buffer.extent(1);
    *channels = buffer.extent(2);
    *batch = buffer.extent(3);
}

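// A backend node keeps the chain of Halide functions implementing a layer;
// the last function in the chain is the one that gets compiled and realized.
// The (base, top) constructor reuses the base chain but replaces its topmost function.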
HalideBackendNode::HalideBackendNode(const Halide::Func& func)
    : BackendNode(DNN_BACKEND_HALIDE), funcs(1, func) {}

HalideBackendNode::HalideBackendNode(const std::vector<Halide::Func>& funcs)
    : BackendNode(DNN_BACKEND_HALIDE), funcs(funcs) {}

HalideBackendNode::HalideBackendNode(const Ptr<HalideBackendNode>& base,
                                     const Halide::Func& top)
    : BackendNode(DNN_BACKEND_HALIDE), funcs(base->funcs)
{
    funcs.back() = top;
}

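// Wraps a Mat for the requested target: CPU targets keep the data on the host,
// while the OpenCL target additionally copies it to the device.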
HalideBackendWrapper::HalideBackendWrapper(int targetId, const cv::Mat& m)
    : BackendWrapper(DNN_BACKEND_HALIDE, targetId)
{
    managesDevMemory = true;
    buffer = wrapToHalideBuffer(m);
    if (targetId == DNN_TARGET_CPU)
    {
        return;
    }
    else if (targetId == DNN_TARGET_OPENCL)
    {
        Halide::Target t = Halide::get_host_target();
        t.set_feature(Halide::Target::OpenCL);
        buffer.copy_to_device(t);
    }
    else
        CV_Error(Error::StsNotImplemented, "Unknown target identifier");
}

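// Reinterprets the memory of an existing wrapper under a different shape.
// Host and device allocations are shared with the base wrapper, so this
// wrapper must not release them (managesDevMemory = false).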
HalideBackendWrapper::HalideBackendWrapper(const Ptr<BackendWrapper>& base,
                                           const MatShape& shape)
    : BackendWrapper(DNN_BACKEND_HALIDE, base->targetId)
{
    managesDevMemory = false;
    Halide::Buffer<float> baseBuffer = halideBuffer(base);
    buffer = Halide::Buffer<float>((float*)baseBuffer.raw_buffer()->host,
                                   getBufferShape(shape));
    if (baseBuffer.has_device_allocation())
    {
        buffer.raw_buffer()->device = baseBuffer.raw_buffer()->device;
        buffer.raw_buffer()->device_interface = baseBuffer.raw_buffer()->device_interface;
        buffer.set_device_dirty();
    }
    else
    {
        buffer.set_host_dirty();  // Indicate that data is on CPU.
        CV_Assert(targetId == DNN_TARGET_CPU);
    }
}

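// Detaches shared device handles before the buffer is destroyed so that
// device memory owned by the base wrapper is not released twice.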
HalideBackendWrapper::~HalideBackendWrapper()
{
    if (buffer.has_device_allocation() && !managesDevMemory)
    {
        buffer.raw_buffer()->device = 0;
        buffer.raw_buffer()->device_interface = 0;
        buffer.set_device_dirty(false);
    }
}

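// If the device copy is the most recent one, waits for pending device work and
// copies the data back to the host memory wrapped by this buffer.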
void HalideBackendWrapper::copyToHost()
{
    if (buffer.device_dirty())
    {
        buffer.device_sync();
        buffer.copy_to_host();
    }
}

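// Marks the host data as up to date, discarding the device-dirty flag.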
void HalideBackendWrapper::setHostDirty()
{
    buffer.set_device_dirty(false);
    buffer.set_host_dirty();
}
#endif  // HAVE_HALIDE

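// Maps a 2D or 4D blob shape onto the canonical (width, height, channels, batch)
// quadruple; for 2D shapes width and height are reported as 1.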
void getCanonicalSize(const MatSize& size, int* w, int* h, int* c, int* n)
{
    getCanonicalSize(shape(size), w, h, c, n);
}

void getCanonicalSize(const MatShape& shape, int* width, int* height,
                      int* channels, int* batch)
{
    const int dims = shape.size();
    CV_Assert(dims == 2 || dims == 4);
    *batch = shape[0];
    *channels = shape[1];
    if (dims == 4)
    {
        *width = shape[3];
        *height = shape[2];
    }
    else
    {
        *width = 1;
        *height = 1;
    }
}

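// Constrains the output domain of the node's top function to the shape of the
// first output blob, disables Halide's runtime asserts, enables OpenCL when
// requested, and JIT-compiles the pipeline for the resulting target.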
void compileHalide(const std::vector<Mat> &outputs, Ptr<BackendNode>& node, int targetId)
{
#ifdef HAVE_HALIDE
    CV_Assert(!node.empty());
    Halide::Func& top = node.dynamicCast<HalideBackendNode>()->funcs.back();

    int outW, outH, outC, outN;
    Halide::Var x("x"), y("y"), c("c"), n("n");
    getCanonicalSize(outputs[0].size, &outW, &outH, &outC, &outN);
    top.bound(x, 0, outW).bound(y, 0, outH)
       .bound(c, 0, outC).bound(n, 0, outN);

    Halide::Target target = Halide::get_host_target();
    target.set_feature(Halide::Target::NoAsserts);
    if (targetId == DNN_TARGET_OPENCL)
    {
        target.set_feature(Halide::Target::OpenCL);
    }
    CV_Assert(target.supported());
    top.compile_jit(target);
#endif  // HAVE_HALIDE
}

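// Realizes the node's top Halide function directly into the layer's output buffers.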
void forwardHalide(std::vector<Ptr<BackendWrapper> > &outputs,
                   const Ptr<BackendNode>& node)
{
#ifdef HAVE_HALIDE
    CV_Assert(!node.empty());
    Halide::Func& top = node.dynamicCast<HalideBackendNode>()->funcs.back();
    auto outputBuffers = halideBuffers(outputs);
    top.realize(Halide::Realization(outputBuffers));
#endif  // HAVE_HALIDE
}

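// Reports whether OpenCV was built with Halide support.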
bool haveHalide()
{
#ifdef HAVE_HALIDE
    return true;
#else
    return false;
#endif  // HAVE_HALIDE
}

}  // namespace dnn
}  // namespace cv