1 // borrowed in spirit from https://github.com/yati-sagade/opencv-ndarray-conversion
2 // MIT License
3
4 #include "conversion.h"
5
6 #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
7 #include <numpy/ndarrayobject.h>
8
9 #if PY_VERSION_HEX >= 0x03000000
10 #define PyInt_Check PyLong_Check
11 #define PyInt_AsLong PyLong_AsLong
12 #endif
13
// Minimal stand-in for OpenCV's ArgInfo: carries the human-readable name of
// the value being converted so failmsg() can mention it in error messages.
struct Tmp {
    const char * name;  // borrowed pointer; must outlive this object

    Tmp(const char * n) : name(n) {}
};

// Every conversion in this file reports errors against "return value".
Tmp info("return value");
21
init_numpy()22 bool NDArrayConverter::init_numpy() {
23 // bug: import_array will hide the inner exception if something fails, so
24 // go ahead and import the numpy module first
25 PyObject * np_module = PyImport_ImportModule("numpy.core.multiarray");
26 if (np_module == NULL) {
27 PyErr_Print();
28 return false;
29 }
30
31 Py_DECREF(np_module);
32
33 // this has to be in this file, since PyArray_API is defined as static
34 import_array1(false);
35 return true;
36 }
37
38 /*
39 * The following conversion functions are taken/adapted from OpenCV's cv2.cpp file
40 * inside modules/python/src2 folder (OpenCV 3.1.0)
41 */
42
43 static PyObject* opencv_error = 0;
44
failmsg(const char * fmt,...)45 static int failmsg(const char *fmt, ...)
46 {
47 char str[1000];
48
49 va_list ap;
50 va_start(ap, fmt);
51 vsnprintf(str, sizeof(str), fmt, ap);
52 va_end(ap);
53
54 PyErr_SetString(PyExc_TypeError, str);
55 return 0;
56 }
57
58 class PyAllowThreads
59 {
60 public:
PyAllowThreads()61 PyAllowThreads() : _state(PyEval_SaveThread()) {}
~PyAllowThreads()62 ~PyAllowThreads()
63 {
64 PyEval_RestoreThread(_state);
65 }
66 private:
67 PyThreadState* _state;
68 };
69
70 class PyEnsureGIL
71 {
72 public:
PyEnsureGIL()73 PyEnsureGIL() : _state(PyGILState_Ensure()) {}
~PyEnsureGIL()74 ~PyEnsureGIL()
75 {
76 PyGILState_Release(_state);
77 }
78 private:
79 PyGILState_STATE _state;
80 };
81
// Run `expr` with the GIL released (via PyAllowThreads); if it throws a
// cv::Exception, raise it into Python as `opencv_error` and return 0 (NULL)
// from the *enclosing* function — so this macro may only be used inside
// functions whose error return is 0/NULL.
// NOTE(review): opencv_error is initialized to NULL in this file and nothing
// visible here registers it; PyErr_SetString with a NULL exception type would
// be a bug — confirm registration happens at module init elsewhere.
#define ERRWRAP2(expr) \
try \
{ \
    PyAllowThreads allowThreads; \
    expr; \
} \
catch (const cv::Exception &e) \
{ \
    PyErr_SetString(opencv_error, e.what()); \
    return 0; \
}
93
94 using namespace cv;
95
// cv::MatAllocator backed by numpy: Mats allocated through it own (or wrap) a
// numpy ndarray, stored in UMatData::userdata, so data can be shared between
// cv::Mat and Python without copying. Adapted from OpenCV's cv2.cpp.
class NumpyAllocator : public MatAllocator
{
public:
    NumpyAllocator() { stdAllocator = Mat::getStdAllocator(); }
    ~NumpyAllocator() {}

    // Wrap an existing ndarray `o` in a new UMatData.
    // Fills `step` from the array's strides (last step forced to the element
    // size) and stashes `o` in userdata. Does NOT take a new reference to `o`
    // — the caller decides who owns the reference (see toMat / the overload
    // below). Caller must hold the GIL.
    UMatData* allocate(PyObject* o, int dims, const int* sizes, int type, size_t* step) const
    {
        UMatData* u = new UMatData(this);
        u->data = u->origdata = (uchar*)PyArray_DATA((PyArrayObject*) o);
        npy_intp* _strides = PyArray_STRIDES((PyArrayObject*) o);
        for( int i = 0; i < dims - 1; i++ )
            step[i] = (size_t)_strides[i];
        // cv::Mat requires the innermost step to equal the element size.
        step[dims-1] = CV_ELEM_SIZE(type);
        // Assumes step[0] is the largest stride (C-contiguous layout) — holds
        // for arrays produced by PyArray_SimpleNew below; TODO confirm for
        // every caller.
        u->size = sizes[0]*step[0];
        u->userdata = o;
        return u;
    }

    // Allocate a fresh ndarray to back a Mat of the requested shape/type.
    // Channels (cn > 1) become an extra trailing dimension of the array.
    UMatData* allocate(int dims0, const int* sizes, int type, void* data, size_t* step, int flags, UMatUsageFlags usageFlags) const
    {
        if( data != 0 )
        {
            CV_Error(Error::StsAssert, "The data should normally be NULL!");
            // probably this is safe to do in such extreme case
            return stdAllocator->allocate(dims0, sizes, type, data, step, flags, usageFlags);
        }
        // Creating the array calls into Python — take the GIL.
        PyEnsureGIL gil;

        int depth = CV_MAT_DEPTH(type);
        int cn = CV_MAT_CN(type);
        // f is 1 on 64-bit (sizeof(size_t)==8), 0 on 32-bit: unsupported
        // depths map to NPY_ULONGLONG or NPY_UINT accordingly.
        const int f = (int)(sizeof(size_t)/8);
        int typenum = depth == CV_8U ? NPY_UBYTE : depth == CV_8S ? NPY_BYTE :
        depth == CV_16U ? NPY_USHORT : depth == CV_16S ? NPY_SHORT :
        depth == CV_32S ? NPY_INT : depth == CV_32F ? NPY_FLOAT :
        depth == CV_64F ? NPY_DOUBLE : f*NPY_ULONGLONG + (f^1)*NPY_UINT;
        int i, dims = dims0;
        cv::AutoBuffer<npy_intp> _sizes(dims + 1);
        for( i = 0; i < dims; i++ )
            _sizes[i] = sizes[i];
        if( cn > 1 )
            _sizes[dims++] = cn;  // channels as an extra trailing axis
        PyObject* o = PyArray_SimpleNew(dims, _sizes, typenum);
        if(!o)
            CV_Error_(Error::StsError, ("The numpy array of typenum=%d, ndims=%d can not be created", typenum, dims));
        // The new array's single reference is adopted via userdata and
        // released in deallocate().
        return allocate(o, dims0, sizes, type, step);
    }

    // Mapping/usage flags are delegated to the default Mat allocator.
    bool allocate(UMatData* u, int accessFlags, UMatUsageFlags usageFlags) const
    {
        return stdAllocator->allocate(u, accessFlags, usageFlags);
    }

    // Drop the UMatData once no Mat references remain; releases the ndarray
    // reference held in userdata.
    void deallocate(UMatData* u) const
    {
        if(!u)
            return;
        // Py_XDECREF may destroy the array — requires the GIL.
        PyEnsureGIL gil;
        CV_Assert(u->urefcount >= 0);
        CV_Assert(u->refcount >= 0);
        if(u->refcount == 0)
        {
            PyObject* o = (PyObject*)u->userdata;
            Py_XDECREF(o);
            delete u;
        }
    }

    // Fallback allocator for the data != NULL path and for map/unmap flags.
    const MatAllocator* stdAllocator;
};

// Single shared allocator instance used by all conversions in this file.
NumpyAllocator g_numpyAllocator;
168
toMat(PyObject * o,Mat & m)169 bool NDArrayConverter::toMat(PyObject *o, Mat &m)
170 {
171 bool allowND = true;
172 if(!o || o == Py_None)
173 {
174 if( !m.data )
175 m.allocator = &g_numpyAllocator;
176 return true;
177 }
178
179 if( PyInt_Check(o) )
180 {
181 double v[] = {static_cast<double>(PyInt_AsLong((PyObject*)o)), 0., 0., 0.};
182 m = Mat(4, 1, CV_64F, v).clone();
183 return true;
184 }
185 if( PyFloat_Check(o) )
186 {
187 double v[] = {PyFloat_AsDouble((PyObject*)o), 0., 0., 0.};
188 m = Mat(4, 1, CV_64F, v).clone();
189 return true;
190 }
191 if( PyTuple_Check(o) )
192 {
193 int i, sz = (int)PyTuple_Size((PyObject*)o);
194 m = Mat(sz, 1, CV_64F);
195 for( i = 0; i < sz; i++ )
196 {
197 PyObject* oi = PyTuple_GET_ITEM(o, i);
198 if( PyInt_Check(oi) )
199 m.at<double>(i) = (double)PyInt_AsLong(oi);
200 else if( PyFloat_Check(oi) )
201 m.at<double>(i) = (double)PyFloat_AsDouble(oi);
202 else
203 {
204 failmsg("%s is not a numerical tuple", info.name);
205 m.release();
206 return false;
207 }
208 }
209 return true;
210 }
211
212 if( !PyArray_Check(o) )
213 {
214 failmsg("%s is not a numpy array, neither a scalar", info.name);
215 return false;
216 }
217
218 PyArrayObject* oarr = (PyArrayObject*) o;
219
220 bool needcopy = false, needcast = false;
221 int typenum = PyArray_TYPE(oarr), new_typenum = typenum;
222 int type = typenum == NPY_UBYTE ? CV_8U :
223 typenum == NPY_BYTE ? CV_8S :
224 typenum == NPY_USHORT ? CV_16U :
225 typenum == NPY_SHORT ? CV_16S :
226 typenum == NPY_INT ? CV_32S :
227 typenum == NPY_INT32 ? CV_32S :
228 typenum == NPY_FLOAT ? CV_32F :
229 typenum == NPY_DOUBLE ? CV_64F : -1;
230
231 if( type < 0 )
232 {
233 if( typenum == NPY_INT64 || typenum == NPY_UINT64 || typenum == NPY_LONG )
234 {
235 needcopy = needcast = true;
236 new_typenum = NPY_INT;
237 type = CV_32S;
238 }
239 else
240 {
241 failmsg("%s data type = %d is not supported", info.name, typenum);
242 return false;
243 }
244 }
245
246 #ifndef CV_MAX_DIM
247 const int CV_MAX_DIM = 32;
248 #endif
249
250 int ndims = PyArray_NDIM(oarr);
251 if(ndims >= CV_MAX_DIM)
252 {
253 failmsg("%s dimensionality (=%d) is too high", info.name, ndims);
254 return false;
255 }
256
257 int size[CV_MAX_DIM+1];
258 size_t step[CV_MAX_DIM+1];
259 size_t elemsize = CV_ELEM_SIZE1(type);
260 const npy_intp* _sizes = PyArray_DIMS(oarr);
261 const npy_intp* _strides = PyArray_STRIDES(oarr);
262 bool ismultichannel = ndims == 3 && _sizes[2] <= CV_CN_MAX;
263
264 for( int i = ndims-1; i >= 0 && !needcopy; i-- )
265 {
266 // these checks handle cases of
267 // a) multi-dimensional (ndims > 2) arrays, as well as simpler 1- and 2-dimensional cases
268 // b) transposed arrays, where _strides[] elements go in non-descending order
269 // c) flipped arrays, where some of _strides[] elements are negative
270 // the _sizes[i] > 1 is needed to avoid spurious copies when NPY_RELAXED_STRIDES is set
271 if( (i == ndims-1 && _sizes[i] > 1 && (size_t)_strides[i] != elemsize) ||
272 (i < ndims-1 && _sizes[i] > 1 && _strides[i] < _strides[i+1]) )
273 needcopy = true;
274 }
275
276 if( ismultichannel && _strides[1] != (npy_intp)elemsize*_sizes[2] )
277 needcopy = true;
278
279 if (needcopy)
280 {
281 //if (info.outputarg)
282 //{
283 // failmsg("Layout of the output array %s is incompatible with cv::Mat (step[ndims-1] != elemsize or step[1] != elemsize*nchannels)", info.name);
284 // return false;
285 //}
286
287 if( needcast ) {
288 o = PyArray_Cast(oarr, new_typenum);
289 oarr = (PyArrayObject*) o;
290 }
291 else {
292 oarr = PyArray_GETCONTIGUOUS(oarr);
293 o = (PyObject*) oarr;
294 }
295
296 _strides = PyArray_STRIDES(oarr);
297 }
298
299 // Normalize strides in case NPY_RELAXED_STRIDES is set
300 size_t default_step = elemsize;
301 for ( int i = ndims - 1; i >= 0; --i )
302 {
303 size[i] = (int)_sizes[i];
304 if ( size[i] > 1 )
305 {
306 step[i] = (size_t)_strides[i];
307 default_step = step[i] * size[i];
308 }
309 else
310 {
311 step[i] = default_step;
312 default_step *= size[i];
313 }
314 }
315
316 // handle degenerate case
317 if( ndims == 0) {
318 size[ndims] = 1;
319 step[ndims] = elemsize;
320 ndims++;
321 }
322
323 if( ismultichannel )
324 {
325 ndims--;
326 type |= CV_MAKETYPE(0, size[2]);
327 }
328
329 if( ndims > 2 && !allowND )
330 {
331 failmsg("%s has more than 2 dimensions", info.name);
332 return false;
333 }
334
335 m = Mat(ndims, size, type, PyArray_DATA(oarr), step);
336 m.u = g_numpyAllocator.allocate(o, ndims, size, type, step);
337 m.addref();
338
339 if( !needcopy )
340 {
341 Py_INCREF(o);
342 }
343 m.allocator = &g_numpyAllocator;
344
345 return true;
346 }
347
toNDArray(const cv::Mat & m)348 PyObject* NDArrayConverter::toNDArray(const cv::Mat& m)
349 {
350 if( !m.data )
351 Py_RETURN_NONE;
352 Mat temp, *p = (Mat*)&m;
353 if(!p->u || p->allocator != &g_numpyAllocator)
354 {
355 temp.allocator = &g_numpyAllocator;
356 ERRWRAP2(m.copyTo(temp));
357 p = &temp;
358 }
359 PyObject* o = (PyObject*)p->u->userdata;
360 Py_INCREF(o);
361 return o;
362 }
363