/*
 *Copyright (c) 2018 Intel Corporation.
 *
 *Permission is hereby granted, free of charge, to any person obtaining a copy
 *of this software and associated documentation files (the "Software"), to deal
 *in the Software without restriction, including without limitation the rights
 *to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 *copies of the Software, and to permit persons to whom the Software is
 *furnished to do so, subject to the following conditions:
 *
 *The above copyright notice and this permission notice shall be included in
 *all copies or substantial portions of the Software.
 *
 *THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 *IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 *FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 *AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 *LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 *OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 *THE SOFTWARE.
 *
 */


#ifndef _MDARRAY_H_
#define _MDARRAY_H_
#include <Python.h>
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#include <numpy/arrayobject.h>
#include <numpy/ndarraytypes.h>
#include <cassert>
#include <cstring>
#include <iostream>
#include <vector>
#include <numeric>
#include <memory>
#include <forward_list>
#include <stdexcept>
#include <type_traits>
#include <swigpyrun.h>
#include "ideep.hpp"
#include "utils.h"

namespace implementation {
  class mdarray;
}

class reorderer;

using py_handle = std::shared_ptr<implementation::mdarray>;

namespace implementation {

#if PY_VERSION_HEX >= 0x03000000
  int g_init();
#else
  void g_init();
#endif

#define NPY_ARRAY_SURROGATE_ENTRY(mdarray) \
  PyObject *surrogate = PyArray_FromAny(mdarray, nullptr, 0, 0 \
      , NPY_ARRAY_ELEMENTSTRIDES, nullptr)   \

#define NPY_ARRAY_SURROGATE_EXIT()

#define nb_unary_map_impl(method) \
  PyObject * m_ ## method ## _map_impl(PyObject *self) { \
    NPY_ARRAY_SURROGATE_ENTRY(self); \
                                \
    if (surrogate == nullptr)   \
      return nullptr;           \
                                \
    PyObject *res = PyNumber_ ## method(surrogate); \
    Py_DECREF(surrogate);   \
    NPY_ARRAY_SURROGATE_EXIT(); \
    return res;   \
  } \

#define nb_unary_map(method) \
  nb_unary_map_impl(method) \
  PyObject * m_ ## method (PyObject *self) {    \
    return m_ ## method ## _map_impl(self); \
  } \

#define nb_binary_map_impl(method) \
  PyObject * m_ ## method ## _map_impl(PyObject *self, PyObject *o) {   \
    PyObject *left = self, *right = o;                                  \
    if (is_mdarray(left)) {                                             \
      left = PyArray_FromAny(left, nullptr, 0, 0                        \
        , NPY_ARRAY_ELEMENTSTRIDES, nullptr);                           \
    }                                                                   \
    if (is_mdarray(right)) {                                            \
      right = PyArray_FromAny(right, nullptr, 0, 0                      \
        , NPY_ARRAY_ELEMENTSTRIDES, nullptr);                           \
    }                                                                   \
    PyObject *res = PyNumber_ ## method(left, right);                   \
    if (left != self)                                                   \
      Py_DECREF(left);                                                  \
    if (right != o)                                                     \
      Py_DECREF(right);                                                 \
    return res;                                                         \
  }

#define nb_binary_map_impl_with_target_func(method, tfunc) \
  PyObject * m_ ## method ## _map_impl(PyObject *self, PyObject *o) {    \
    NPY_ARRAY_SURROGATE_ENTRY(self); \
                                \
    if (surrogate == nullptr)   \
      return nullptr;           \
                                \
    PyObject *res = PyNumber_ ## tfunc(surrogate, o); \
    Py_DECREF(surrogate);   \
    NPY_ARRAY_SURROGATE_EXIT(); \
    return res;   \
  }

#define nb_binary_map(method) \
  nb_binary_map_impl(method) \
  PyObject * m_ ## method (PyObject *self, PyObject *o) {    \
    return m_ ## method ## _map_impl(self, o); \
  } \

#define nb_ternary_map_impl(method) \
  PyObject * m_ ## method ## _map_impl(PyObject *self, PyObject *o1, PyObject *o2) {    \
    NPY_ARRAY_SURROGATE_ENTRY(self); \
                                \
    if (surrogate == nullptr)   \
      return nullptr;           \
                                \
    PyObject *res = PyNumber_ ## method(surrogate, o1, o2); \
    Py_DECREF(surrogate); \
    NPY_ARRAY_SURROGATE_EXIT(); \
    return res;   \
  }

#define nb_ternary_map(method) \
  nb_ternary_map_impl(method) \
  PyObject * m_ ## method (PyObject *self, PyObject *o1, PyObject *o2) {    \
    return m_ ## method ## _map_impl(self, o1, o2); \
  } \

// FIXME: Redundant interceptions in lambda []
class mdarray : public ideep::tensor {
public:
  using tensor = ideep::tensor;
  using data_type_t = mkldnn::memory::data_type;
  using dims_t = mkldnn::memory::dims;
  using format_t = ideep::format;
  using error = mkldnn::error;
  using scratch_allocator = ideep::utils::scratch_allocator;
  using reorder = ideep::reorder;
  using convolution_forward = ideep::convolution_forward;

  static constexpr int MAX_NDIM = 12; //XXX: For now

  typedef size_t size_type;

  mdarray() = default;
  virtual ~mdarray();

  // Create a memory entity from a tensor.
  // The mdarray must own its memory. For this ctor,
  // * It is guaranteed that the input tensor is a memory owner. If the tensor
  //   is not a memory owner, please use the ctor `mdarray(const mdarray &m)`.
  // * It is guaranteed that the input tensor is a memory entity, not a view.
  //   ALLOWED: tensor(entity) -> mdarray
  //   NOT-ALLOWED: mdarray(entity) -> mdarray/tensor(view) -> mdarray
  //   If the tensor is a view, please use the ctor `mdarray(const mdarray &m)`.
  mdarray(const tensor &t) :
      tensor(t),
      buff_([t]() {
            if (t.get_tensor_buffer().get() != nullptr) {
              return t.get_tensor_buffer();
            } else {
              throw error(mkldnn_invalid_arguments, std::string(
                  "mdarray ctor does not support view input"));
              return std::shared_ptr<char>(nullptr);
            }
          } ()),
      view_(nullptr) {}
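
  // A rough usage sketch (illustrative; assumes `t` already owns its buffer):
  //
  //   ideep::tensor t = /* entity that owns its memory */;
  //   implementation::mdarray a(t);   // shares t's buffer, no copy
  //
  // If `t` were only a view over another tensor's memory, this ctor throws
  // mkldnn_invalid_arguments; wrap the owning mdarray and use
  // `mdarray(const mdarray &m)` instead.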

  // Share from an mdarray
  // * If the src mdarray is a memory entity, this mdarray shares the buffer.
  // * If the src mdarray is a memory view, this mdarray shares the view.
  // this_mdarray->buff_ (src is entity)
  // this_mdarray->view->rb(other)->data_ (src is view)
  mdarray(const mdarray &m) :
      tensor(m),
      buff_(m.get_shared_buff()),
      view_(nullptr) {
    Py_buffer *view = nullptr;
    if (m.view_.get()) {
      // m is a view
      view = new Py_buffer;
      // No need to modify attributes in the view to keep consistency between
      // the view and `this` (the array). The view held by the consumer (`this`)
      // is just a record of its producer; only the shared memory entity
      // `view->obj` is held here. When `this` acts as a producer, a new view
      // is created from the current array. Refer to `getbuffer`.
      memcpy((void *)(view), (void *)(m.view_.get()), sizeof(Py_buffer));
      Py_INCREF(m.view_->obj);
    } else {
      // m is an entity
    }
    view_.reset(view);
  }

  // Memory entity created from array attributes (dims and data type)
  mdarray(dims_t dims, data_type_t dt) :
      tensor({dims, dt, [dims]() {
            return ndims2format(dims.size());
          } ()}, [&]() {
            return reinterpret_cast<void *>(
                new scratch_allocator::byte<tensor>[dims2size(dims, dt)]);
          } ()),
      buff_(std::shared_ptr<char>((char *)get_data_handle<false>(), [](char *p) {
            auto _p = reinterpret_cast<scratch_allocator::byte<tensor> *>(p);
            delete [] _p;
          })),
      view_(nullptr) {}
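
  // A rough usage sketch (illustrative): this ctor allocates a fresh owning buffer.
  //
  //   implementation::mdarray a({2, 3, 4, 5}, data_type_t::f32);
  //   // 4-D dims map to format nchw; the allocation is
  //   // 2*3*4*5 = 120 elements * 4 bytes = 480 bytes, released by the
  //   // shared_ptr deleter when the last owner goes away.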

  // Memory view created from the producer's view
  mdarray(Py_buffer *view, char input_type='d') :
      tensor({[&]() {
            return dims_t(view->shape, view->shape + view->ndim);
          } (), [&]() {
            data_type_t dt;
            std::string format(view->format);
            if (std::string::npos != format.find_last_of('f')) {
              dt = data_type_t::f32;
            } else if (std::string::npos != format.find_last_of('i')) {
              dt = data_type_t::s32;
            } else if (std::string::npos != format.find_last_of('h')) {
              dt = data_type_t::s16;
            } else if (std::string::npos != format.find_last_of('b')) {
              dt = data_type_t::s8;
            } else if (std::string::npos != format.find_last_of('B')) {
              dt = data_type_t::u8;
            } else {
              throw error(mkldnn_invalid_arguments,
                  std::string("mdarray does not support data type: ") + format);
            }
            return dt;
          } (), [&]() {
            return ndims2format(view->ndim, input_type);
          } ()}, [&]() {
            void *buf = view->buf;
            #if 0
            if ((uint64_t)buf & (_TENSOR_MEM_ALIGNMENT_ - 1)) {
            #else
            #define FORCE_CPY true
            if (FORCE_CPY) {
            #endif
              buf = reinterpret_cast<void *>(
                  new scratch_allocator::byte<tensor>[view->len]);
              ideep::utils::fast_memcpy((char *)buf, (char *)view->buf, view->len);
            }
            return buf;
          } ()),
      buff_([&] () {
            if (get_data_handle<false>() != view->buf) {
              return std::shared_ptr<char>((char *)get_data_handle<false>(),
                  [](char *p) {
                    auto _p =
                        reinterpret_cast<scratch_allocator::byte<tensor> *>(p);
                    delete [] _p;
                  });
            } else {
              // Not the owner of the memory
              return std::shared_ptr<char>((char *)view->buf, [](char *p) {});
            }
          } ()), view_(view) {
    // Init weight array in the preferred format for CNN convolution
    if (input_type == 'w' && ndims() == 4) {
      auto desc_in = convolution_forward::
          expected_weights_descriptor(get_dims(), get_data_type());
      if (get_descriptor() != desc_in) {
        auto buf = reinterpret_cast<void *>(
            new scratch_allocator::byte<tensor>[desc_in.get_size()]);
        tensor wgt_in = tensor(desc_in, buf);
        reorder::compute(*this, wgt_in);

        init(wgt_in.get_descriptor(), wgt_in.get_data_handle<false>());

        buff_.reset();
        buff_ = std::shared_ptr<char>((char *)buf, [](char *p) {
              auto _p = reinterpret_cast<scratch_allocator::byte<tensor> *>(p);
              delete [] _p;
            });
      }
    }

    if (view_.get() && get_data_handle<false>() != view->buf) {
      set_tensor_buffer(buff_);
      view_.reset();
    }
  }
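
  // A rough usage sketch (illustrative): `view` is a Py_buffer exported by a
  // NumPy array. With input_type='w' and a 4-D shape, the data may be repacked
  // into the weights format reported by
  // convolution_forward::expected_weights_descriptor:
  //
  //   implementation::mdarray w(view, 'w');   // e.g. oihw -> blocked layout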

  static bool is_mdarray(PyObject *o);

  //FIXME
  inline void unpickled_data(void *pdata) {
    //data_.reset(reinterpret_cast<avx::byte *>(pdata));
    //m_.set_data_handle(pdata);
    return;
  }

  // PEP 3118 interface
  int build_view(Py_buffer *view, int flags, const reorderer &reorder);

  // PyObject *__getstate__(void) const;

  // void __setstate__(PyObject *state);

  PyObject *py_mdarray_from(PyObject *o) const;

  /// d = a * x + b * y, using x's format
  static void axpby(tensor &dst, float a, const tensor &x, float b, const tensor &y);
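  // Worked example (illustrative): axpby(d, 1.0f, x, -1.0f, y) computes the
  // elementwise difference d = x - y, stored using x's format.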

  /// Interface exposed directly to Python
  PyObject *axpby(float a, float b, PyObject *o);

  PyObject *inplace_axpby(float a, PyObject *self, float b, PyObject *o);

  PyObject *flat(void);

  void set(PyObject *o);

  PyObject *reshape(py_handle *self, std::vector<int> dims);

  PyObject *m_mult_div(PyObject *self, PyObject *o, int mult_or_div, bool inplace);

  PyObject *sum(std::vector<int> axis, bool keepdims);

  // PEP 3118 Buffer Protocol producer
  virtual int getbuffer(PyObject *self, Py_buffer *view, int flags);

  PyObject *getattro(PyObject *self, PyObject *name);

  PyObject *m_Add(PyObject *self, PyObject *o);
  nb_binary_map_impl(Add);
  PyObject *m_InPlaceAdd(PyObject *self, PyObject *o);
  nb_binary_map_impl(InPlaceAdd);
  PyObject *m_Subtract(PyObject *self, PyObject *o);
  nb_binary_map_impl(Subtract);
  PyObject *m_InPlaceSubtract(PyObject *self, PyObject *o);
  nb_binary_map_impl(InPlaceSubtract);
  PyObject *m_Multiply(PyObject *self, PyObject *o);
  nb_binary_map_impl(Multiply);
  PyObject *m_InPlaceMultiply(PyObject *self, PyObject *o);
  nb_binary_map_impl(InPlaceMultiply);
  // SWIG: nb_true_divide (no slot) <= nb_divide
  PyObject *m_Divide(PyObject *self, PyObject *o);
#if PY_VERSION_HEX < 0x03000000
  nb_binary_map_impl(Divide);
#else
  nb_binary_map_impl_with_target_func(Divide, TrueDivide);
#endif
  PyObject *m_InPlaceDivide(PyObject *self, PyObject *o);
#if PY_VERSION_HEX < 0x03000000
  nb_binary_map_impl(InPlaceDivide);
#else
  nb_binary_map_impl_with_target_func(InPlaceDivide, InPlaceTrueDivide);
#endif

  nb_binary_map(Remainder);
  nb_binary_map(Divmod);
  nb_unary_map(Negative);
  nb_unary_map(Positive);
  nb_unary_map(Absolute);
  nb_unary_map(Invert);
  nb_binary_map(Lshift);
  nb_binary_map(Rshift);
  nb_binary_map(And);
  nb_binary_map(Xor);
  nb_binary_map(Or);
  nb_binary_map(InPlaceRemainder);
  nb_ternary_map(InPlacePower);
  nb_binary_map(InPlaceLshift);
  nb_binary_map(InPlaceRshift);
  nb_binary_map(InPlaceAnd);
  nb_binary_map(InPlaceXor);
  nb_binary_map(InPlaceOr);
  nb_binary_map(FloorDivide);
  nb_binary_map(InPlaceFloorDivide);
#if (PY_VERSION_HEX >= 0x03000000)
  nb_binary_map(MatrixMultiply);
  nb_binary_map(InPlaceMatrixMultiply);
#endif

  Py_ssize_t mp_length(PyObject *self);
  PyObject *mp_subscript(PyObject *self, PyObject *op);
  int mp_ass_subscript(PyObject *self, PyObject *ind, PyObject *op);

  inline tensor &get_tensor() { return *this; }

  inline void reset_tensor(tensor &dst) {
      init(dst.get_descriptor(), dst.get_data_handle<false>()); }

  inline std::shared_ptr<char> get_shared_buff() const { return buff_; }
  inline void set_shared_buff(std::shared_ptr<char>& buff) { buff_ = buff; }

private:
  static inline size_t dims2size(dims_t &dims, data_type_t dt) {
    size_t itemsize;
    switch (dt) {
    case data_type_t::f32:
    case data_type_t::s32:
      itemsize = 4;
      break;
    case data_type_t::s16:
      itemsize = 2;
      break;
    case data_type_t::u8:
    case data_type_t::s8:
      itemsize = 1;
      break;
    default:
      throw error(mkldnn_invalid_arguments, std::string(
          "mdarray does not support data type: ") + std::to_string(dt));
    }

    size_t nelems = 1;
    for (unsigned d = 0; d < dims.size(); d++)
      nelems *= dims[d];

    return nelems * itemsize;
  }
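
  // Example (illustrative): dims = {2, 3, 4, 5} with data_type_t::f32 gives
  // 2*3*4*5 = 120 elements * 4 bytes = 480 bytes.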

  static inline
  format_t ndims2format(int ndims, char input_type = 'd')
  {
    switch (ndims) {
    case 1:
      return format_t::x;
    case 2:
      return (input_type == 'd') ? format_t::nc : format_t::oi;
    case 4:
      return (input_type == 'd') ? format_t::nchw : format_t::oihw;
    default:
      throw error(mkldnn_invalid_arguments, std::string(
          "MKLDNN does not support dimensions: ") + std::to_string(ndims));
      return format_t::format_undef;
    }
  }

  inline ssize_t *get_view_shape() {
    static ssize_t shape[MAX_NDIM];
    auto dims = get_dims();
    for (int d = 0; d < ndims(); d++)
      shape[d] = dims[d];

    return shape;
  }

  inline ssize_t *get_view_strides(ssize_t itemsize) {
    static ssize_t strides[MAX_NDIM];
    ssize_t sd = itemsize;
    for (int d = ndims() - 1; d >= 0; --d) {
      strides[d] = sd;
      sd *= get_dims()[d];
    }

    return strides;
  }
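
  // Example (illustrative): for dims {2, 3, 4} and itemsize 4, the loop yields
  // the C-contiguous strides {48, 16, 4} (the innermost dimension varies fastest).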

  inline ssize_t get_view_itemsize() {
    ssize_t itemsize;
    switch (get_data_type()) {
    case data_type_t::f32: itemsize = 4; break;
    case data_type_t::s32: itemsize = 4; break;
    case data_type_t::s16: itemsize = 2; break;
    case data_type_t::s8: itemsize = 1; break;
    case data_type_t::u8: itemsize = 1; break;
    default:
      throw error(mkldnn_invalid_arguments,
          std::string("get_view_itemsize, unsupported data type"));
      break;
    }
    return itemsize;
  }

  inline char *get_view_format() {
    static char format[4];
    switch (get_data_type()) {
    case data_type_t::f32: strcpy(format, "f"); break;
    case data_type_t::s32: strcpy(format, "i"); break;
    case data_type_t::s16: strcpy(format, "h"); break;
    case data_type_t::s8: strcpy(format, "b"); break;
    case data_type_t::u8: strcpy(format, "B"); break;
    default:
      throw error(mkldnn_invalid_arguments,
          std::string("get_view_format, unsupported data type"));
      break;
    }
    return format;
  }

  struct view_manager {
    void operator() (const Py_buffer *view) const {
      PyBuffer_Release(const_cast<Py_buffer *>(view));
      delete view;
    }

  };

  std::shared_ptr<char> buff_;
  std::unique_ptr<const Py_buffer, view_manager> view_;
};
}

// `reorderer` deals with iDeep internal (non-public) formats: when the source
// is not in a public format, it reorders the data into a public-compatible
// layout. It also acts as a memory holder when memory sharing is requested
// through the Python buffer protocol: the protocol consumer decrements or
// deletes the `reorderer` when the related view is released. A memory-entity
// mdarray always creates a new `reorderer` for the consumer, whereas a
// memory-view mdarray always shares the `reorderer` held in its view.
class reorderer {
public:
  using tensor = ideep::tensor;
  using data_type_t = mkldnn::memory::data_type;
  using format_t = ideep::format;
  using reorder = ideep::reorder;
  using descriptor = tensor::descriptor;
  using scratch_allocator = ideep::utils::scratch_allocator;
  using mdarray = implementation::mdarray;

  bool non_trivial_;
  std::shared_ptr<char> data_;

  inline void *data() const {
    return reinterpret_cast<void *>(data_.get());
  }

public:
  reorderer(const mdarray &src) :
      non_trivial_(!src.is_public_format()) {
    // Sync data explicitly before sharing data with a numpy array
    (void)src.get_data_handle<true>();
    if (non_trivial()) {
      data_ = std::shared_ptr<char>(reinterpret_cast<char *>(
          new scratch_allocator::byte<tensor>[src.get_size()]),
          [](char *p) {
            auto _p = reinterpret_cast<scratch_allocator::byte<tensor> *>(p);
            delete [] _p;
          });
    } else {
      data_ = src.get_shared_buff();
    }
  }

  void fire(const mdarray &src) {
    if (non_trivial()) {
      tensor dst;
      dst.init({src.get_dims(), src.get_data_type(),
          descriptor::public_compatible_format(src.get_descriptor())},
          (void *)data_.get());
      reorder::compute(src, dst);
    }
  }

  inline bool non_trivial() const {
    return non_trivial_;
  }
};
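
// A rough consumer-side sketch (illustrative; the actual flow lives in
// getbuffer/build_view):
//
//   reorderer rb(src);        // allocates a plain buffer only if src is in a
//                             // blocked (non-public) format
//   rb.fire(src);             // reorders src into that buffer when non-trivial
//   void *plain = rb.data();  // memory safe to expose through a Py_buffer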

class mdarray : public py_handle {
public:
  using tensor = ideep::tensor;
  using data_type_t = mkldnn::memory::data_type;

  mdarray() {}

  mdarray(tensor &tensor) :
      py_handle(std::make_shared<implementation::mdarray>(tensor)) {}

  mdarray(mkldnn::memory::dims &dims, mkldnn::memory::data_type dt) :
      py_handle(std::make_shared<implementation::mdarray>(dims, dt)) {}

  mdarray(Py_buffer *view, char input_type='d') :
      py_handle(std::make_shared<implementation::mdarray>(view, input_type)) {}

  static PyObject *mdarray_shape_get(mdarray *self) {
    implementation::mdarray *m = self->get();
    auto dims = m->get_dims();
    auto ndims = m->ndims();
    PyObject *intTuple = PyTuple_New(ndims);

    if (!intTuple)
      goto fail;

    for (int i = 0; i < ndims; i++) {
      PyObject *o = PyLong_FromLong(dims[i]);

      if (!o) {
        Py_DECREF(intTuple);
        intTuple = NULL;
        goto fail;
      }

      PyTuple_SET_ITEM(intTuple, i, o);
    }

    fail:
      return intTuple;
  }

  static PyObject *mdarray_dtype_get(mdarray *self) {
    implementation::mdarray *m = self->get();
    PyArray_Descr *pd;

    // Translate our data_type to the numpy equivalent
    switch (m->get_data_type()) {
    case data_type_t::f32:
      pd = PyArray_DescrFromType(NPY_FLOAT);
      break;
    case data_type_t::s32:
      pd = PyArray_DescrFromType(NPY_INT);
      break;
    case data_type_t::s16:
      pd = PyArray_DescrFromType(NPY_INT16);
      break;
    case data_type_t::s8:
      pd = PyArray_DescrFromType(NPY_INT8);
      break;
    case data_type_t::u8:
      pd = PyArray_DescrFromType(NPY_UINT8);
      break;
    default:
      PyErr_SetString(PyExc_ValueError, "Bad mdarray data_type");
      return nullptr;
    }

    return reinterpret_cast<PyObject *>(pd);
  }

  static long mdarray_size_get(mdarray *self) {
    return self->get()->get_nelems();
  }

  static long mdarray_ndim_get(mdarray *self) {
    return self->get()->ndims();
  }

  static bool mdarray_is_mdarray_get(mdarray *self) {
    return true;
  }
};

class reorder_buffer : reorderer {
public:
  reorder_buffer(const py_handle in) :
    reorderer(*in.get()) {}
};

#endif // _MDARRAY_H_