////////// MemviewSliceStruct.proto //////////
//@proto_block: utility_code_proto_before_types

/* memoryview slice struct */
struct {{memview_struct_name}};

typedef struct {
  struct {{memview_struct_name}} *memview;
  char *data;
  Py_ssize_t shape[{{max_dims}}];
  Py_ssize_t strides[{{max_dims}}];
  Py_ssize_t suboffsets[{{max_dims}}];
} {{memviewslice_name}};

// used for "len(memviewslice)"
#define __Pyx_MemoryView_Len(m)  (m.shape[0])
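
// Illustrative sketch (not generated code): for a 2-D, C-contiguous buffer of
// double with shape (3, 4), a {{memviewslice_name}} "s" would be populated
// roughly as
//
//     s.data          = (char *) buffer_start;
//     s.shape[0]      = 3;   s.shape[1]      = 4;
//     s.strides[0]    = 32;  s.strides[1]    = 8;   /* 4 * sizeof(double), sizeof(double) */
//     s.suboffsets[0] = -1;  s.suboffsets[1] = -1;  /* negative suboffset == direct access */
//
// so __Pyx_MemoryView_Len(s) evaluates to s.shape[0] == 3, matching len() of
// the corresponding Python memoryview.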


/////////// Atomics.proto /////////////
//@proto_block: utility_code_proto_before_types

#include <pythread.h>

#ifndef CYTHON_ATOMICS
    #define CYTHON_ATOMICS 1
#endif

#define __pyx_atomic_int_type int
// todo: Portland pgcc, maybe OS X's OSAtomicIncrement32,
//       libatomic + autotools-like distutils support? Such a pain...
#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||             \
                    (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 2)) && \
                    !defined(__i386__)
    /* gcc >= 4.1.2 */
    #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
    #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)

    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Using GNU atomics"
    #endif
#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0
    /* msvc */
    #include <Windows.h>
    #undef __pyx_atomic_int_type
    #define __pyx_atomic_int_type LONG
    #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
    #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)

    #ifdef __PYX_DEBUG_ATOMICS
        #pragma message ("Using MSVC atomics")
    #endif
#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
    #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
    #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)

    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Using Intel atomics"
    #endif
#else
    #undef CYTHON_ATOMICS
    #define CYTHON_ATOMICS 0

    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Not using atomics"
    #endif
#endif

typedef volatile __pyx_atomic_int_type __pyx_atomic_int;

#if CYTHON_ATOMICS
    #define __pyx_add_acquisition_count(memview) \
             __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
    #define __pyx_sub_acquisition_count(memview) \
            __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#else
    #define __pyx_add_acquisition_count(memview) \
            __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
    #define __pyx_sub_acquisition_count(memview) \
            __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#endif
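
// Note on the protocol (informal): both the __sync builtins above and the
// lock-based *_locked fallbacks (declared in MemviewSliceInit.proto) return
// the counter value *before* the update, so callers can detect the 0 -> 1
// transition on acquisition and the 1 -> 0 transition on release (see
// __Pyx_INC_MEMVIEW / __Pyx_XDEC_MEMVIEW). The fallback is equivalent to:
//
//     PyThread_acquire_lock(lock, 1);   /* blocking acquire */
//     old_count = (*acquisition_count)++;
//     PyThread_release_lock(lock);
//     return old_count;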


/////////////// ObjectToMemviewSlice.proto ///////////////

static CYTHON_INLINE {{memviewslice_name}} {{funcname}}(PyObject *, int writable_flag);


////////// MemviewSliceInit.proto //////////

#define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d

#define __Pyx_MEMVIEW_DIRECT   1
#define __Pyx_MEMVIEW_PTR      2
#define __Pyx_MEMVIEW_FULL     4
#define __Pyx_MEMVIEW_CONTIG   8
#define __Pyx_MEMVIEW_STRIDED  16
#define __Pyx_MEMVIEW_FOLLOW   32

#define __Pyx_IS_C_CONTIG 1
#define __Pyx_IS_F_CONTIG 2
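
// Illustrative expansion (an assumption about the generated axes_specs, shown
// only as an example): a memoryview type like "double[:, ::1]" would typically
// produce
//
//     int axes_specs[] = {
//         __Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW,  /* leading axis: any stride  */
//         __Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG   /* last axis: items adjacent */
//     };
//
// together with c_or_f_flag == __Pyx_IS_C_CONTIG, while "double[::1, :]" puts
// CONTIG on the first axis and uses __Pyx_IS_F_CONTIG instead.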

static int __Pyx_init_memviewslice(
                struct __pyx_memoryview_obj *memview,
                int ndim,
                __Pyx_memviewslice *memviewslice,
                int memview_is_new_reference);

static CYTHON_INLINE int __pyx_add_acquisition_count_locked(
    __pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(
    __pyx_atomic_int *acquisition_count, PyThread_type_lock lock);

#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p)
#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview))
#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__)
#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__)
static CYTHON_INLINE void __Pyx_INC_MEMVIEW({{memviewslice_name}} *, int, int);
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW({{memviewslice_name}} *, int, int);


/////////////// MemviewSliceIndex.proto ///////////////

static CYTHON_INLINE char *__pyx_memviewslice_index_full(
    const char *bufp, Py_ssize_t idx, Py_ssize_t stride, Py_ssize_t suboffset);


/////////////// ObjectToMemviewSlice ///////////////
//@requires: MemviewSliceValidateAndInit

static CYTHON_INLINE {{memviewslice_name}} {{funcname}}(PyObject *obj, int writable_flag) {
    {{memviewslice_name}} result = {{memslice_init}};
    __Pyx_BufFmt_StackElem stack[{{struct_nesting_depth}}];
    int axes_specs[] = { {{axes_specs}} };
    int retcode;

    if (obj == Py_None) {
        /* We don't bother to refcount None */
        result.memview = (struct __pyx_memoryview_obj *) Py_None;
        return result;
    }

    retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, {{c_or_f_flag}},
                                                 {{buf_flag}} | writable_flag, {{ndim}},
                                                 &{{dtype_typeinfo}}, stack,
                                                 &result, obj);

    if (unlikely(retcode == -1))
        goto __pyx_fail;

    return result;
__pyx_fail:
    result.memview = NULL;
    result.data = NULL;
    return result;
}
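
// Usage sketch (hypothetical converter name; real names are generated per
// dtype and axes): callers detect failure by checking result.memview, since
// the fail path zeroes the slice and leaves a Python exception set:
//
//     __Pyx_memviewslice s =
//         __Pyx_PyObject_to_MemoryviewSlice_dc_double(obj, PyBUF_WRITABLE);
//     if (!s.memview) {
//         /* conversion failed, exception already raised */
//     }
//
// Passing writable_flag == 0 instead of PyBUF_WRITABLE requests a read-only view.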


/////////////// MemviewSliceValidateAndInit.proto ///////////////

static int __Pyx_ValidateAndInit_memviewslice(
                int *axes_specs,
                int c_or_f_flag,
                int buf_flags,
                int ndim,
                __Pyx_TypeInfo *dtype,
                __Pyx_BufFmt_StackElem stack[],
                __Pyx_memviewslice *memviewslice,
                PyObject *original_obj);

/////////////// MemviewSliceValidateAndInit ///////////////
//@requires: Buffer.c::TypeInfoCompare
//@requires: Buffer.c::BufferFormatStructs
//@requires: Buffer.c::BufferFormatCheck

static int
__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec)
{
    if (buf->shape[dim] <= 1)
        return 1;

    if (buf->strides) {
        if (spec & __Pyx_MEMVIEW_CONTIG) {
            if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) {
                if (unlikely(buf->strides[dim] != sizeof(void *))) {
                    PyErr_Format(PyExc_ValueError,
                                 "Buffer is not indirectly contiguous "
                                 "in dimension %d.", dim);
                    goto fail;
                }
            } else if (unlikely(buf->strides[dim] != buf->itemsize)) {
                PyErr_SetString(PyExc_ValueError,
                                "Buffer and memoryview are not contiguous "
                                "in the same dimension.");
                goto fail;
            }
        }

        if (spec & __Pyx_MEMVIEW_FOLLOW) {
            Py_ssize_t stride = buf->strides[dim];
            if (stride < 0)
                stride = -stride;
            if (unlikely(stride < buf->itemsize)) {
                PyErr_SetString(PyExc_ValueError,
                                "Buffer and memoryview are not contiguous "
                                "in the same dimension.");
                goto fail;
            }
        }
    } else {
        if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) {
            PyErr_Format(PyExc_ValueError,
                         "C-contiguous buffer is not contiguous in "
                         "dimension %d", dim);
            goto fail;
        } else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) {
            PyErr_Format(PyExc_ValueError,
                         "C-contiguous buffer is not indirect in "
                         "dimension %d", dim);
            goto fail;
        } else if (unlikely(buf->suboffsets)) {
            PyErr_SetString(PyExc_ValueError,
                            "Buffer exposes suboffsets but no strides");
            goto fail;
        }
    }

    return 1;
fail:
    return 0;
}

static int
__pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec)
{
    // Todo: without PyBUF_INDIRECT we may not have suboffset information, i.e., the
    //       ptr may not be set to NULL but may be uninitialized?
    if (spec & __Pyx_MEMVIEW_DIRECT) {
        if (unlikely(buf->suboffsets && buf->suboffsets[dim] >= 0)) {
            PyErr_Format(PyExc_ValueError,
                         "Buffer not compatible with direct access "
                         "in dimension %d.", dim);
            goto fail;
        }
    }

    if (spec & __Pyx_MEMVIEW_PTR) {
        if (unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) {
            PyErr_Format(PyExc_ValueError,
                         "Buffer is not indirectly accessible "
                         "in dimension %d.", dim);
            goto fail;
        }
    }

    return 1;
fail:
    return 0;
}

static int
__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag)
{
    int i;

    if (c_or_f_flag & __Pyx_IS_F_CONTIG) {
        Py_ssize_t stride = 1;
        for (i = 0; i < ndim; i++) {
            if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) {
                PyErr_SetString(PyExc_ValueError,
                    "Buffer not fortran contiguous.");
                goto fail;
            }
            stride = stride * buf->shape[i];
        }
    } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) {
        Py_ssize_t stride = 1;
        for (i = ndim - 1; i >= 0; i--) {
            if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) {
                PyErr_SetString(PyExc_ValueError,
                    "Buffer not C contiguous.");
                goto fail;
            }
            stride = stride * buf->shape[i];
        }
    }

    return 1;
fail:
    return 0;
}
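
// Worked example: a C-contiguous 2 x 3 buffer of double has itemsize 8 and
// strides {24, 8}. With __Pyx_IS_C_CONTIG the loop runs i = 1, 0 with a
// cumulative element count "stride":
//
//     i = 1: 1 * 8 == strides[1] (8)    OK, stride becomes 3
//     i = 0: 3 * 8 == strides[0] (24)   OK, stride becomes 6
//
// Axes of extent <= 1 are skipped by the "buf->shape[i] > 1" test, so e.g. a
// (1, 3) buffer with an arbitrary outer stride still validates.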

static int __Pyx_ValidateAndInit_memviewslice(
                int *axes_specs,
                int c_or_f_flag,
                int buf_flags,
                int ndim,
                __Pyx_TypeInfo *dtype,
                __Pyx_BufFmt_StackElem stack[],
                __Pyx_memviewslice *memviewslice,
                PyObject *original_obj)
{
    struct __pyx_memoryview_obj *memview, *new_memview;
    __Pyx_RefNannyDeclarations
    Py_buffer *buf;
    int i, spec = 0, retval = -1;
    __Pyx_BufFmt_Context ctx;
    int from_memoryview = __pyx_memoryview_check(original_obj);

    __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0);

    if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *)
                                                            original_obj)->typeinfo)) {
        /* We have a matching dtype, skip format parsing */
        memview = (struct __pyx_memoryview_obj *) original_obj;
        new_memview = NULL;
    } else {
        memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
                                            original_obj, buf_flags, 0, dtype);
        new_memview = memview;
        if (unlikely(!memview))
            goto fail;
    }

    buf = &memview->view;
    if (unlikely(buf->ndim != ndim)) {
        PyErr_Format(PyExc_ValueError,
                "Buffer has wrong number of dimensions (expected %d, got %d)",
                ndim, buf->ndim);
        goto fail;
    }

    if (new_memview) {
        __Pyx_BufFmt_Init(&ctx, stack, dtype);
        if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail;
    }

    if (unlikely((unsigned) buf->itemsize != dtype->size)) {
        PyErr_Format(PyExc_ValueError,
                     "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) "
                     "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)",
                     buf->itemsize,
                     (buf->itemsize > 1) ? "s" : "",
                     dtype->name,
                     dtype->size,
                     (dtype->size > 1) ? "s" : "");
        goto fail;
    }

    /* Check axes */
    if (buf->len > 0) {
        // 0-sized arrays do not undergo these checks since their strides are
        // irrelevant and they are always both C- and F-contiguous.
        for (i = 0; i < ndim; i++) {
            spec = axes_specs[i];
            if (unlikely(!__pyx_check_strides(buf, i, ndim, spec)))
                goto fail;
            if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec)))
                goto fail;
        }

        /* Check contiguity */
        if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag)))
            goto fail;
    }

    /* Initialize */
    if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice,
                                         new_memview != NULL) == -1)) {
        goto fail;
    }

    retval = 0;
    goto no_fail;

fail:
    Py_XDECREF(new_memview);
    retval = -1;

no_fail:
    __Pyx_RefNannyFinishContext();
    return retval;
}


////////// MemviewSliceInit //////////

static int
__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview,
                        int ndim,
                        {{memviewslice_name}} *memviewslice,
                        int memview_is_new_reference)
{
    __Pyx_RefNannyDeclarations
    int i, retval=-1;
    Py_buffer *buf = &memview->view;
    __Pyx_RefNannySetupContext("init_memviewslice", 0);

    if (unlikely(memviewslice->memview || memviewslice->data)) {
        PyErr_SetString(PyExc_ValueError,
            "memviewslice is already initialized!");
        goto fail;
    }

    if (buf->strides) {
        for (i = 0; i < ndim; i++) {
            memviewslice->strides[i] = buf->strides[i];
        }
    } else {
        Py_ssize_t stride = buf->itemsize;
        for (i = ndim - 1; i >= 0; i--) {
            memviewslice->strides[i] = stride;
            stride *= buf->shape[i];
        }
    }

    for (i = 0; i < ndim; i++) {
        memviewslice->shape[i]   = buf->shape[i];
        if (buf->suboffsets) {
            memviewslice->suboffsets[i] = buf->suboffsets[i];
        } else {
            memviewslice->suboffsets[i] = -1;
        }
    }

    memviewslice->memview = memview;
    memviewslice->data = (char *)buf->buf;
    if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) {
        Py_INCREF(memview);
    }
    retval = 0;
    goto no_fail;

fail:
    /* Don't decref, the memoryview may be borrowed. Let the caller do the cleanup */
    /* __Pyx_XDECREF(memviewslice->memview); */
    memviewslice->memview = 0;
    memviewslice->data = 0;
    retval = -1;
no_fail:
    __Pyx_RefNannyFinishContext();
    return retval;
}
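
// Note: when the exporter provides no strides array (a PyBUF_ND-style,
// C-contiguous buffer), the loop above reconstructs the strides from itemsize
// and shape. For a 3 x 4 buffer of double this yields
//
//     strides[1] = 8            /* itemsize            */
//     strides[0] = 8 * 4 = 32   /* itemsize * shape[1] */
//
// which matches what an exporter with explicit C-contiguous strides would report.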

#ifndef Py_NO_RETURN
// available since Py3.3
#define Py_NO_RETURN
#endif

static void __pyx_fatalerror(const char *fmt, ...) Py_NO_RETURN {
    va_list vargs;
    char msg[200];

#ifdef HAVE_STDARG_PROTOTYPES
    va_start(vargs, fmt);
#else
    va_start(vargs);
#endif
    vsnprintf(msg, 200, fmt, vargs);
    va_end(vargs);

    Py_FatalError(msg);
}

static CYTHON_INLINE int
__pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
                                   PyThread_type_lock lock)
{
    int result;
    PyThread_acquire_lock(lock, 1);
    result = (*acquisition_count)++;
    PyThread_release_lock(lock);
    return result;
}

static CYTHON_INLINE int
__pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
                                   PyThread_type_lock lock)
{
    int result;
    PyThread_acquire_lock(lock, 1);
    result = (*acquisition_count)--;
    PyThread_release_lock(lock);
    return result;
}


static CYTHON_INLINE void
__Pyx_INC_MEMVIEW({{memviewslice_name}} *memslice, int have_gil, int lineno)
{
    int first_time;
    struct {{memview_struct_name}} *memview = memslice->memview;
    if (unlikely(!memview || (PyObject *) memview == Py_None))
        return; /* allow uninitialized memoryview assignment */

    if (unlikely(__pyx_get_slice_count(memview) < 0))
        __pyx_fatalerror("Acquisition count is %d (line %d)",
                         __pyx_get_slice_count(memview), lineno);

    first_time = __pyx_add_acquisition_count(memview) == 0;

    if (unlikely(first_time)) {
        if (have_gil) {
            Py_INCREF((PyObject *) memview);
        } else {
            PyGILState_STATE _gilstate = PyGILState_Ensure();
            Py_INCREF((PyObject *) memview);
            PyGILState_Release(_gilstate);
        }
    }
}

static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW({{memviewslice_name}} *memslice,
                                             int have_gil, int lineno) {
    int last_time;
    struct {{memview_struct_name}} *memview = memslice->memview;

    if (unlikely(!memview || (PyObject *) memview == Py_None)) {
        // we do not ref-count None
        memslice->memview = NULL;
        return;
    }

    if (unlikely(__pyx_get_slice_count(memview) <= 0))
        __pyx_fatalerror("Acquisition count is %d (line %d)",
                         __pyx_get_slice_count(memview), lineno);

    last_time = __pyx_sub_acquisition_count(memview) == 1;
    memslice->data = NULL;

    if (unlikely(last_time)) {
        if (have_gil) {
            Py_CLEAR(memslice->memview);
        } else {
            PyGILState_STATE _gilstate = PyGILState_Ensure();
            Py_CLEAR(memslice->memview);
            PyGILState_Release(_gilstate);
        }
    } else {
        memslice->memview = NULL;
    }
}
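
// Lifecycle sketch (a and b are two slices of the same memoryview object; the
// count is assumed to start at zero and the GIL is held): only the 0 -> 1 and
// 1 -> 0 transitions of the acquisition count touch the Python reference
// count; intermediate acquisitions and releases only update the counter and
// need no GIL when atomics are available.
//
//     __PYX_INC_MEMVIEW(&a, 1);   /* count 0 -> 1: Py_INCREF(memview)  */
//     __PYX_INC_MEMVIEW(&b, 1);   /* count 1 -> 2: counter only        */
//     __PYX_XDEC_MEMVIEW(&b, 1);  /* count 2 -> 1: counter only        */
//     __PYX_XDEC_MEMVIEW(&a, 1);  /* count 1 -> 0: Py_CLEAR(a.memview) */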


////////// MemviewSliceCopyTemplate.proto //////////

static {{memviewslice_name}}
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
                                 const char *mode, int ndim,
                                 size_t sizeof_dtype, int contig_flag,
                                 int dtype_is_object);


////////// MemviewSliceCopyTemplate //////////

static {{memviewslice_name}}
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
                                 const char *mode, int ndim,
                                 size_t sizeof_dtype, int contig_flag,
                                 int dtype_is_object)
{
    __Pyx_RefNannyDeclarations
    int i;
    __Pyx_memviewslice new_mvs = {{memslice_init}};
    struct __pyx_memoryview_obj *from_memview = from_mvs->memview;
    Py_buffer *buf = &from_memview->view;
    PyObject *shape_tuple = NULL;
    PyObject *temp_int = NULL;
    struct __pyx_array_obj *array_obj = NULL;
    struct __pyx_memoryview_obj *memview_obj = NULL;

    __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0);

    for (i = 0; i < ndim; i++) {
        if (unlikely(from_mvs->suboffsets[i] >= 0)) {
            PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with "
                                           "indirect dimensions (axis %d)", i);
            goto fail;
        }
    }

    shape_tuple = PyTuple_New(ndim);
    if (unlikely(!shape_tuple)) {
        goto fail;
    }
    __Pyx_GOTREF(shape_tuple);


    for (i = 0; i < ndim; i++) {
        temp_int = PyInt_FromSsize_t(from_mvs->shape[i]);
        if (unlikely(!temp_int)) {
            goto fail;
        } else {
            PyTuple_SET_ITEM(shape_tuple, i, temp_int);
            temp_int = NULL;
        }
    }

    array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL);
    if (unlikely(!array_obj)) {
        goto fail;
    }
    __Pyx_GOTREF(array_obj);

    memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
                                    (PyObject *) array_obj, contig_flag,
                                    dtype_is_object,
                                    from_mvs->memview->typeinfo);
    if (unlikely(!memview_obj))
        goto fail;

    /* initialize new_mvs */
    if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0))
        goto fail;

    if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim,
                                                dtype_is_object) < 0))
        goto fail;

    goto no_fail;

fail:
    __Pyx_XDECREF(new_mvs.memview);
    new_mvs.memview = NULL;
    new_mvs.data = NULL;
no_fail:
    __Pyx_XDECREF(shape_tuple);
    __Pyx_XDECREF(temp_int);
    __Pyx_XDECREF(array_obj);
    __Pyx_RefNannyFinishContext();
    return new_mvs;
}


////////// CopyContentsUtility.proto /////////

#define {{func_cname}}(slice) \
        __pyx_memoryview_copy_new_contig(&slice, "{{mode}}", {{ndim}},            \
                                         sizeof({{dtype_decl}}), {{contig_flag}}, \
                                         {{dtype_is_object}})
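
// Illustrative expansion (an assumption about typical template parameters):
// for a 2-D C-contiguous slice of double with non-object items, the macro
// would expand along the lines of
//
//     __pyx_memoryview_copy_new_contig(&slice, "c", 2,
//                                      sizeof(double), PyBUF_C_CONTIGUOUS, 0)
//
// returning a freshly allocated, contiguous copy of the slice in the
// requested ("c" or "fortran") layout.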


////////// OverlappingSlices.proto //////////

static int __pyx_slices_overlap({{memviewslice_name}} *slice1,
                                {{memviewslice_name}} *slice2,
                                int ndim, size_t itemsize);


////////// OverlappingSlices //////////

/* Based on numpy's core/src/multiarray/array_assign.c */

/* Gets a half-open range [start, end) which contains the array data */
static void
__pyx_get_array_memory_extents({{memviewslice_name}} *slice,
                               void **out_start, void **out_end,
                               int ndim, size_t itemsize)
{
    char *start, *end;
    int i;

    start = end = slice->data;

    for (i = 0; i < ndim; i++) {
        Py_ssize_t stride = slice->strides[i];
        Py_ssize_t extent = slice->shape[i];

        if (extent == 0) {
            *out_start = *out_end = start;
            return;
        } else {
            if (stride > 0)
                end += stride * (extent - 1);
            else
                start += stride * (extent - 1);
        }
    }

    /* Return a half-open range */
    *out_start = start;
    *out_end = end + itemsize;
}
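
// Worked example: a 1-D slice of 5 doubles viewed in reverse order has data
// pointing at its last element and strides[0] == -8. The loop moves "start"
// back by stride * (extent - 1) = -32 while "end" stays at data, so the
// returned half-open range is
//
//     [data - 32, data + 8)
//
// covering all five 8-byte items despite the negative stride.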

/* Returns 1 if the arrays have overlapping data, 0 otherwise */
static int
__pyx_slices_overlap({{memviewslice_name}} *slice1,
                     {{memviewslice_name}} *slice2,
                     int ndim, size_t itemsize)
{
    void *start1, *end1, *start2, *end2;

    __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize);
    __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize);

    return (start1 < end2) && (start2 < end1);
}


////////// MemviewSliceCheckContig.proto //////////

#define __pyx_memviewslice_is_contig_{{contig_type}}{{ndim}}(slice) \
    __pyx_memviewslice_is_contig(slice, '{{contig_type}}', {{ndim}})


////////// MemviewSliceIsContig.proto //////////

static int __pyx_memviewslice_is_contig(const {{memviewslice_name}} mvs, char order, int ndim);/*proto*/


////////// MemviewSliceIsContig //////////

static int
__pyx_memviewslice_is_contig(const {{memviewslice_name}} mvs, char order, int ndim)
{
    int i, index, step, start;
    Py_ssize_t itemsize = mvs.memview->view.itemsize;

    if (order == 'F') {
        step = 1;
        start = 0;
    } else {
        step = -1;
        start = ndim - 1;
    }

    for (i = 0; i < ndim; i++) {
        index = start + step * i;
        if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize)
            return 0;

        itemsize *= mvs.shape[index];
    }

    return 1;
}
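
// Worked example: a C-contiguous 3 x 4 slice of double (itemsize 8, strides
// {32, 8}, all suboffsets negative) checked with order == 'C' walks the axes
// last-to-first:
//
//     index 1: strides[1] == 8    -> itemsize becomes 8 * 4 = 32
//     index 0: strides[0] == 32   -> itemsize becomes 32 * 3 = 96
//
// and the function returns 1. The same slice checked with order == 'F' fails
// immediately because strides[0] != 8, and any non-negative suboffset (an
// indirect axis) also makes the slice non-contiguous.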


/////////////// MemviewSliceIndex ///////////////

static CYTHON_INLINE char *
__pyx_memviewslice_index_full(const char *bufp, Py_ssize_t idx,
                              Py_ssize_t stride, Py_ssize_t suboffset)
{
    bufp = bufp + idx * stride;
    if (suboffset >= 0) {
        bufp = *((char **) bufp) + suboffset;
    }
    return (char *) bufp;
}
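
// Access sketch: for a direct dimension (suboffset < 0) the element address
// is simply bufp + idx * stride. For an indirect dimension the computed
// address holds a "char *" that is dereferenced and then advanced by the
// suboffset, following the Py_buffer suboffset convention:
//
//     bufp = bufp + idx * stride;            /* slot containing a pointer */
//     bufp = *((char **) bufp) + suboffset;  /* jump into the sub-block   */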


/////////////// MemviewDtypeToObject.proto ///////////////

{{if to_py_function}}
static CYTHON_INLINE PyObject *{{get_function}}(const char *itemp); /* proto */
{{endif}}

{{if from_py_function}}
static CYTHON_INLINE int {{set_function}}(const char *itemp, PyObject *obj); /* proto */
{{endif}}

/////////////// MemviewDtypeToObject ///////////////

{{#__pyx_memview_<dtype_name>_to_object}}

/* Convert a dtype to or from a Python object */

{{if to_py_function}}
static CYTHON_INLINE PyObject *{{get_function}}(const char *itemp) {
    return (PyObject *) {{to_py_function}}(*({{dtype}} *) itemp);
}
{{endif}}

{{if from_py_function}}
static CYTHON_INLINE int {{set_function}}(const char *itemp, PyObject *obj) {
    {{dtype}} value = {{from_py_function}}(obj);
    if ({{error_condition}})
        return 0;
    *({{dtype}} *) itemp = value;
    return 1;
}
{{endif}}
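
// Illustrative expansion (an assumption, for a plain C double dtype): the
// generated getter/setter pair would look roughly like
//
//     static CYTHON_INLINE PyObject *__pyx_memview_get_double(const char *itemp) {
//         return (PyObject *) PyFloat_FromDouble(*(double *) itemp);
//     }
//     static CYTHON_INLINE int __pyx_memview_set_double(const char *itemp, PyObject *obj) {
//         double value = __pyx_PyFloat_AsDouble(obj);
//         if ((value == (double)-1) && PyErr_Occurred())
//             return 0;
//         *(double *) itemp = value;
//         return 1;
//     }
//
// Note the return convention: 1 on success, 0 on failure with the Python
// exception already set.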


/////////////// MemviewObjectToObject.proto ///////////////

/* Function callbacks (for memoryview object) for dtype object */
static PyObject *{{get_function}}(const char *itemp); /* proto */
static int {{set_function}}(const char *itemp, PyObject *obj); /* proto */


/////////////// MemviewObjectToObject ///////////////

static PyObject *{{get_function}}(const char *itemp) {
    PyObject *result = *(PyObject **) itemp;
    Py_INCREF(result);
    return result;
}

static int {{set_function}}(const char *itemp, PyObject *obj) {
    Py_INCREF(obj);
    Py_DECREF(*(PyObject **) itemp);
    *(PyObject **) itemp = obj;
    return 1;
}

/////////// ToughSlice //////////

/* Dimension is indexed with 'start:stop:step' */

if (unlikely(__pyx_memoryview_slice_memviewslice(
    &{{dst}},
    {{src}}.shape[{{dim}}], {{src}}.strides[{{dim}}], {{src}}.suboffsets[{{dim}}],
    {{dim}},
    {{new_ndim}},
    &{{get_suboffset_dim()}},
    {{start}},
    {{stop}},
    {{step}},
    {{int(have_start)}},
    {{int(have_stop)}},
    {{int(have_step)}},
    1) < 0))
{
    {{error_goto}}
}


////////// SimpleSlice //////////

/* Dimension is indexed with ':' only */

{{dst}}.shape[{{new_ndim}}] = {{src}}.shape[{{dim}}];
{{dst}}.strides[{{new_ndim}}] = {{src}}.strides[{{dim}}];

{{if access == 'direct'}}
    {{dst}}.suboffsets[{{new_ndim}}] = -1;
{{else}}
    {{dst}}.suboffsets[{{new_ndim}}] = {{src}}.suboffsets[{{dim}}];
    if ({{src}}.suboffsets[{{dim}}] >= 0)
        {{get_suboffset_dim()}} = {{new_ndim}};
{{endif}}


////////// SliceIndex //////////

// Dimension is indexed with an integer; we could use the ToughSlice
// approach, but this is faster.

{
    Py_ssize_t __pyx_tmp_idx = {{idx}};

    {{if wraparound or boundscheck}}
        Py_ssize_t __pyx_tmp_shape = {{src}}.shape[{{dim}}];
    {{endif}}

    Py_ssize_t __pyx_tmp_stride = {{src}}.strides[{{dim}}];
    {{if wraparound}}
        if (__pyx_tmp_idx < 0)
            __pyx_tmp_idx += __pyx_tmp_shape;
    {{endif}}

    {{if boundscheck}}
        if (unlikely(!__Pyx_is_valid_index(__pyx_tmp_idx, __pyx_tmp_shape))) {
            {{if not have_gil}}
                #ifdef WITH_THREAD
                PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
                #endif
            {{endif}}

            PyErr_SetString(PyExc_IndexError,
                            "Index out of bounds (axis {{dim}})");

            {{if not have_gil}}
                #ifdef WITH_THREAD
                PyGILState_Release(__pyx_gilstate_save);
                #endif
            {{endif}}

            {{error_goto}}
        }
    {{endif}}

    {{if all_dimensions_direct}}
        {{dst}}.data += __pyx_tmp_idx * __pyx_tmp_stride;
    {{else}}
        if ({{get_suboffset_dim()}} < 0) {
            {{dst}}.data += __pyx_tmp_idx * __pyx_tmp_stride;

            /* This dimension is the first dimension, or is preceded by    */
            /* direct or indirect dimensions that are indexed away.        */
            /* Hence suboffset_dim must be less than zero, and we can have */
            /* our data pointer refer to another block by dereferencing.   */
            /*   slice.data -> B -> C     becomes     slice.data -> C      */

            {{if indirect}}
              {
                Py_ssize_t __pyx_tmp_suboffset = {{src}}.suboffsets[{{dim}}];

                {{if generic}}
                    if (__pyx_tmp_suboffset >= 0)
                {{endif}}

                    {{dst}}.data = *((char **) {{dst}}.data) + __pyx_tmp_suboffset;
              }
            {{endif}}

        } else {
            {{dst}}.suboffsets[{{get_suboffset_dim()}}] += __pyx_tmp_idx * __pyx_tmp_stride;

            /* Note: the dimension cannot be indirect; the compiler will have issued an error. */
        }

    {{endif}}
}


////////// FillStrided1DScalar.proto //////////

static void
__pyx_fill_slice_{{dtype_name}}({{type_decl}} *p, Py_ssize_t extent, Py_ssize_t stride,
                                size_t itemsize, void *itemp);

////////// FillStrided1DScalar //////////

/* Fill a slice with a scalar value. The dimension is direct and strided or contiguous. */
/* This can be used as a callback for the memoryview object to efficiently assign a scalar. */
/* Currently unused. */
static void
__pyx_fill_slice_{{dtype_name}}({{type_decl}} *p, Py_ssize_t extent, Py_ssize_t stride,
                                size_t itemsize, void *itemp)
{
    Py_ssize_t i;
    {{type_decl}} item = *(({{type_decl}} *) itemp);
    {{type_decl}} *endp;

    stride /= sizeof({{type_decl}});
    endp = p + stride * extent;

    while (p < endp) {
        *p = item;
        p += stride;
    }
}
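
// Usage sketch (hypothetical instantiation for dtype "int"): fill every other
// element of a packed int buffer, i.e. a 1-D slice with extent 5 and a byte
// stride of 2 * sizeof(int):
//
//     int value = 42;
//     __pyx_fill_slice_int((int *) slice.data, 5, slice.strides[0],
//                          sizeof(int), &value);
//
// The byte stride is converted to an element stride by "stride /= sizeof(...)",
// so p advances two ints per iteration.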