1 /*
2 * Memoryview object implementation
3 * --------------------------------
4 *
5 * This implementation is a complete rewrite contributed by Stefan Krah in
6 * Python 3.3. Substantial credit goes to Antoine Pitrou (who had already
7 * fortified and rewritten the previous implementation) and Nick Coghlan
8 * (who came up with the idea of the ManagedBuffer) for analyzing the complex
9 * ownership rules.
10 *
11 */
12
13 #include "Python.h"
14 #include "pycore_abstract.h" // _PyIndex_Check()
15 #include "pycore_object.h" // _PyObject_GC_UNTRACK()
16 #include "pycore_strhex.h" // _Py_strhex_with_sep()
17 #include <stddef.h> // offsetof()
18
19 /*[clinic input]
20 class memoryview "PyMemoryViewObject *" "&PyMemoryView_Type"
21 [clinic start generated code]*/
22 /*[clinic end generated code: output=da39a3ee5e6b4b0d input=e2e49d2192835219]*/
23
24 #include "clinic/memoryobject.c.h"
25
26 /****************************************************************************/
27 /* ManagedBuffer Object */
28 /****************************************************************************/
29
30 /*
31 ManagedBuffer Object:
32 ---------------------
33
34 The purpose of this object is to facilitate the handling of chained
35 memoryviews that have the same underlying exporting object. PEP-3118
36 allows the underlying object to change while a view is exported. This
37 could lead to unexpected results when constructing a new memoryview
38 from an existing memoryview.
39
40 Rather than repeatedly redirecting buffer requests to the original base
41 object, all chained memoryviews use a single buffer snapshot. This
42 snapshot is generated by the constructor _PyManagedBuffer_FromObject().
43
44 Ownership rules:
45 ----------------
46
47 The master buffer inside a managed buffer is filled in by the original
48 base object. shape, strides, suboffsets and format are read-only for
49 all consumers.
50
51 A memoryview's buffer is a private copy of the exporter's buffer. shape,
52 strides and suboffsets belong to the memoryview and are thus writable.
53
54 If a memoryview itself exports several buffers via memory_getbuf(), all
55 buffer copies share shape, strides and suboffsets. In this case, the
56 arrays are NOT writable.
57
58 Reference count assumptions:
59 ----------------------------
60
61 The 'obj' member of a Py_buffer must either be NULL or refer to the
62 exporting base object. In the Python codebase, all getbufferprocs
63 return a new reference to view.obj (example: bytes_buffer_getbuffer()).
64
65 PyBuffer_Release() decrements view.obj (if non-NULL), so the
66 releasebufferprocs must NOT decrement view.obj.
67 */
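/*
   Illustrative sketch (not part of this file): a minimal exporter that
   follows the reference count rules above. PyBuffer_FillInfo() stores a
   new reference to the exporting object in view->obj; the matching
   releasebufferproc must not decrement it, since PyBuffer_Release()
   does that itself, which is why the release function below is empty.
   Type and function names are hypothetical.

       typedef struct {
           PyObject_HEAD
           char data[16];
       } MyExporter;

       static int
       myexporter_getbuffer(MyExporter *self, Py_buffer *view, int flags)
       {
           return PyBuffer_FillInfo(view, (PyObject *)self, self->data,
                                    sizeof(self->data), 1, flags);
       }

       static void
       myexporter_releasebuffer(MyExporter *self, Py_buffer *view)
       {
       }
*/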
68
69
70 #define CHECK_MBUF_RELEASED(mbuf) \
71 if (((_PyManagedBufferObject *)mbuf)->flags&_Py_MANAGED_BUFFER_RELEASED) { \
72 PyErr_SetString(PyExc_ValueError, \
73 "operation forbidden on released memoryview object"); \
74 return NULL; \
75 }
76
77
78 static inline _PyManagedBufferObject *
79 mbuf_alloc(void)
80 {
81 _PyManagedBufferObject *mbuf;
82
83 mbuf = (_PyManagedBufferObject *)
84 PyObject_GC_New(_PyManagedBufferObject, &_PyManagedBuffer_Type);
85 if (mbuf == NULL)
86 return NULL;
87 mbuf->flags = 0;
88 mbuf->exports = 0;
89 mbuf->master.obj = NULL;
90 _PyObject_GC_TRACK(mbuf);
91
92 return mbuf;
93 }
94
95 static PyObject *
96 _PyManagedBuffer_FromObject(PyObject *base)
97 {
98 _PyManagedBufferObject *mbuf;
99
100 mbuf = mbuf_alloc();
101 if (mbuf == NULL)
102 return NULL;
103
104 if (PyObject_GetBuffer(base, &mbuf->master, PyBUF_FULL_RO) < 0) {
105 mbuf->master.obj = NULL;
106 Py_DECREF(mbuf);
107 return NULL;
108 }
109
110 return (PyObject *)mbuf;
111 }
112
113 static void
114 mbuf_release(_PyManagedBufferObject *self)
115 {
116 if (self->flags&_Py_MANAGED_BUFFER_RELEASED)
117 return;
118
119 /* NOTE: at this point self->exports can still be > 0 if this function
120 is called from mbuf_clear() to break up a reference cycle. */
121 self->flags |= _Py_MANAGED_BUFFER_RELEASED;
122
123 /* PyBuffer_Release() decrements master->obj and sets it to NULL. */
124 _PyObject_GC_UNTRACK(self);
125 PyBuffer_Release(&self->master);
126 }
127
128 static void
129 mbuf_dealloc(_PyManagedBufferObject *self)
130 {
131 assert(self->exports == 0);
132 mbuf_release(self);
133 if (self->flags&_Py_MANAGED_BUFFER_FREE_FORMAT)
134 PyMem_Free(self->master.format);
135 PyObject_GC_Del(self);
136 }
137
138 static int
139 mbuf_traverse(_PyManagedBufferObject *self, visitproc visit, void *arg)
140 {
141 Py_VISIT(self->master.obj);
142 return 0;
143 }
144
145 static int
146 mbuf_clear(_PyManagedBufferObject *self)
147 {
148 assert(self->exports >= 0);
149 mbuf_release(self);
150 return 0;
151 }
152
153 PyTypeObject _PyManagedBuffer_Type = {
154 PyVarObject_HEAD_INIT(&PyType_Type, 0)
155 "managedbuffer",
156 sizeof(_PyManagedBufferObject),
157 0,
158 (destructor)mbuf_dealloc, /* tp_dealloc */
159 0, /* tp_vectorcall_offset */
160 0, /* tp_getattr */
161 0, /* tp_setattr */
162 0, /* tp_as_async */
163 0, /* tp_repr */
164 0, /* tp_as_number */
165 0, /* tp_as_sequence */
166 0, /* tp_as_mapping */
167 0, /* tp_hash */
168 0, /* tp_call */
169 0, /* tp_str */
170 PyObject_GenericGetAttr, /* tp_getattro */
171 0, /* tp_setattro */
172 0, /* tp_as_buffer */
173 Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
174 0, /* tp_doc */
175 (traverseproc)mbuf_traverse, /* tp_traverse */
176 (inquiry)mbuf_clear /* tp_clear */
177 };
178
179
180 /****************************************************************************/
181 /* MemoryView Object */
182 /****************************************************************************/
183
184 /* In the process of breaking reference cycles mbuf_release() can be
185 called before memory_release(). */
186 #define BASE_INACCESSIBLE(mv) \
187 (((PyMemoryViewObject *)mv)->flags&_Py_MEMORYVIEW_RELEASED || \
188 ((PyMemoryViewObject *)mv)->mbuf->flags&_Py_MANAGED_BUFFER_RELEASED)
189
190 #define CHECK_RELEASED(mv) \
191 if (BASE_INACCESSIBLE(mv)) { \
192 PyErr_SetString(PyExc_ValueError, \
193 "operation forbidden on released memoryview object"); \
194 return NULL; \
195 }
196
197 #define CHECK_RELEASED_INT(mv) \
198 if (BASE_INACCESSIBLE(mv)) { \
199 PyErr_SetString(PyExc_ValueError, \
200 "operation forbidden on released memoryview object"); \
201 return -1; \
202 }
203
204 #define CHECK_LIST_OR_TUPLE(v) \
205 if (!PyList_Check(v) && !PyTuple_Check(v)) { \
206 PyErr_SetString(PyExc_TypeError, \
207 #v " must be a list or a tuple"); \
208 return NULL; \
209 }
210
211 #define VIEW_ADDR(mv) (&((PyMemoryViewObject *)mv)->view)
212
213 /* Check for the presence of suboffsets in the first dimension. */
214 #define HAVE_PTR(suboffsets, dim) (suboffsets && suboffsets[dim] >= 0)
215 /* Adjust ptr if suboffsets are present. */
216 #define ADJUST_PTR(ptr, suboffsets, dim) \
217 (HAVE_PTR(suboffsets, dim) ? *((char**)ptr) + suboffsets[dim] : ptr)
218
219 /* Memoryview buffer properties */
220 #define MV_C_CONTIGUOUS(flags) (flags&(_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_C))
221 #define MV_F_CONTIGUOUS(flags) \
222 (flags&(_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_FORTRAN))
223 #define MV_ANY_CONTIGUOUS(flags) \
224 (flags&(_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_C|_Py_MEMORYVIEW_FORTRAN))
225
226 /* Fast contiguity test. Caller must ensure suboffsets==NULL and ndim==1. */
227 #define MV_CONTIGUOUS_NDIM1(view) \
228 ((view)->shape[0] == 1 || (view)->strides[0] == (view)->itemsize)
229
230 /* getbuffer() requests */
231 #define REQ_INDIRECT(flags) ((flags&PyBUF_INDIRECT) == PyBUF_INDIRECT)
232 #define REQ_C_CONTIGUOUS(flags) ((flags&PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS)
233 #define REQ_F_CONTIGUOUS(flags) ((flags&PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS)
234 #define REQ_ANY_CONTIGUOUS(flags) ((flags&PyBUF_ANY_CONTIGUOUS) == PyBUF_ANY_CONTIGUOUS)
235 #define REQ_STRIDES(flags) ((flags&PyBUF_STRIDES) == PyBUF_STRIDES)
236 #define REQ_SHAPE(flags) ((flags&PyBUF_ND) == PyBUF_ND)
237 #define REQ_WRITABLE(flags) (flags&PyBUF_WRITABLE)
238 #define REQ_FORMAT(flags) (flags&PyBUF_FORMAT)
239
240
241 /**************************************************************************/
242 /* Copy memoryview buffers */
243 /**************************************************************************/
244
245 /* The functions in this section take a source and a destination buffer
246 with the same logical structure: format, itemsize, ndim and shape
247 are identical, with ndim > 0.
248
249 NOTE: All buffers are assumed to have PyBUF_FULL information, which
250 is the case for memoryviews! */
251
252
253 /* Assumptions: ndim >= 1. The macro tests for a corner case that should
254 perhaps be explicitly forbidden in the PEP. */
255 #define HAVE_SUBOFFSETS_IN_LAST_DIM(view) \
256 (view->suboffsets && view->suboffsets[dest->ndim-1] >= 0)
257
258 static inline int
259 last_dim_is_contiguous(const Py_buffer *dest, const Py_buffer *src)
260 {
261 assert(dest->ndim > 0 && src->ndim > 0);
262 return (!HAVE_SUBOFFSETS_IN_LAST_DIM(dest) &&
263 !HAVE_SUBOFFSETS_IN_LAST_DIM(src) &&
264 dest->strides[dest->ndim-1] == dest->itemsize &&
265 src->strides[src->ndim-1] == src->itemsize);
266 }
267
268 /* This is not a general function for determining format equivalence.
269 It is used in copy_single() and copy_buffer() to weed out non-matching
270 formats. Skipping the '@' character is specifically used in slice
271 assignments, where the lvalue is already known to have a single character
272 format. This is a performance hack that could be rewritten (if properly
273 benchmarked). */
274 static inline int
275 equiv_format(const Py_buffer *dest, const Py_buffer *src)
276 {
277 const char *dfmt, *sfmt;
278
279 assert(dest->format && src->format);
280 dfmt = dest->format[0] == '@' ? dest->format+1 : dest->format;
281 sfmt = src->format[0] == '@' ? src->format+1 : src->format;
282
283 if (strcmp(dfmt, sfmt) != 0 ||
284 dest->itemsize != src->itemsize) {
285 return 0;
286 }
287
288 return 1;
289 }
290
291 /* Two shapes are equivalent if they are either equal or identical up
292 to a zero element at the same position. For example, in NumPy arrays
293 the shapes [1, 0, 5] and [1, 0, 7] are equivalent. */
294 static inline int
295 equiv_shape(const Py_buffer *dest, const Py_buffer *src)
296 {
297 int i;
298
299 if (dest->ndim != src->ndim)
300 return 0;
301
302 for (i = 0; i < dest->ndim; i++) {
303 if (dest->shape[i] != src->shape[i])
304 return 0;
305 if (dest->shape[i] == 0)
306 break;
307 }
308
309 return 1;
310 }
311
312 /* Check that the logical structure of the destination and source buffers
313 is identical. */
314 static int
315 equiv_structure(const Py_buffer *dest, const Py_buffer *src)
316 {
317 if (!equiv_format(dest, src) ||
318 !equiv_shape(dest, src)) {
319 PyErr_SetString(PyExc_ValueError,
320 "memoryview assignment: lvalue and rvalue have different "
321 "structures");
322 return 0;
323 }
324
325 return 1;
326 }
327
328 /* Base case for recursive multi-dimensional copying. Contiguous arrays are
329 copied with very little overhead. Assumptions: ndim == 1, mem == NULL or
330 sizeof(mem) == shape[0] * itemsize. */
331 static void
332 copy_base(const Py_ssize_t *shape, Py_ssize_t itemsize,
333 char *dptr, const Py_ssize_t *dstrides, const Py_ssize_t *dsuboffsets,
334 char *sptr, const Py_ssize_t *sstrides, const Py_ssize_t *ssuboffsets,
335 char *mem)
336 {
337 if (mem == NULL) { /* contiguous */
338 Py_ssize_t size = shape[0] * itemsize;
339 if (dptr + size < sptr || sptr + size < dptr)
340 memcpy(dptr, sptr, size); /* no overlapping */
341 else
342 memmove(dptr, sptr, size);
343 }
344 else {
345 char *p;
346 Py_ssize_t i;
347 for (i=0, p=mem; i < shape[0]; p+=itemsize, sptr+=sstrides[0], i++) {
348 char *xsptr = ADJUST_PTR(sptr, ssuboffsets, 0);
349 memcpy(p, xsptr, itemsize);
350 }
351 for (i=0, p=mem; i < shape[0]; p+=itemsize, dptr+=dstrides[0], i++) {
352 char *xdptr = ADJUST_PTR(dptr, dsuboffsets, 0);
353 memcpy(xdptr, p, itemsize);
354 }
355 }
356
357 }
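/* Note (an observation, not a documented guarantee): in the staging branch
   above, every source item is read into 'mem' before any destination item
   is written. A single row is therefore copied correctly even if the source
   and destination views share the underlying memory, and arbitrary strides
   and suboffsets are supported, which the plain memcpy()/memmove() of the
   contiguous branch cannot handle. */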
358
359 /* Recursively copy a source buffer to a destination buffer. The two buffers
360 have the same ndim, shape and itemsize. */
361 static void
362 copy_rec(const Py_ssize_t *shape, Py_ssize_t ndim, Py_ssize_t itemsize,
363 char *dptr, const Py_ssize_t *dstrides, const Py_ssize_t *dsuboffsets,
364 char *sptr, const Py_ssize_t *sstrides, const Py_ssize_t *ssuboffsets,
365 char *mem)
366 {
367 Py_ssize_t i;
368
369 assert(ndim >= 1);
370
371 if (ndim == 1) {
372 copy_base(shape, itemsize,
373 dptr, dstrides, dsuboffsets,
374 sptr, sstrides, ssuboffsets,
375 mem);
376 return;
377 }
378
379 for (i = 0; i < shape[0]; dptr+=dstrides[0], sptr+=sstrides[0], i++) {
380 char *xdptr = ADJUST_PTR(dptr, dsuboffsets, 0);
381 char *xsptr = ADJUST_PTR(sptr, ssuboffsets, 0);
382
383 copy_rec(shape+1, ndim-1, itemsize,
384 xdptr, dstrides+1, dsuboffsets ? dsuboffsets+1 : NULL,
385 xsptr, sstrides+1, ssuboffsets ? ssuboffsets+1 : NULL,
386 mem);
387 }
388 }
389
390 /* Faster copying of one-dimensional arrays. */
391 static int
392 copy_single(Py_buffer *dest, Py_buffer *src)
393 {
394 char *mem = NULL;
395
396 assert(dest->ndim == 1);
397
398 if (!equiv_structure(dest, src))
399 return -1;
400
401 if (!last_dim_is_contiguous(dest, src)) {
402 mem = PyMem_Malloc(dest->shape[0] * dest->itemsize);
403 if (mem == NULL) {
404 PyErr_NoMemory();
405 return -1;
406 }
407 }
408
409 copy_base(dest->shape, dest->itemsize,
410 dest->buf, dest->strides, dest->suboffsets,
411 src->buf, src->strides, src->suboffsets,
412 mem);
413
414 if (mem)
415 PyMem_Free(mem);
416
417 return 0;
418 }
419
420 /* Recursively copy src to dest. Both buffers must have the same basic
421 structure. Copying is atomic, the function never fails with a partial
422 copy. */
423 static int
424 copy_buffer(Py_buffer *dest, Py_buffer *src)
425 {
426 char *mem = NULL;
427
428 assert(dest->ndim > 0);
429
430 if (!equiv_structure(dest, src))
431 return -1;
432
433 if (!last_dim_is_contiguous(dest, src)) {
434 mem = PyMem_Malloc(dest->shape[dest->ndim-1] * dest->itemsize);
435 if (mem == NULL) {
436 PyErr_NoMemory();
437 return -1;
438 }
439 }
440
441 copy_rec(dest->shape, dest->ndim, dest->itemsize,
442 dest->buf, dest->strides, dest->suboffsets,
443 src->buf, src->strides, src->suboffsets,
444 mem);
445
446 if (mem)
447 PyMem_Free(mem);
448
449 return 0;
450 }
451
452 /* Initialize strides for a C-contiguous array. */
453 static inline void
454 init_strides_from_shape(Py_buffer *view)
455 {
456 Py_ssize_t i;
457
458 assert(view->ndim > 0);
459
460 view->strides[view->ndim-1] = view->itemsize;
461 for (i = view->ndim-2; i >= 0; i--)
462 view->strides[i] = view->strides[i+1] * view->shape[i+1];
463 }
464
465 /* Initialize strides for a Fortran-contiguous array. */
466 static inline void
467 init_fortran_strides_from_shape(Py_buffer *view)
468 {
469 Py_ssize_t i;
470
471 assert(view->ndim > 0);
472
473 view->strides[0] = view->itemsize;
474 for (i = 1; i < view->ndim; i++)
475 view->strides[i] = view->strides[i-1] * view->shape[i-1];
476 }
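/* Worked example (illustrative): for ndim == 3, shape == {3, 4, 5} and
   itemsize == 8, init_strides_from_shape() yields the C-contiguous strides
   {160, 40, 8}, whereas init_fortran_strides_from_shape() yields the
   Fortran-contiguous strides {8, 24, 96}. */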
477
478 /* Copy src to a contiguous representation. order is one of 'C', 'F' (Fortran)
479 or 'A' (Any). Assumptions: src has PyBUF_FULL information, src->ndim >= 1,
480 len(mem) == src->len. */
481 static int
482 buffer_to_contiguous(char *mem, Py_buffer *src, char order)
483 {
484 Py_buffer dest;
485 Py_ssize_t *strides;
486 int ret;
487
488 assert(src->ndim >= 1);
489 assert(src->shape != NULL);
490 assert(src->strides != NULL);
491
492 strides = PyMem_Malloc(src->ndim * (sizeof *src->strides));
493 if (strides == NULL) {
494 PyErr_NoMemory();
495 return -1;
496 }
497
498 /* initialize dest */
499 dest = *src;
500 dest.buf = mem;
501 /* shape is constant and shared: the logical representation of the
502 array is unaltered. */
503
504 /* The physical representation determined by strides (and possibly
505 suboffsets) may change. */
506 dest.strides = strides;
507 if (order == 'C' || order == 'A') {
508 init_strides_from_shape(&dest);
509 }
510 else {
511 init_fortran_strides_from_shape(&dest);
512 }
513
514 dest.suboffsets = NULL;
515
516 ret = copy_buffer(&dest, src);
517
518 PyMem_Free(strides);
519 return ret;
520 }
521
522
523 /****************************************************************************/
524 /* Constructors */
525 /****************************************************************************/
526
527 /* Initialize values that are shared with the managed buffer. */
528 static inline void
529 init_shared_values(Py_buffer *dest, const Py_buffer *src)
530 {
531 dest->obj = src->obj;
532 dest->buf = src->buf;
533 dest->len = src->len;
534 dest->itemsize = src->itemsize;
535 dest->readonly = src->readonly;
536 dest->format = src->format ? src->format : "B";
537 dest->internal = src->internal;
538 }
539
540 /* Copy shape and strides. Reconstruct missing values. */
541 static void
542 init_shape_strides(Py_buffer *dest, const Py_buffer *src)
543 {
544 Py_ssize_t i;
545
546 if (src->ndim == 0) {
547 dest->shape = NULL;
548 dest->strides = NULL;
549 return;
550 }
551 if (src->ndim == 1) {
552 dest->shape[0] = src->shape ? src->shape[0] : src->len / src->itemsize;
553 dest->strides[0] = src->strides ? src->strides[0] : src->itemsize;
554 return;
555 }
556
557 for (i = 0; i < src->ndim; i++)
558 dest->shape[i] = src->shape[i];
559 if (src->strides) {
560 for (i = 0; i < src->ndim; i++)
561 dest->strides[i] = src->strides[i];
562 }
563 else {
564 init_strides_from_shape(dest);
565 }
566 }
567
568 static inline void
569 init_suboffsets(Py_buffer *dest, const Py_buffer *src)
570 {
571 Py_ssize_t i;
572
573 if (src->suboffsets == NULL) {
574 dest->suboffsets = NULL;
575 return;
576 }
577 for (i = 0; i < src->ndim; i++)
578 dest->suboffsets[i] = src->suboffsets[i];
579 }
580
581 /* len = product(shape) * itemsize */
582 static inline void
583 init_len(Py_buffer *view)
584 {
585 Py_ssize_t i, len;
586
587 len = 1;
588 for (i = 0; i < view->ndim; i++)
589 len *= view->shape[i];
590 len *= view->itemsize;
591
592 view->len = len;
593 }
594
595 /* Initialize memoryview buffer properties. */
596 static void
597 init_flags(PyMemoryViewObject *mv)
598 {
599 const Py_buffer *view = &mv->view;
600 int flags = 0;
601
602 switch (view->ndim) {
603 case 0:
604 flags |= (_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_C|
605 _Py_MEMORYVIEW_FORTRAN);
606 break;
607 case 1:
608 if (MV_CONTIGUOUS_NDIM1(view))
609 flags |= (_Py_MEMORYVIEW_C|_Py_MEMORYVIEW_FORTRAN);
610 break;
611 default:
612 if (PyBuffer_IsContiguous(view, 'C'))
613 flags |= _Py_MEMORYVIEW_C;
614 if (PyBuffer_IsContiguous(view, 'F'))
615 flags |= _Py_MEMORYVIEW_FORTRAN;
616 break;
617 }
618
619 if (view->suboffsets) {
620 flags |= _Py_MEMORYVIEW_PIL;
621 flags &= ~(_Py_MEMORYVIEW_C|_Py_MEMORYVIEW_FORTRAN);
622 }
623
624 mv->flags = flags;
625 }
626
627 /* Allocate a new memoryview and perform basic initialization. New memoryviews
628 are exclusively created through the mbuf_add functions. */
629 static inline PyMemoryViewObject *
630 memory_alloc(int ndim)
631 {
632 PyMemoryViewObject *mv;
633
634 mv = (PyMemoryViewObject *)
635 PyObject_GC_NewVar(PyMemoryViewObject, &PyMemoryView_Type, 3*ndim);
636 if (mv == NULL)
637 return NULL;
638
639 mv->mbuf = NULL;
640 mv->hash = -1;
641 mv->flags = 0;
642 mv->exports = 0;
643 mv->view.ndim = ndim;
644 mv->view.shape = mv->ob_array;
645 mv->view.strides = mv->ob_array + ndim;
646 mv->view.suboffsets = mv->ob_array + 2 * ndim;
647 mv->weakreflist = NULL;
648
649 _PyObject_GC_TRACK(mv);
650 return mv;
651 }
652
653 /*
654 Return a new memoryview that is registered with mbuf. If src is NULL,
655 use mbuf->master as the underlying buffer. Otherwise, use src.
656
657 The new memoryview has full buffer information: shape and strides
658 are always present, suboffsets as needed. Arrays are copied to
659 the memoryview's ob_array field.
660 */
661 static PyObject *
662 mbuf_add_view(_PyManagedBufferObject *mbuf, const Py_buffer *src)
663 {
664 PyMemoryViewObject *mv;
665 Py_buffer *dest;
666
667 if (src == NULL)
668 src = &mbuf->master;
669
670 if (src->ndim > PyBUF_MAX_NDIM) {
671 PyErr_SetString(PyExc_ValueError,
672 "memoryview: number of dimensions must not exceed "
673 Py_STRINGIFY(PyBUF_MAX_NDIM));
674 return NULL;
675 }
676
677 mv = memory_alloc(src->ndim);
678 if (mv == NULL)
679 return NULL;
680
681 dest = &mv->view;
682 init_shared_values(dest, src);
683 init_shape_strides(dest, src);
684 init_suboffsets(dest, src);
685 init_flags(mv);
686
687 mv->mbuf = mbuf;
688 Py_INCREF(mbuf);
689 mbuf->exports++;
690
691 return (PyObject *)mv;
692 }
693
694 /* Register an incomplete view: shape, strides, suboffsets and flags still
695 need to be initialized. Use 'ndim' instead of src->ndim to determine the
696 size of the memoryview's ob_array.
697
698 Assumption: ndim <= PyBUF_MAX_NDIM. */
699 static PyObject *
700 mbuf_add_incomplete_view(_PyManagedBufferObject *mbuf, const Py_buffer *src,
701 int ndim)
702 {
703 PyMemoryViewObject *mv;
704 Py_buffer *dest;
705
706 if (src == NULL)
707 src = &mbuf->master;
708
709 assert(ndim <= PyBUF_MAX_NDIM);
710
711 mv = memory_alloc(ndim);
712 if (mv == NULL)
713 return NULL;
714
715 dest = &mv->view;
716 init_shared_values(dest, src);
717
718 mv->mbuf = mbuf;
719 Py_INCREF(mbuf);
720 mbuf->exports++;
721
722 return (PyObject *)mv;
723 }
724
725 /* Expose a raw memory area as a view of contiguous bytes. flags can be
726 PyBUF_READ or PyBUF_WRITE. view->format is set to "B" (unsigned bytes).
727 The memoryview has complete buffer information. */
728 PyObject *
729 PyMemoryView_FromMemory(char *mem, Py_ssize_t size, int flags)
730 {
731 _PyManagedBufferObject *mbuf;
732 PyObject *mv;
733 int readonly;
734
735 assert(mem != NULL);
736 assert(flags == PyBUF_READ || flags == PyBUF_WRITE);
737
738 mbuf = mbuf_alloc();
739 if (mbuf == NULL)
740 return NULL;
741
742 readonly = (flags == PyBUF_WRITE) ? 0 : 1;
743 (void)PyBuffer_FillInfo(&mbuf->master, NULL, mem, size, readonly,
744 PyBUF_FULL_RO);
745
746 mv = mbuf_add_view(mbuf, NULL);
747 Py_DECREF(mbuf);
748
749 return mv;
750 }
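/* Illustrative usage sketch (hypothetical buffer name):

       static char rawdata[256];
       ...
       PyObject *view = PyMemoryView_FromMemory(rawdata, sizeof(rawdata),
                                                PyBUF_WRITE);

   The memory is neither copied nor owned: the caller must keep 'rawdata'
   alive for as long as the returned memoryview (or any view derived from
   it) is in use. */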
751
752 /* Create a memoryview from a given Py_buffer. For simple byte views,
753 PyMemoryView_FromMemory() should be used instead.
754 This function is the only entry point that can create a master buffer
755 without full information. Because of this fact init_shape_strides()
756 must be able to reconstruct missing values. */
757 PyObject *
758 PyMemoryView_FromBuffer(Py_buffer *info)
759 {
760 _PyManagedBufferObject *mbuf;
761 PyObject *mv;
762
763 if (info->buf == NULL) {
764 PyErr_SetString(PyExc_ValueError,
765 "PyMemoryView_FromBuffer(): info->buf must not be NULL");
766 return NULL;
767 }
768
769 mbuf = mbuf_alloc();
770 if (mbuf == NULL)
771 return NULL;
772
773 /* info->obj is either NULL or a borrowed reference. This reference
774 should not be decremented in PyBuffer_Release(). */
775 mbuf->master = *info;
776 mbuf->master.obj = NULL;
777
778 mv = mbuf_add_view(mbuf, NULL);
779 Py_DECREF(mbuf);
780
781 return mv;
782 }
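/* Illustrative usage sketch (hypothetical names): an extension that has
   already filled in a Py_buffer can wrap it directly. Since master.obj is
   set to NULL above, the memoryview owns no reference to an exporter and
   the caller remains responsible for keeping the memory alive.

       Py_buffer info;
       if (PyBuffer_FillInfo(&info, NULL, mydata, mysize, 1, PyBUF_FULL_RO) < 0)
           return NULL;
       return PyMemoryView_FromBuffer(&info);
*/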
783
784 /* Create a memoryview from an object that implements the buffer protocol.
785 If the object is a memoryview, the new memoryview must be registered
786 with the same managed buffer. Otherwise, a new managed buffer is created. */
787 PyObject *
788 PyMemoryView_FromObject(PyObject *v)
789 {
790 _PyManagedBufferObject *mbuf;
791
792 if (PyMemoryView_Check(v)) {
793 PyMemoryViewObject *mv = (PyMemoryViewObject *)v;
794 CHECK_RELEASED(mv);
795 return mbuf_add_view(mv->mbuf, &mv->view);
796 }
797 else if (PyObject_CheckBuffer(v)) {
798 PyObject *ret;
799 mbuf = (_PyManagedBufferObject *)_PyManagedBuffer_FromObject(v);
800 if (mbuf == NULL)
801 return NULL;
802 ret = mbuf_add_view(mbuf, NULL);
803 Py_DECREF(mbuf);
804 return ret;
805 }
806
807 PyErr_Format(PyExc_TypeError,
808 "memoryview: a bytes-like object is required, not '%.200s'",
809 Py_TYPE(v)->tp_name);
810 return NULL;
811 }
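/* Illustrative usage sketch: wrapping a bytes object. The managed buffer
   holds its own reference to the exporter, so the local reference may be
   dropped right away; the view, if created, keeps the bytes object alive.

       PyObject *b = PyBytes_FromStringAndSize("abc", 3);
       PyObject *mv = (b == NULL) ? NULL : PyMemoryView_FromObject(b);
       Py_XDECREF(b);
*/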
812
813 /* Copy the format string from a base object that might vanish. */
814 static int
815 mbuf_copy_format(_PyManagedBufferObject *mbuf, const char *fmt)
816 {
817 if (fmt != NULL) {
818 char *cp = PyMem_Malloc(strlen(fmt)+1);
819 if (cp == NULL) {
820 PyErr_NoMemory();
821 return -1;
822 }
823 mbuf->master.format = strcpy(cp, fmt);
824 mbuf->flags |= _Py_MANAGED_BUFFER_FREE_FORMAT;
825 }
826
827 return 0;
828 }
829
830 /*
831 Return a memoryview that is based on a contiguous copy of src.
832 Assumptions: src has PyBUF_FULL_RO information, src->ndim > 0.
833
834 Ownership rules:
835 1) As usual, the returned memoryview has a private copy
836 of src->shape, src->strides and src->suboffsets.
837 2) src->format is copied to the master buffer and released
838 in mbuf_dealloc(). The releasebufferproc of the bytes
839 object is NULL, so it does not matter that mbuf_release()
840 passes the altered format pointer to PyBuffer_Release().
841 */
842 static PyObject *
843 memory_from_contiguous_copy(Py_buffer *src, char order)
844 {
845 _PyManagedBufferObject *mbuf;
846 PyMemoryViewObject *mv;
847 PyObject *bytes;
848 Py_buffer *dest;
849 int i;
850
851 assert(src->ndim > 0);
852 assert(src->shape != NULL);
853
854 bytes = PyBytes_FromStringAndSize(NULL, src->len);
855 if (bytes == NULL)
856 return NULL;
857
858 mbuf = (_PyManagedBufferObject *)_PyManagedBuffer_FromObject(bytes);
859 Py_DECREF(bytes);
860 if (mbuf == NULL)
861 return NULL;
862
863 if (mbuf_copy_format(mbuf, src->format) < 0) {
864 Py_DECREF(mbuf);
865 return NULL;
866 }
867
868 mv = (PyMemoryViewObject *)mbuf_add_incomplete_view(mbuf, NULL, src->ndim);
869 Py_DECREF(mbuf);
870 if (mv == NULL)
871 return NULL;
872
873 dest = &mv->view;
874
875 /* shared values are initialized correctly except for itemsize */
876 dest->itemsize = src->itemsize;
877
878 /* shape and strides */
879 for (i = 0; i < src->ndim; i++) {
880 dest->shape[i] = src->shape[i];
881 }
882 if (order == 'C' || order == 'A') {
883 init_strides_from_shape(dest);
884 }
885 else {
886 init_fortran_strides_from_shape(dest);
887 }
888 /* suboffsets */
889 dest->suboffsets = NULL;
890
891 /* flags */
892 init_flags(mv);
893
894 if (copy_buffer(dest, src) < 0) {
895 Py_DECREF(mv);
896 return NULL;
897 }
898
899 return (PyObject *)mv;
900 }
901
902 /*
903 Return a new memoryview object based on a contiguous exporter with
904 buffertype={PyBUF_READ, PyBUF_WRITE} and order={'C', 'F'ortran, or 'A'ny}.
905 The logical structure of the input and output buffers is the same
906 (i.e. tolist(input) == tolist(output)), but the physical layout in
907 memory can be explicitly chosen.
908
909 As usual, if buffertype=PyBUF_WRITE, the exporter's buffer must be writable,
910 otherwise it may be writable or read-only.
911
912 If the exporter is already contiguous with the desired target order,
913 the memoryview will be directly based on the exporter.
914
915 Otherwise, if the buffertype is PyBUF_READ, the memoryview will be
916 based on a new bytes object. If order={'C', 'A'ny}, use 'C' order,
917 'F'ortran order otherwise.
918 */
919 PyObject *
920 PyMemoryView_GetContiguous(PyObject *obj, int buffertype, char order)
921 {
922 PyMemoryViewObject *mv;
923 PyObject *ret;
924 Py_buffer *view;
925
926 assert(buffertype == PyBUF_READ || buffertype == PyBUF_WRITE);
927 assert(order == 'C' || order == 'F' || order == 'A');
928
929 mv = (PyMemoryViewObject *)PyMemoryView_FromObject(obj);
930 if (mv == NULL)
931 return NULL;
932
933 view = &mv->view;
934 if (buffertype == PyBUF_WRITE && view->readonly) {
935 PyErr_SetString(PyExc_BufferError,
936 "underlying buffer is not writable");
937 Py_DECREF(mv);
938 return NULL;
939 }
940
941 if (PyBuffer_IsContiguous(view, order))
942 return (PyObject *)mv;
943
944 if (buffertype == PyBUF_WRITE) {
945 PyErr_SetString(PyExc_BufferError,
946 "writable contiguous buffer requested "
947 "for a non-contiguous object.");
948 Py_DECREF(mv);
949 return NULL;
950 }
951
952 ret = memory_from_contiguous_copy(view, order);
953 Py_DECREF(mv);
954 return ret;
955 }
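/* Illustrative usage sketch ('obj' is a hypothetical exporter): obtain a
   read-only, C-contiguous view. If obj is already C-contiguous the view
   refers to it directly; otherwise the view is backed by a fresh bytes
   copy as described above.

       PyObject *mv = PyMemoryView_GetContiguous(obj, PyBUF_READ, 'C');
       if (mv == NULL)
           return NULL;
*/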
956
957
958 /*[clinic input]
959 @classmethod
960 memoryview.__new__
961
962 object: object
963
964 Create a new memoryview object which references the given object.
965 [clinic start generated code]*/
966
967 static PyObject *
968 memoryview_impl(PyTypeObject *type, PyObject *object)
969 /*[clinic end generated code: output=7de78e184ed66db8 input=f04429eb0bdf8c6e]*/
970 {
971 return PyMemoryView_FromObject(object);
972 }
973
974
975 /****************************************************************************/
976 /* Previously in abstract.c */
977 /****************************************************************************/
978
979 typedef struct {
980 Py_buffer view;
981 Py_ssize_t array[1];
982 } Py_buffer_full;
983
984 int
985 PyBuffer_ToContiguous(void *buf, Py_buffer *src, Py_ssize_t len, char order)
986 {
987 Py_buffer_full *fb = NULL;
988 int ret;
989
990 assert(order == 'C' || order == 'F' || order == 'A');
991
992 if (len != src->len) {
993 PyErr_SetString(PyExc_ValueError,
994 "PyBuffer_ToContiguous: len != view->len");
995 return -1;
996 }
997
998 if (PyBuffer_IsContiguous(src, order)) {
999 memcpy((char *)buf, src->buf, len);
1000 return 0;
1001 }
1002
1003 /* buffer_to_contiguous() assumes PyBUF_FULL */
1004 fb = PyMem_Malloc(sizeof *fb + 3 * src->ndim * (sizeof *fb->array));
1005 if (fb == NULL) {
1006 PyErr_NoMemory();
1007 return -1;
1008 }
1009 fb->view.ndim = src->ndim;
1010 fb->view.shape = fb->array;
1011 fb->view.strides = fb->array + src->ndim;
1012 fb->view.suboffsets = fb->array + 2 * src->ndim;
1013
1014 init_shared_values(&fb->view, src);
1015 init_shape_strides(&fb->view, src);
1016 init_suboffsets(&fb->view, src);
1017
1018 src = &fb->view;
1019
1020 ret = buffer_to_contiguous(buf, src, order);
1021 PyMem_Free(fb);
1022 return ret;
1023 }
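/* Illustrative usage sketch (hypothetical names, minimal error handling):
   flatten an arbitrary exporter into a caller-allocated, C-ordered byte
   array.

       Py_buffer src;
       char *tmp;

       if (PyObject_GetBuffer(obj, &src, PyBUF_FULL_RO) < 0)
           return NULL;
       tmp = PyMem_Malloc(src.len > 0 ? src.len : 1);
       if (tmp != NULL)
           (void)PyBuffer_ToContiguous(tmp, &src, src.len, 'C');
       PyBuffer_Release(&src);
*/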
1024
1025
1026 /****************************************************************************/
1027 /* Release/GC management */
1028 /****************************************************************************/
1029
1030 /* Inform the managed buffer that this particular memoryview will not access
1031 the underlying buffer again. If no other memoryviews are registered with
1032 the managed buffer, the underlying buffer is released instantly and
1033 marked as inaccessible for both the memoryview and the managed buffer.
1034
1035 This function fails if the memoryview itself has exported buffers. */
1036 static int
1037 _memory_release(PyMemoryViewObject *self)
1038 {
1039 if (self->flags & _Py_MEMORYVIEW_RELEASED)
1040 return 0;
1041
1042 if (self->exports == 0) {
1043 self->flags |= _Py_MEMORYVIEW_RELEASED;
1044 assert(self->mbuf->exports > 0);
1045 if (--self->mbuf->exports == 0)
1046 mbuf_release(self->mbuf);
1047 return 0;
1048 }
1049 if (self->exports > 0) {
1050 PyErr_Format(PyExc_BufferError,
1051 "memoryview has %zd exported buffer%s", self->exports,
1052 self->exports==1 ? "" : "s");
1053 return -1;
1054 }
1055
1056 PyErr_SetString(PyExc_SystemError,
1057 "_memory_release(): negative export count");
1058 return -1;
1059 }
1060
1061 /*[clinic input]
1062 memoryview.release
1063
1064 Release the underlying buffer exposed by the memoryview object.
1065 [clinic start generated code]*/
1066
1067 static PyObject *
1068 memoryview_release_impl(PyMemoryViewObject *self)
1069 /*[clinic end generated code: output=d0b7e3ba95b7fcb9 input=bc71d1d51f4a52f0]*/
1070 {
1071 if (_memory_release(self) < 0)
1072 return NULL;
1073 Py_RETURN_NONE;
1074 }
1075
1076 static void
1077 memory_dealloc(PyMemoryViewObject *self)
1078 {
1079 assert(self->exports == 0);
1080 _PyObject_GC_UNTRACK(self);
1081 (void)_memory_release(self);
1082 Py_CLEAR(self->mbuf);
1083 if (self->weakreflist != NULL)
1084 PyObject_ClearWeakRefs((PyObject *) self);
1085 PyObject_GC_Del(self);
1086 }
1087
1088 static int
1089 memory_traverse(PyMemoryViewObject *self, visitproc visit, void *arg)
1090 {
1091 Py_VISIT(self->mbuf);
1092 return 0;
1093 }
1094
1095 static int
1096 memory_clear(PyMemoryViewObject *self)
1097 {
1098 (void)_memory_release(self);
1099 Py_CLEAR(self->mbuf);
1100 return 0;
1101 }
1102
1103 static PyObject *
1104 memory_enter(PyObject *self, PyObject *args)
1105 {
1106 CHECK_RELEASED(self);
1107 Py_INCREF(self);
1108 return self;
1109 }
1110
1111 static PyObject *
1112 memory_exit(PyObject *self, PyObject *args)
1113 {
1114 return memoryview_release_impl((PyMemoryViewObject *)self);
1115 }
1116
1117
1118 /****************************************************************************/
1119 /* Casting format and shape */
1120 /****************************************************************************/
1121
1122 #define IS_BYTE_FORMAT(f) (f == 'b' || f == 'B' || f == 'c')
1123
1124 static inline Py_ssize_t
1125 get_native_fmtchar(char *result, const char *fmt)
1126 {
1127 Py_ssize_t size = -1;
1128
1129 if (fmt[0] == '@') fmt++;
1130
1131 switch (fmt[0]) {
1132 case 'c': case 'b': case 'B': size = sizeof(char); break;
1133 case 'h': case 'H': size = sizeof(short); break;
1134 case 'i': case 'I': size = sizeof(int); break;
1135 case 'l': case 'L': size = sizeof(long); break;
1136 case 'q': case 'Q': size = sizeof(long long); break;
1137 case 'n': case 'N': size = sizeof(Py_ssize_t); break;
1138 case 'f': size = sizeof(float); break;
1139 case 'd': size = sizeof(double); break;
1140 case '?': size = sizeof(_Bool); break;
1141 case 'P': size = sizeof(void *); break;
1142 }
1143
1144 if (size > 0 && fmt[1] == '\0') {
1145 *result = fmt[0];
1146 return size;
1147 }
1148
1149 return -1;
1150 }
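/* Example (illustrative): get_native_fmtchar(&c, "@L") stores 'L' in c and
   returns sizeof(long); unknown codes or multi-character formats such as
   "2i" yield -1. */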
1151
1152 static inline const char *
1153 get_native_fmtstr(const char *fmt)
1154 {
1155 int at = 0;
1156
1157 if (fmt[0] == '@') {
1158 at = 1;
1159 fmt++;
1160 }
1161 if (fmt[0] == '\0' || fmt[1] != '\0') {
1162 return NULL;
1163 }
1164
1165 #define RETURN(s) do { return at ? "@" s : s; } while (0)
1166
1167 switch (fmt[0]) {
1168 case 'c': RETURN("c");
1169 case 'b': RETURN("b");
1170 case 'B': RETURN("B");
1171 case 'h': RETURN("h");
1172 case 'H': RETURN("H");
1173 case 'i': RETURN("i");
1174 case 'I': RETURN("I");
1175 case 'l': RETURN("l");
1176 case 'L': RETURN("L");
1177 case 'q': RETURN("q");
1178 case 'Q': RETURN("Q");
1179 case 'n': RETURN("n");
1180 case 'N': RETURN("N");
1181 case 'f': RETURN("f");
1182 case 'd': RETURN("d");
1183 case '?': RETURN("?");
1184 case 'P': RETURN("P");
1185 }
1186
1187 return NULL;
1188 }
1189
1190
1191 /* Cast a memoryview's data type to 'format'. The input array must be
1192 C-contiguous. At least one of input-format, output-format must have
1193 byte size. The output array is 1-D, with the same byte length as the
1194 input array. Thus, view->len must be a multiple of the new itemsize. */
1195 static int
1196 cast_to_1D(PyMemoryViewObject *mv, PyObject *format)
1197 {
1198 Py_buffer *view = &mv->view;
1199 PyObject *asciifmt;
1200 char srcchar, destchar;
1201 Py_ssize_t itemsize;
1202 int ret = -1;
1203
1204 assert(view->ndim >= 1);
1205 assert(Py_SIZE(mv) == 3*view->ndim);
1206 assert(view->shape == mv->ob_array);
1207 assert(view->strides == mv->ob_array + view->ndim);
1208 assert(view->suboffsets == mv->ob_array + 2*view->ndim);
1209
1210 asciifmt = PyUnicode_AsASCIIString(format);
1211 if (asciifmt == NULL)
1212 return ret;
1213
1214 itemsize = get_native_fmtchar(&destchar, PyBytes_AS_STRING(asciifmt));
1215 if (itemsize < 0) {
1216 PyErr_SetString(PyExc_ValueError,
1217 "memoryview: destination format must be a native single "
1218 "character format prefixed with an optional '@'");
1219 goto out;
1220 }
1221
1222 if ((get_native_fmtchar(&srcchar, view->format) < 0 ||
1223 !IS_BYTE_FORMAT(srcchar)) && !IS_BYTE_FORMAT(destchar)) {
1224 PyErr_SetString(PyExc_TypeError,
1225 "memoryview: cannot cast between two non-byte formats");
1226 goto out;
1227 }
1228 if (view->len % itemsize) {
1229 PyErr_SetString(PyExc_TypeError,
1230 "memoryview: length is not a multiple of itemsize");
1231 goto out;
1232 }
1233
1234 view->format = (char *)get_native_fmtstr(PyBytes_AS_STRING(asciifmt));
1235 if (view->format == NULL) {
1236 /* NOT_REACHED: get_native_fmtchar() already validates the format. */
1237 PyErr_SetString(PyExc_RuntimeError,
1238 "memoryview: internal error");
1239 goto out;
1240 }
1241 view->itemsize = itemsize;
1242
1243 view->ndim = 1;
1244 view->shape[0] = view->len / view->itemsize;
1245 view->strides[0] = view->itemsize;
1246 view->suboffsets = NULL;
1247
1248 init_flags(mv);
1249
1250 ret = 0;
1251
1252 out:
1253 Py_DECREF(asciifmt);
1254 return ret;
1255 }
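/* Worked example (illustrative): casting a C-contiguous view with
   view->len == 12 and format 'B' to 'i' on a platform where
   sizeof(int) == 4 results in itemsize 4, ndim 1 and shape {3}. A length
   that is not a multiple of the new itemsize is rejected above. */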
1256
1257 /* The memoryview must have space for 3*len(seq) elements. */
1258 static Py_ssize_t
1259 copy_shape(Py_ssize_t *shape, const PyObject *seq, Py_ssize_t ndim,
1260 Py_ssize_t itemsize)
1261 {
1262 Py_ssize_t x, i;
1263 Py_ssize_t len = itemsize;
1264
1265 for (i = 0; i < ndim; i++) {
1266 PyObject *tmp = PySequence_Fast_GET_ITEM(seq, i);
1267 if (!PyLong_Check(tmp)) {
1268 PyErr_SetString(PyExc_TypeError,
1269 "memoryview.cast(): elements of shape must be integers");
1270 return -1;
1271 }
1272 x = PyLong_AsSsize_t(tmp);
1273 if (x == -1 && PyErr_Occurred()) {
1274 return -1;
1275 }
1276 if (x <= 0) {
1277 /* In general elements of shape may be 0, but not for casting. */
1278 PyErr_Format(PyExc_ValueError,
1279 "memoryview.cast(): elements of shape must be integers > 0");
1280 return -1;
1281 }
1282 if (x > PY_SSIZE_T_MAX / len) {
1283 PyErr_Format(PyExc_ValueError,
1284 "memoryview.cast(): product(shape) > SSIZE_MAX");
1285 return -1;
1286 }
1287 len *= x;
1288 shape[i] = x;
1289 }
1290
1291 return len;
1292 }
1293
1294 /* Cast a 1-D array to a new shape. The result array will be C-contiguous.
1295 If the result array does not have exactly the same byte length as the
1296 input array, raise ValueError. */
1297 static int
1298 cast_to_ND(PyMemoryViewObject *mv, const PyObject *shape, int ndim)
1299 {
1300 Py_buffer *view = &mv->view;
1301 Py_ssize_t len;
1302
1303 assert(view->ndim == 1); /* ndim from cast_to_1D() */
1304 assert(Py_SIZE(mv) == 3*(ndim==0?1:ndim)); /* ndim of result array */
1305 assert(view->shape == mv->ob_array);
1306 assert(view->strides == mv->ob_array + (ndim==0?1:ndim));
1307 assert(view->suboffsets == NULL);
1308
1309 view->ndim = ndim;
1310 if (view->ndim == 0) {
1311 view->shape = NULL;
1312 view->strides = NULL;
1313 len = view->itemsize;
1314 }
1315 else {
1316 len = copy_shape(view->shape, shape, ndim, view->itemsize);
1317 if (len < 0)
1318 return -1;
1319 init_strides_from_shape(view);
1320 }
1321
1322 if (view->len != len) {
1323 PyErr_SetString(PyExc_TypeError,
1324 "memoryview: product(shape) * itemsize != buffer size");
1325 return -1;
1326 }
1327
1328 init_flags(mv);
1329
1330 return 0;
1331 }
1332
1333 static int
1334 zero_in_shape(PyMemoryViewObject *mv)
1335 {
1336 Py_buffer *view = &mv->view;
1337 Py_ssize_t i;
1338
1339 for (i = 0; i < view->ndim; i++)
1340 if (view->shape[i] == 0)
1341 return 1;
1342
1343 return 0;
1344 }
1345
1346 /*
1347 Cast a copy of 'self' to a different view. The input view must
1348 be C-contiguous. The function always casts the input view to a
1349 1-D output according to 'format'. At least one of input-format,
1350 output-format must have byte size.
1351
1352 If 'shape' is given, the 1-D view from the previous step will
1353 be cast to a C-contiguous view with new shape and strides.
1354
1355 All casts must result in views that will have the exact byte
1356 size of the original input. Otherwise, an error is raised.
1357 */
1358 /*[clinic input]
1359 memoryview.cast
1360
1361 format: unicode
1362 shape: object = NULL
1363
1364 Cast a memoryview to a new format or shape.
1365 [clinic start generated code]*/
1366
1367 static PyObject *
1368 memoryview_cast_impl(PyMemoryViewObject *self, PyObject *format,
1369 PyObject *shape)
1370 /*[clinic end generated code: output=bae520b3a389cbab input=138936cc9041b1a3]*/
1371 {
1372 PyMemoryViewObject *mv = NULL;
1373 Py_ssize_t ndim = 1;
1374
1375 CHECK_RELEASED(self);
1376
1377 if (!MV_C_CONTIGUOUS(self->flags)) {
1378 PyErr_SetString(PyExc_TypeError,
1379 "memoryview: casts are restricted to C-contiguous views");
1380 return NULL;
1381 }
1382 if ((shape || self->view.ndim != 1) && zero_in_shape(self)) {
1383 PyErr_SetString(PyExc_TypeError,
1384 "memoryview: cannot cast view with zeros in shape or strides");
1385 return NULL;
1386 }
1387 if (shape) {
1388 CHECK_LIST_OR_TUPLE(shape)
1389 ndim = PySequence_Fast_GET_SIZE(shape);
1390 if (ndim > PyBUF_MAX_NDIM) {
1391 PyErr_SetString(PyExc_ValueError,
1392 "memoryview: number of dimensions must not exceed "
1393 Py_STRINGIFY(PyBUF_MAX_NDIM));
1394 return NULL;
1395 }
1396 if (self->view.ndim != 1 && ndim != 1) {
1397 PyErr_SetString(PyExc_TypeError,
1398 "memoryview: cast must be 1D -> ND or ND -> 1D");
1399 return NULL;
1400 }
1401 }
1402
1403 mv = (PyMemoryViewObject *)
1404 mbuf_add_incomplete_view(self->mbuf, &self->view, ndim==0 ? 1 : (int)ndim);
1405 if (mv == NULL)
1406 return NULL;
1407
1408 if (cast_to_1D(mv, format) < 0)
1409 goto error;
1410 if (shape && cast_to_ND(mv, shape, (int)ndim) < 0)
1411 goto error;
1412
1413 return (PyObject *)mv;
1414
1415 error:
1416 Py_DECREF(mv);
1417 return NULL;
1418 }
1419
1420 /*[clinic input]
1421 memoryview.toreadonly
1422
1423 Return a readonly version of the memoryview.
1424 [clinic start generated code]*/
1425
1426 static PyObject *
1427 memoryview_toreadonly_impl(PyMemoryViewObject *self)
1428 /*[clinic end generated code: output=2c7e056f04c99e62 input=dc06d20f19ba236f]*/
1429 {
1430 CHECK_RELEASED(self);
1431 /* Even if self is already readonly, we still need to create a new
1432 * object for .release() to work correctly.
1433 */
1434 self = (PyMemoryViewObject *) mbuf_add_view(self->mbuf, &self->view);
1435 if (self != NULL) {
1436 self->view.readonly = 1;
1437 }
1438 return (PyObject *) self;
1439 }
1440
1441
1442 /**************************************************************************/
1443 /* getbuffer */
1444 /**************************************************************************/
1445
1446 static int
1447 memory_getbuf(PyMemoryViewObject *self, Py_buffer *view, int flags)
1448 {
1449 Py_buffer *base = &self->view;
1450 int baseflags = self->flags;
1451
1452 CHECK_RELEASED_INT(self);
1453
1454 /* start with complete information */
1455 *view = *base;
1456 view->obj = NULL;
1457
1458 if (REQ_WRITABLE(flags) && base->readonly) {
1459 PyErr_SetString(PyExc_BufferError,
1460 "memoryview: underlying buffer is not writable");
1461 return -1;
1462 }
1463 if (!REQ_FORMAT(flags)) {
1464 /* NULL indicates that the buffer's data type has been cast to 'B'.
1465 view->itemsize is the _previous_ itemsize. If shape is present,
1466 the equality product(shape) * itemsize = len still holds at this
1467 point. The equality calcsize(format) = itemsize does _not_ hold
1468 from here on! */
1469 view->format = NULL;
1470 }
1471
1472 if (REQ_C_CONTIGUOUS(flags) && !MV_C_CONTIGUOUS(baseflags)) {
1473 PyErr_SetString(PyExc_BufferError,
1474 "memoryview: underlying buffer is not C-contiguous");
1475 return -1;
1476 }
1477 if (REQ_F_CONTIGUOUS(flags) && !MV_F_CONTIGUOUS(baseflags)) {
1478 PyErr_SetString(PyExc_BufferError,
1479 "memoryview: underlying buffer is not Fortran contiguous");
1480 return -1;
1481 }
1482 if (REQ_ANY_CONTIGUOUS(flags) && !MV_ANY_CONTIGUOUS(baseflags)) {
1483 PyErr_SetString(PyExc_BufferError,
1484 "memoryview: underlying buffer is not contiguous");
1485 return -1;
1486 }
1487 if (!REQ_INDIRECT(flags) && (baseflags & _Py_MEMORYVIEW_PIL)) {
1488 PyErr_SetString(PyExc_BufferError,
1489 "memoryview: underlying buffer requires suboffsets");
1490 return -1;
1491 }
1492 if (!REQ_STRIDES(flags)) {
1493 if (!MV_C_CONTIGUOUS(baseflags)) {
1494 PyErr_SetString(PyExc_BufferError,
1495 "memoryview: underlying buffer is not C-contiguous");
1496 return -1;
1497 }
1498 view->strides = NULL;
1499 }
1500 if (!REQ_SHAPE(flags)) {
1501 /* PyBUF_SIMPLE or PyBUF_WRITABLE: at this point buf is C-contiguous,
1502 so base->buf = ndbuf->data. */
1503 if (view->format != NULL) {
1504 /* PyBUF_SIMPLE|PyBUF_FORMAT and PyBUF_WRITABLE|PyBUF_FORMAT do
1505 not make sense. */
1506 PyErr_Format(PyExc_BufferError,
1507 "memoryview: cannot cast to unsigned bytes if the format flag "
1508 "is present");
1509 return -1;
1510 }
1511 /* product(shape) * itemsize = len and calcsize(format) = itemsize
1512 do _not_ hold from here on! */
1513 view->ndim = 1;
1514 view->shape = NULL;
1515 }
1516
1517
1518 view->obj = (PyObject *)self;
1519 Py_INCREF(view->obj);
1520 self->exports++;
1521
1522 return 0;
1523 }
1524
1525 static void
1526 memory_releasebuf(PyMemoryViewObject *self, Py_buffer *view)
1527 {
1528 self->exports--;
1529 return;
1530 /* PyBuffer_Release() decrements view->obj after this function returns. */
1531 }
1532
1533 /* Buffer methods */
1534 static PyBufferProcs memory_as_buffer = {
1535 (getbufferproc)memory_getbuf, /* bf_getbuffer */
1536 (releasebufferproc)memory_releasebuf, /* bf_releasebuffer */
1537 };
1538
1539
1540 /****************************************************************************/
1541 /* Optimized pack/unpack for all native format specifiers */
1542 /****************************************************************************/
1543
1544 /*
1545 Fix exceptions:
1546 1) Include format string in the error message.
1547 2) OverflowError -> ValueError.
1548 3) The error message from PyNumber_Index() is not ideal.
1549 */
1550 static int
1551 type_error_int(const char *fmt)
1552 {
1553 PyErr_Format(PyExc_TypeError,
1554 "memoryview: invalid type for format '%s'", fmt);
1555 return -1;
1556 }
1557
1558 static int
1559 value_error_int(const char *fmt)
1560 {
1561 PyErr_Format(PyExc_ValueError,
1562 "memoryview: invalid value for format '%s'", fmt);
1563 return -1;
1564 }
1565
1566 static int
1567 fix_error_int(const char *fmt)
1568 {
1569 assert(PyErr_Occurred());
1570 if (PyErr_ExceptionMatches(PyExc_TypeError)) {
1571 PyErr_Clear();
1572 return type_error_int(fmt);
1573 }
1574 else if (PyErr_ExceptionMatches(PyExc_OverflowError) ||
1575 PyErr_ExceptionMatches(PyExc_ValueError)) {
1576 PyErr_Clear();
1577 return value_error_int(fmt);
1578 }
1579
1580 return -1;
1581 }
1582
1583 /* Accept integer objects or objects with an __index__() method. */
1584 static long
1585 pylong_as_ld(PyObject *item)
1586 {
1587 PyObject *tmp;
1588 long ld;
1589
1590 tmp = _PyNumber_Index(item);
1591 if (tmp == NULL)
1592 return -1;
1593
1594 ld = PyLong_AsLong(tmp);
1595 Py_DECREF(tmp);
1596 return ld;
1597 }
1598
1599 static unsigned long
1600 pylong_as_lu(PyObject *item)
1601 {
1602 PyObject *tmp;
1603 unsigned long lu;
1604
1605 tmp = _PyNumber_Index(item);
1606 if (tmp == NULL)
1607 return (unsigned long)-1;
1608
1609 lu = PyLong_AsUnsignedLong(tmp);
1610 Py_DECREF(tmp);
1611 return lu;
1612 }
1613
1614 static long long
1615 pylong_as_lld(PyObject *item)
1616 {
1617 PyObject *tmp;
1618 long long lld;
1619
1620 tmp = _PyNumber_Index(item);
1621 if (tmp == NULL)
1622 return -1;
1623
1624 lld = PyLong_AsLongLong(tmp);
1625 Py_DECREF(tmp);
1626 return lld;
1627 }
1628
1629 static unsigned long long
1630 pylong_as_llu(PyObject *item)
1631 {
1632 PyObject *tmp;
1633 unsigned long long llu;
1634
1635 tmp = _PyNumber_Index(item);
1636 if (tmp == NULL)
1637 return (unsigned long long)-1;
1638
1639 llu = PyLong_AsUnsignedLongLong(tmp);
1640 Py_DECREF(tmp);
1641 return llu;
1642 }
1643
1644 static Py_ssize_t
1645 pylong_as_zd(PyObject *item)
1646 {
1647 PyObject *tmp;
1648 Py_ssize_t zd;
1649
1650 tmp = _PyNumber_Index(item);
1651 if (tmp == NULL)
1652 return -1;
1653
1654 zd = PyLong_AsSsize_t(tmp);
1655 Py_DECREF(tmp);
1656 return zd;
1657 }
1658
1659 static size_t
1660 pylong_as_zu(PyObject *item)
1661 {
1662 PyObject *tmp;
1663 size_t zu;
1664
1665 tmp = _PyNumber_Index(item);
1666 if (tmp == NULL)
1667 return (size_t)-1;
1668
1669 zu = PyLong_AsSize_t(tmp);
1670 Py_DECREF(tmp);
1671 return zu;
1672 }
1673
1674 /* Timings with the ndarray from _testbuffer.c indicate that using the
1675 struct module is around 15x slower than the two functions below. */
1676
1677 #define UNPACK_SINGLE(dest, ptr, type) \
1678 do { \
1679 type x; \
1680 memcpy((char *)&x, ptr, sizeof x); \
1681 dest = x; \
1682 } while (0)
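/* For example, UNPACK_SINGLE(ld, ptr, short) memcpy()s sizeof(short) bytes
   into a temporary 'short x' and then assigns x to ld. Going through
   memcpy() avoids unaligned reads and strict-aliasing violations that a
   direct cast of 'ptr' could cause. */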
1683
1684 /* Unpack a single item. 'fmt' can be any native format character in struct
1685 module syntax. This function is very sensitive to small changes. With this
1686 layout gcc automatically generates a fast jump table. */
1687 static inline PyObject *
1688 unpack_single(const char *ptr, const char *fmt)
1689 {
1690 unsigned long long llu;
1691 unsigned long lu;
1692 size_t zu;
1693 long long lld;
1694 long ld;
1695 Py_ssize_t zd;
1696 double d;
1697 unsigned char uc;
1698 void *p;
1699
1700 switch (fmt[0]) {
1701
1702 /* signed integers and fast path for 'B' */
1703 case 'B': uc = *((const unsigned char *)ptr); goto convert_uc;
1704 case 'b': ld = *((const signed char *)ptr); goto convert_ld;
1705 case 'h': UNPACK_SINGLE(ld, ptr, short); goto convert_ld;
1706 case 'i': UNPACK_SINGLE(ld, ptr, int); goto convert_ld;
1707 case 'l': UNPACK_SINGLE(ld, ptr, long); goto convert_ld;
1708
1709 /* boolean */
1710 case '?': UNPACK_SINGLE(ld, ptr, _Bool); goto convert_bool;
1711
1712 /* unsigned integers */
1713 case 'H': UNPACK_SINGLE(lu, ptr, unsigned short); goto convert_lu;
1714 case 'I': UNPACK_SINGLE(lu, ptr, unsigned int); goto convert_lu;
1715 case 'L': UNPACK_SINGLE(lu, ptr, unsigned long); goto convert_lu;
1716
1717 /* native 64-bit */
1718 case 'q': UNPACK_SINGLE(lld, ptr, long long); goto convert_lld;
1719 case 'Q': UNPACK_SINGLE(llu, ptr, unsigned long long); goto convert_llu;
1720
1721 /* ssize_t and size_t */
1722 case 'n': UNPACK_SINGLE(zd, ptr, Py_ssize_t); goto convert_zd;
1723 case 'N': UNPACK_SINGLE(zu, ptr, size_t); goto convert_zu;
1724
1725 /* floats */
1726 case 'f': UNPACK_SINGLE(d, ptr, float); goto convert_double;
1727 case 'd': UNPACK_SINGLE(d, ptr, double); goto convert_double;
1728
1729 /* bytes object */
1730 case 'c': goto convert_bytes;
1731
1732 /* pointer */
1733 case 'P': UNPACK_SINGLE(p, ptr, void *); goto convert_pointer;
1734
1735 /* default */
1736 default: goto err_format;
1737 }
1738
1739 convert_uc:
1740 /* PyLong_FromUnsignedLong() is slower */
1741 return PyLong_FromLong(uc);
1742 convert_ld:
1743 return PyLong_FromLong(ld);
1744 convert_lu:
1745 return PyLong_FromUnsignedLong(lu);
1746 convert_lld:
1747 return PyLong_FromLongLong(lld);
1748 convert_llu:
1749 return PyLong_FromUnsignedLongLong(llu);
1750 convert_zd:
1751 return PyLong_FromSsize_t(zd);
1752 convert_zu:
1753 return PyLong_FromSize_t(zu);
1754 convert_double:
1755 return PyFloat_FromDouble(d);
1756 convert_bool:
1757 return PyBool_FromLong(ld);
1758 convert_bytes:
1759 return PyBytes_FromStringAndSize(ptr, 1);
1760 convert_pointer:
1761 return PyLong_FromVoidPtr(p);
1762 err_format:
1763 PyErr_Format(PyExc_NotImplementedError,
1764 "memoryview: format %s not supported", fmt);
1765 return NULL;
1766 }
1767
1768 #define PACK_SINGLE(ptr, src, type) \
1769 do { \
1770 type x; \
1771 x = (type)src; \
1772 memcpy(ptr, (char *)&x, sizeof x); \
1773 } while (0)
1774
1775 /* Pack a single item. 'fmt' can be any native format character in
1776 struct module syntax. */
1777 static int
1778 pack_single(char *ptr, PyObject *item, const char *fmt)
1779 {
1780 unsigned long long llu;
1781 unsigned long lu;
1782 size_t zu;
1783 long long lld;
1784 long ld;
1785 Py_ssize_t zd;
1786 double d;
1787 void *p;
1788
1789 switch (fmt[0]) {
1790 /* signed integers */
1791 case 'b': case 'h': case 'i': case 'l':
1792 ld = pylong_as_ld(item);
1793 if (ld == -1 && PyErr_Occurred())
1794 goto err_occurred;
1795 switch (fmt[0]) {
1796 case 'b':
1797 if (ld < SCHAR_MIN || ld > SCHAR_MAX) goto err_range;
1798 *((signed char *)ptr) = (signed char)ld; break;
1799 case 'h':
1800 if (ld < SHRT_MIN || ld > SHRT_MAX) goto err_range;
1801 PACK_SINGLE(ptr, ld, short); break;
1802 case 'i':
1803 if (ld < INT_MIN || ld > INT_MAX) goto err_range;
1804 PACK_SINGLE(ptr, ld, int); break;
1805 default: /* 'l' */
1806 PACK_SINGLE(ptr, ld, long); break;
1807 }
1808 break;
1809
1810 /* unsigned integers */
1811 case 'B': case 'H': case 'I': case 'L':
1812 lu = pylong_as_lu(item);
1813 if (lu == (unsigned long)-1 && PyErr_Occurred())
1814 goto err_occurred;
1815 switch (fmt[0]) {
1816 case 'B':
1817 if (lu > UCHAR_MAX) goto err_range;
1818 *((unsigned char *)ptr) = (unsigned char)lu; break;
1819 case 'H':
1820 if (lu > USHRT_MAX) goto err_range;
1821 PACK_SINGLE(ptr, lu, unsigned short); break;
1822 case 'I':
1823 if (lu > UINT_MAX) goto err_range;
1824 PACK_SINGLE(ptr, lu, unsigned int); break;
1825 default: /* 'L' */
1826 PACK_SINGLE(ptr, lu, unsigned long); break;
1827 }
1828 break;
1829
1830 /* native 64-bit */
1831 case 'q':
1832 lld = pylong_as_lld(item);
1833 if (lld == -1 && PyErr_Occurred())
1834 goto err_occurred;
1835 PACK_SINGLE(ptr, lld, long long);
1836 break;
1837 case 'Q':
1838 llu = pylong_as_llu(item);
1839 if (llu == (unsigned long long)-1 && PyErr_Occurred())
1840 goto err_occurred;
1841 PACK_SINGLE(ptr, llu, unsigned long long);
1842 break;
1843
1844 /* ssize_t and size_t */
1845 case 'n':
1846 zd = pylong_as_zd(item);
1847 if (zd == -1 && PyErr_Occurred())
1848 goto err_occurred;
1849 PACK_SINGLE(ptr, zd, Py_ssize_t);
1850 break;
1851 case 'N':
1852 zu = pylong_as_zu(item);
1853 if (zu == (size_t)-1 && PyErr_Occurred())
1854 goto err_occurred;
1855 PACK_SINGLE(ptr, zu, size_t);
1856 break;
1857
1858 /* floats */
1859 case 'f': case 'd':
1860 d = PyFloat_AsDouble(item);
1861 if (d == -1.0 && PyErr_Occurred())
1862 goto err_occurred;
1863 if (fmt[0] == 'f') {
1864 PACK_SINGLE(ptr, d, float);
1865 }
1866 else {
1867 PACK_SINGLE(ptr, d, double);
1868 }
1869 break;
1870
1871 /* bool */
1872 case '?':
1873 ld = PyObject_IsTrue(item);
1874 if (ld < 0)
1875 return -1; /* preserve original error */
1876 PACK_SINGLE(ptr, ld, _Bool);
1877 break;
1878
1879 /* bytes object */
1880 case 'c':
1881 if (!PyBytes_Check(item))
1882 return type_error_int(fmt);
1883 if (PyBytes_GET_SIZE(item) != 1)
1884 return value_error_int(fmt);
1885 *ptr = PyBytes_AS_STRING(item)[0];
1886 break;
1887
1888 /* pointer */
1889 case 'P':
1890 p = PyLong_AsVoidPtr(item);
1891 if (p == NULL && PyErr_Occurred())
1892 goto err_occurred;
1893 PACK_SINGLE(ptr, p, void *);
1894 break;
1895
1896 /* default */
1897 default: goto err_format;
1898 }
1899
1900 return 0;
1901
1902 err_occurred:
1903 return fix_error_int(fmt);
1904 err_range:
1905 return value_error_int(fmt);
1906 err_format:
1907 PyErr_Format(PyExc_NotImplementedError,
1908 "memoryview: format %s not supported", fmt);
1909 return -1;
1910 }
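
/* Informal Python-level sketch of the packing path above: assigning to a
writable memoryview element ends up in pack_single(), which range-checks
the value against the view's format.

>>> m = memoryview(bytearray(2)).cast('h')
>>> m[0] = 1000     # packed via PACK_SINGLE(ptr, ld, short)
>>> m[0] = 70000    # out of range for 'h' -> ValueError (err_range)
*/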
1911
1912
1913 /****************************************************************************/
1914 /* unpack using the struct module */
1915 /****************************************************************************/
1916
1917 /* For reasonable performance it is necessary to cache all objects required
1918 for unpacking. An unpacker handles exactly one format: the one passed to
1919 struct_get_unpacker(). Invariant: all pointer fields of the struct are
1920 either NULL or valid pointers. */
1921 struct unpacker {
1922 PyObject *unpack_from; /* Struct.unpack_from(format) */
1923 PyObject *mview; /* cached memoryview */
1924 char *item; /* buffer for mview */
1925 Py_ssize_t itemsize; /* len(item) */
1926 };
1927
1928 static struct unpacker *
1929 unpacker_new(void)
1930 {
1931 struct unpacker *x = PyMem_Malloc(sizeof *x);
1932
1933 if (x == NULL) {
1934 PyErr_NoMemory();
1935 return NULL;
1936 }
1937
1938 x->unpack_from = NULL;
1939 x->mview = NULL;
1940 x->item = NULL;
1941 x->itemsize = 0;
1942
1943 return x;
1944 }
1945
1946 static void
1947 unpacker_free(struct unpacker *x)
1948 {
1949 if (x) {
1950 Py_XDECREF(x->unpack_from);
1951 Py_XDECREF(x->mview);
1952 PyMem_Free(x->item);
1953 PyMem_Free(x);
1954 }
1955 }
1956
1957 /* Return a new unpacker for the given format. */
1958 static struct unpacker *
1959 struct_get_unpacker(const char *fmt, Py_ssize_t itemsize)
1960 {
1961 PyObject *structmodule; /* XXX cache these two */
1962 PyObject *Struct = NULL; /* XXX in globals? */
1963 PyObject *structobj = NULL;
1964 PyObject *format = NULL;
1965 struct unpacker *x = NULL;
1966
1967 structmodule = PyImport_ImportModule("struct");
1968 if (structmodule == NULL)
1969 return NULL;
1970
1971 Struct = PyObject_GetAttrString(structmodule, "Struct");
1972 Py_DECREF(structmodule);
1973 if (Struct == NULL)
1974 return NULL;
1975
1976 x = unpacker_new();
1977 if (x == NULL)
1978 goto error;
1979
1980 format = PyBytes_FromString(fmt);
1981 if (format == NULL)
1982 goto error;
1983
1984 structobj = PyObject_CallOneArg(Struct, format);
1985 if (structobj == NULL)
1986 goto error;
1987
1988 x->unpack_from = PyObject_GetAttrString(structobj, "unpack_from");
1989 if (x->unpack_from == NULL)
1990 goto error;
1991
1992 x->item = PyMem_Malloc(itemsize);
1993 if (x->item == NULL) {
1994 PyErr_NoMemory();
1995 goto error;
1996 }
1997 x->itemsize = itemsize;
1998
1999 x->mview = PyMemoryView_FromMemory(x->item, itemsize, PyBUF_WRITE);
2000 if (x->mview == NULL)
2001 goto error;
2002
2003
2004 out:
2005 Py_XDECREF(Struct);
2006 Py_XDECREF(format);
2007 Py_XDECREF(structobj);
2008 return x;
2009
2010 error:
2011 unpacker_free(x);
2012 x = NULL;
2013 goto out;
2014 }
2015
2016 /* unpack a single item */
2017 static PyObject *
2018 struct_unpack_single(const char *ptr, struct unpacker *x)
2019 {
2020 PyObject *v;
2021
2022 memcpy(x->item, ptr, x->itemsize);
2023 v = PyObject_CallOneArg(x->unpack_from, x->mview);
2024 if (v == NULL)
2025 return NULL;
2026
2027 if (PyTuple_GET_SIZE(v) == 1) {
2028 PyObject *tmp = PyTuple_GET_ITEM(v, 0);
2029 Py_INCREF(tmp);
2030 Py_DECREF(v);
2031 return tmp;
2032 }
2033
2034 return v;
2035 }
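
/* Rough Python-level illustration: the struct-based unpacker is only used
when a format cannot be handled by the native fast paths, for example when
two buffers with different (but compatible) formats are compared. Logical
values, not raw bytes, are compared:

>>> from array import array
>>> memoryview(array('l', [1, 2])) == array('b', [1, 2])
True
*/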
2036
2037
2038 /****************************************************************************/
2039 /* Representations */
2040 /****************************************************************************/
2041
2042 /* Allow the explicit '@' form of the native format; only single-character formats are supported. */
2043 static inline const char *
2044 adjust_fmt(const Py_buffer *view)
2045 {
2046 const char *fmt;
2047
2048 fmt = (view->format[0] == '@') ? view->format+1 : view->format;
2049 if (fmt[0] && fmt[1] == '\0')
2050 return fmt;
2051
2052 PyErr_Format(PyExc_NotImplementedError,
2053 "memoryview: unsupported format %s", view->format);
2054 return NULL;
2055 }
2056
2057 /* Base case for multi-dimensional unpacking. Assumption: ndim == 1. */
2058 static PyObject *
2059 tolist_base(const char *ptr, const Py_ssize_t *shape,
2060 const Py_ssize_t *strides, const Py_ssize_t *suboffsets,
2061 const char *fmt)
2062 {
2063 PyObject *lst, *item;
2064 Py_ssize_t i;
2065
2066 lst = PyList_New(shape[0]);
2067 if (lst == NULL)
2068 return NULL;
2069
2070 for (i = 0; i < shape[0]; ptr+=strides[0], i++) {
2071 const char *xptr = ADJUST_PTR(ptr, suboffsets, 0);
2072 item = unpack_single(xptr, fmt);
2073 if (item == NULL) {
2074 Py_DECREF(lst);
2075 return NULL;
2076 }
2077 PyList_SET_ITEM(lst, i, item);
2078 }
2079
2080 return lst;
2081 }
2082
2083 /* Unpack a multi-dimensional array into a nested list.
2084 Assumption: ndim >= 1. */
2085 static PyObject *
2086 tolist_rec(const char *ptr, Py_ssize_t ndim, const Py_ssize_t *shape,
2087 const Py_ssize_t *strides, const Py_ssize_t *suboffsets,
2088 const char *fmt)
2089 {
2090 PyObject *lst, *item;
2091 Py_ssize_t i;
2092
2093 assert(ndim >= 1);
2094 assert(shape != NULL);
2095 assert(strides != NULL);
2096
2097 if (ndim == 1)
2098 return tolist_base(ptr, shape, strides, suboffsets, fmt);
2099
2100 lst = PyList_New(shape[0]);
2101 if (lst == NULL)
2102 return NULL;
2103
2104 for (i = 0; i < shape[0]; ptr+=strides[0], i++) {
2105 const char *xptr = ADJUST_PTR(ptr, suboffsets, 0);
2106 item = tolist_rec(xptr, ndim-1, shape+1,
2107 strides+1, suboffsets ? suboffsets+1 : NULL,
2108 fmt);
2109 if (item == NULL) {
2110 Py_DECREF(lst);
2111 return NULL;
2112 }
2113 PyList_SET_ITEM(lst, i, item);
2114 }
2115
2116 return lst;
2117 }
2118
2119 /* Return a list representation of the memoryview. Currently only buffers
2120 with native format strings are supported. */
2121 /*[clinic input]
2122 memoryview.tolist
2123
2124 Return the data in the buffer as a list of elements.
2125 [clinic start generated code]*/
2126
2127 static PyObject *
2128 memoryview_tolist_impl(PyMemoryViewObject *self)
2129 /*[clinic end generated code: output=a6cda89214fd5a1b input=21e7d0c1860b211a]*/
2130 {
2131 const Py_buffer *view = &self->view;
2132 const char *fmt;
2133
2134 CHECK_RELEASED(self);
2135
2136 fmt = adjust_fmt(view);
2137 if (fmt == NULL)
2138 return NULL;
2139 if (view->ndim == 0) {
2140 return unpack_single(view->buf, fmt);
2141 }
2142 else if (view->ndim == 1) {
2143 return tolist_base(view->buf, view->shape,
2144 view->strides, view->suboffsets,
2145 fmt);
2146 }
2147 else {
2148 return tolist_rec(view->buf, view->ndim, view->shape,
2149 view->strides, view->suboffsets,
2150 fmt);
2151 }
2152 }
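
/* Python-level sketch of tolist() for the 1-D and multi-dimensional cases:

>>> memoryview(b'abc').tolist()
[97, 98, 99]
>>> memoryview(bytes(range(6))).cast('B', (2, 3)).tolist()
[[0, 1, 2], [3, 4, 5]]
*/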
2153
2154 /*[clinic input]
2155 memoryview.tobytes
2156
2157 order: str(accept={str, NoneType}, c_default="NULL") = 'C'
2158
2159 Return the data in the buffer as a byte string.
2160
2161 Order can be {'C', 'F', 'A'}. When order is 'C' or 'F', the data of the
2162 original array is converted to C or Fortran order. For contiguous views,
2163 'A' returns an exact copy of the physical memory. In particular, in-memory
2164 Fortran order is preserved. For non-contiguous views, the data is converted
2165 to C first. order=None is the same as order='C'.
2166 [clinic start generated code]*/
2167
2168 static PyObject *
2169 memoryview_tobytes_impl(PyMemoryViewObject *self, const char *order)
2170 /*[clinic end generated code: output=1288b62560a32a23 input=0efa3ddaeda573a8]*/
2171 {
2172 Py_buffer *src = VIEW_ADDR(self);
2173 char ord = 'C';
2174 PyObject *bytes;
2175
2176 CHECK_RELEASED(self);
2177
2178 if (order) {
2179 if (strcmp(order, "F") == 0) {
2180 ord = 'F';
2181 }
2182 else if (strcmp(order, "A") == 0) {
2183 ord = 'A';
2184 }
2185 else if (strcmp(order, "C") != 0) {
2186 PyErr_SetString(PyExc_ValueError,
2187 "order must be 'C', 'F' or 'A'");
2188 return NULL;
2189 }
2190 }
2191
2192 bytes = PyBytes_FromStringAndSize(NULL, src->len);
2193 if (bytes == NULL)
2194 return NULL;
2195
2196 if (PyBuffer_ToContiguous(PyBytes_AS_STRING(bytes), src, src->len, ord) < 0) {
2197 Py_DECREF(bytes);
2198 return NULL;
2199 }
2200
2201 return bytes;
2202 }
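
/* Python-level sketch of tobytes(): non-contiguous views are converted to a
contiguous representation first (C order unless requested otherwise):

>>> m = memoryview(bytes(range(6)))
>>> m[::2].tobytes()
b'\x00\x02\x04'
*/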
2203
2204 /*[clinic input]
2205 memoryview.hex
2206
2207 sep: object = NULL
2208 An optional single character or byte to separate hex bytes.
2209 bytes_per_sep: int = 1
2210 How many bytes between separators. Positive values count from the
2211 right, negative values count from the left.
2212
2213 Return the data in the buffer as a str of hexadecimal numbers.
2214
2215 Example:
2216 >>> value = memoryview(b'\xb9\x01\xef')
2217 >>> value.hex()
2218 'b901ef'
2219 >>> value.hex(':')
2220 'b9:01:ef'
2221 >>> value.hex(':', 2)
2222 'b9:01ef'
2223 >>> value.hex(':', -2)
2224 'b901:ef'
2225 [clinic start generated code]*/
2226
2227 static PyObject *
2228 memoryview_hex_impl(PyMemoryViewObject *self, PyObject *sep,
2229 int bytes_per_sep)
2230 /*[clinic end generated code: output=430ca760f94f3ca7 input=539f6a3a5fb56946]*/
2231 {
2232 Py_buffer *src = VIEW_ADDR(self);
2233 PyObject *bytes;
2234 PyObject *ret;
2235
2236 CHECK_RELEASED(self);
2237
2238 if (MV_C_CONTIGUOUS(self->flags)) {
2239 return _Py_strhex_with_sep(src->buf, src->len, sep, bytes_per_sep);
2240 }
2241
2242 bytes = PyBytes_FromStringAndSize(NULL, src->len);
2243 if (bytes == NULL)
2244 return NULL;
2245
2246 if (PyBuffer_ToContiguous(PyBytes_AS_STRING(bytes), src, src->len, 'C') < 0) {
2247 Py_DECREF(bytes);
2248 return NULL;
2249 }
2250
2251 ret = _Py_strhex_with_sep(
2252 PyBytes_AS_STRING(bytes), PyBytes_GET_SIZE(bytes),
2253 sep, bytes_per_sep);
2254 Py_DECREF(bytes);
2255
2256 return ret;
2257 }
2258
2259 static PyObject *
2260 memory_repr(PyMemoryViewObject *self)
2261 {
2262 if (self->flags & _Py_MEMORYVIEW_RELEASED)
2263 return PyUnicode_FromFormat("<released memory at %p>", self);
2264 else
2265 return PyUnicode_FromFormat("<memory at %p>", self);
2266 }
2267
2268
2269 /**************************************************************************/
2270 /* Indexing and slicing */
2271 /**************************************************************************/
2272
2273 static char *
2274 lookup_dimension(Py_buffer *view, char *ptr, int dim, Py_ssize_t index)
2275 {
2276 Py_ssize_t nitems; /* items in the given dimension */
2277
2278 assert(view->shape);
2279 assert(view->strides);
2280
2281 nitems = view->shape[dim];
2282 if (index < 0) {
2283 index += nitems;
2284 }
2285 if (index < 0 || index >= nitems) {
2286 PyErr_Format(PyExc_IndexError,
2287 "index out of bounds on dimension %d", dim + 1);
2288 return NULL;
2289 }
2290
2291 ptr += view->strides[dim] * index;
2292
2293 ptr = ADJUST_PTR(ptr, view->suboffsets, dim);
2294
2295 return ptr;
2296 }
2297
2298 /* Get the pointer to the item at index. */
2299 static char *
2300 ptr_from_index(Py_buffer *view, Py_ssize_t index)
2301 {
2302 char *ptr = (char *)view->buf;
2303 return lookup_dimension(view, ptr, 0, index);
2304 }
2305
2306 /* Get the pointer to the item addressed by a tuple of indices. */
2307 static char *
2308 ptr_from_tuple(Py_buffer *view, PyObject *tup)
2309 {
2310 char *ptr = (char *)view->buf;
2311 Py_ssize_t dim, nindices = PyTuple_GET_SIZE(tup);
2312
2313 if (nindices > view->ndim) {
2314 PyErr_Format(PyExc_TypeError,
2315 "cannot index %zd-dimension view with %zd-element tuple",
2316 view->ndim, nindices);
2317 return NULL;
2318 }
2319
2320 for (dim = 0; dim < nindices; dim++) {
2321 Py_ssize_t index;
2322 index = PyNumber_AsSsize_t(PyTuple_GET_ITEM(tup, dim),
2323 PyExc_IndexError);
2324 if (index == -1 && PyErr_Occurred())
2325 return NULL;
2326 ptr = lookup_dimension(view, ptr, (int)dim, index);
2327 if (ptr == NULL)
2328 return NULL;
2329 }
2330 return ptr;
2331 }
2332
2333 /* Return the item at index. In a one-dimensional view, this is an object
2334 with the type specified by view->format. Otherwise, the item would be a
2335 sub-view (not implemented). Used in memory_subscript() and memory_as_sequence. */
2336 static PyObject *
2337 memory_item(PyMemoryViewObject *self, Py_ssize_t index)
2338 {
2339 Py_buffer *view = &(self->view);
2340 const char *fmt;
2341
2342 CHECK_RELEASED(self);
2343
2344 fmt = adjust_fmt(view);
2345 if (fmt == NULL)
2346 return NULL;
2347
2348 if (view->ndim == 0) {
2349 PyErr_SetString(PyExc_TypeError, "invalid indexing of 0-dim memory");
2350 return NULL;
2351 }
2352 if (view->ndim == 1) {
2353 char *ptr = ptr_from_index(view, index);
2354 if (ptr == NULL)
2355 return NULL;
2356 return unpack_single(ptr, fmt);
2357 }
2358
2359 PyErr_SetString(PyExc_NotImplementedError,
2360 "multi-dimensional sub-views are not implemented");
2361 return NULL;
2362 }
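
/* Python-level sketch: a single integer index is only valid for
one-dimensional views; on a multi-dimensional view it would have to return
a sub-view, which is not implemented.

>>> memoryview(b'abc')[1]
98
>>> memoryview(bytes(6)).cast('B', (2, 3))[0]   # -> NotImplementedError
*/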
2363
2364 /* Return the item at position *key* (a tuple of indices). */
2365 static PyObject *
2366 memory_item_multi(PyMemoryViewObject *self, PyObject *tup)
2367 {
2368 Py_buffer *view = &(self->view);
2369 const char *fmt;
2370 Py_ssize_t nindices = PyTuple_GET_SIZE(tup);
2371 char *ptr;
2372
2373 CHECK_RELEASED(self);
2374
2375 fmt = adjust_fmt(view);
2376 if (fmt == NULL)
2377 return NULL;
2378
2379 if (nindices < view->ndim) {
2380 PyErr_SetString(PyExc_NotImplementedError,
2381 "sub-views are not implemented");
2382 return NULL;
2383 }
2384 ptr = ptr_from_tuple(view, tup);
2385 if (ptr == NULL)
2386 return NULL;
2387 return unpack_single(ptr, fmt);
2388 }
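
/* Python-level sketch of full multi-dimensional indexing with a tuple:

>>> m = memoryview(bytes(range(6))).cast('B', (2, 3))
>>> m[1, 2]
5
>>> m[-1, -1]
5
>>> m[0,]    # partial tuple index -> NotImplementedError (sub-views)
*/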
2389
2390 static inline int
2391 init_slice(Py_buffer *base, PyObject *key, int dim)
2392 {
2393 Py_ssize_t start, stop, step, slicelength;
2394
2395 if (PySlice_Unpack(key, &start, &stop, &step) < 0) {
2396 return -1;
2397 }
2398 slicelength = PySlice_AdjustIndices(base->shape[dim], &start, &stop, step);
2399
2400
2401 if (base->suboffsets == NULL || dim == 0) {
2402 adjust_buf:
2403 base->buf = (char *)base->buf + base->strides[dim] * start;
2404 }
2405 else {
2406 Py_ssize_t n = dim-1;
2407 while (n >= 0 && base->suboffsets[n] < 0)
2408 n--;
2409 if (n < 0)
2410 goto adjust_buf; /* all suboffsets are negative */
2411 base->suboffsets[n] = base->suboffsets[n] + base->strides[dim] * start;
2412 }
2413 base->shape[dim] = slicelength;
2414 base->strides[dim] = base->strides[dim] * step;
2415
2416 return 0;
2417 }
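
/* Python-level sketch of what init_slice() computes for a 1-D view: the
slice start moves buf (or the relevant suboffset), the step scales the
stride, and the adjusted slice length becomes the new shape entry.

>>> m = memoryview(b'abcdef')[1::2]
>>> m.shape, m.strides
((3,), (2,))
>>> bytes(m)
b'bdf'
*/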
2418
2419 static int
2420 is_multislice(PyObject *key)
2421 {
2422 Py_ssize_t size, i;
2423
2424 if (!PyTuple_Check(key))
2425 return 0;
2426 size = PyTuple_GET_SIZE(key);
2427 if (size == 0)
2428 return 0;
2429
2430 for (i = 0; i < size; i++) {
2431 PyObject *x = PyTuple_GET_ITEM(key, i);
2432 if (!PySlice_Check(x))
2433 return 0;
2434 }
2435 return 1;
2436 }
2437
2438 static Py_ssize_t
2439 is_multiindex(PyObject *key)
2440 {
2441 Py_ssize_t size, i;
2442
2443 if (!PyTuple_Check(key))
2444 return 0;
2445 size = PyTuple_GET_SIZE(key);
2446 for (i = 0; i < size; i++) {
2447 PyObject *x = PyTuple_GET_ITEM(key, i);
2448 if (!_PyIndex_Check(x)) {
2449 return 0;
2450 }
2451 }
2452 return 1;
2453 }
2454
2455 /* mv[obj] returns an object holding the data for one element if obj
2456 fully indexes the memoryview or another memoryview object if it
2457 does not.
2458
2459 0-d memoryview objects can be referenced using mv[...] or mv[()]
2460 but not with anything else. */
2461 static PyObject *
2462 memory_subscript(PyMemoryViewObject *self, PyObject *key)
2463 {
2464 Py_buffer *view;
2465 view = &(self->view);
2466
2467 CHECK_RELEASED(self);
2468
2469 if (view->ndim == 0) {
2470 if (PyTuple_Check(key) && PyTuple_GET_SIZE(key) == 0) {
2471 const char *fmt = adjust_fmt(view);
2472 if (fmt == NULL)
2473 return NULL;
2474 return unpack_single(view->buf, fmt);
2475 }
2476 else if (key == Py_Ellipsis) {
2477 Py_INCREF(self);
2478 return (PyObject *)self;
2479 }
2480 else {
2481 PyErr_SetString(PyExc_TypeError,
2482 "invalid indexing of 0-dim memory");
2483 return NULL;
2484 }
2485 }
2486
2487 if (_PyIndex_Check(key)) {
2488 Py_ssize_t index;
2489 index = PyNumber_AsSsize_t(key, PyExc_IndexError);
2490 if (index == -1 && PyErr_Occurred())
2491 return NULL;
2492 return memory_item(self, index);
2493 }
2494 else if (PySlice_Check(key)) {
2495 PyMemoryViewObject *sliced;
2496
2497 sliced = (PyMemoryViewObject *)mbuf_add_view(self->mbuf, view);
2498 if (sliced == NULL)
2499 return NULL;
2500
2501 if (init_slice(&sliced->view, key, 0) < 0) {
2502 Py_DECREF(sliced);
2503 return NULL;
2504 }
2505 init_len(&sliced->view);
2506 init_flags(sliced);
2507
2508 return (PyObject *)sliced;
2509 }
2510 else if (is_multiindex(key)) {
2511 return memory_item_multi(self, key);
2512 }
2513 else if (is_multislice(key)) {
2514 PyErr_SetString(PyExc_NotImplementedError,
2515 "multi-dimensional slicing is not implemented");
2516 return NULL;
2517 }
2518
2519 PyErr_SetString(PyExc_TypeError, "memoryview: invalid slice key");
2520 return NULL;
2521 }
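
/* Python-level sketch of subscripting: an integer fully indexes a 1-D view,
while a slice produces another memoryview that shares the same exporter.

>>> m = memoryview(b'abcde')
>>> m[1]
98
>>> bytes(m[1:4])
b'bcd'
>>> m[1:4].obj is m.obj
True
*/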
2522
2523 static int
2524 memory_ass_sub(PyMemoryViewObject *self, PyObject *key, PyObject *value)
2525 {
2526 Py_buffer *view = &(self->view);
2527 Py_buffer src;
2528 const char *fmt;
2529 char *ptr;
2530
2531 CHECK_RELEASED_INT(self);
2532
2533 fmt = adjust_fmt(view);
2534 if (fmt == NULL)
2535 return -1;
2536
2537 if (view->readonly) {
2538 PyErr_SetString(PyExc_TypeError, "cannot modify read-only memory");
2539 return -1;
2540 }
2541 if (value == NULL) {
2542 PyErr_SetString(PyExc_TypeError, "cannot delete memory");
2543 return -1;
2544 }
2545 if (view->ndim == 0) {
2546 if (key == Py_Ellipsis ||
2547 (PyTuple_Check(key) && PyTuple_GET_SIZE(key)==0)) {
2548 ptr = (char *)view->buf;
2549 return pack_single(ptr, value, fmt);
2550 }
2551 else {
2552 PyErr_SetString(PyExc_TypeError,
2553 "invalid indexing of 0-dim memory");
2554 return -1;
2555 }
2556 }
2557
2558 if (_PyIndex_Check(key)) {
2559 Py_ssize_t index;
2560 if (1 < view->ndim) {
2561 PyErr_SetString(PyExc_NotImplementedError,
2562 "sub-views are not implemented");
2563 return -1;
2564 }
2565 index = PyNumber_AsSsize_t(key, PyExc_IndexError);
2566 if (index == -1 && PyErr_Occurred())
2567 return -1;
2568 ptr = ptr_from_index(view, index);
2569 if (ptr == NULL)
2570 return -1;
2571 return pack_single(ptr, value, fmt);
2572 }
2573 /* one-dimensional: fast path */
2574 if (PySlice_Check(key) && view->ndim == 1) {
2575 Py_buffer dest; /* sliced view */
2576 Py_ssize_t arrays[3];
2577 int ret = -1;
2578
2579 /* rvalue must be an exporter */
2580 if (PyObject_GetBuffer(value, &src, PyBUF_FULL_RO) < 0)
2581 return ret;
2582
2583 dest = *view;
2584 dest.shape = &arrays[0]; dest.shape[0] = view->shape[0];
2585 dest.strides = &arrays[1]; dest.strides[0] = view->strides[0];
2586 if (view->suboffsets) {
2587 dest.suboffsets = &arrays[2]; dest.suboffsets[0] = view->suboffsets[0];
2588 }
2589
2590 if (init_slice(&dest, key, 0) < 0)
2591 goto end_block;
2592 dest.len = dest.shape[0] * dest.itemsize;
2593
2594 ret = copy_single(&dest, &src);
2595
2596 end_block:
2597 PyBuffer_Release(&src);
2598 return ret;
2599 }
2600 if (is_multiindex(key)) {
2601 char *ptr;
2602 if (PyTuple_GET_SIZE(key) < view->ndim) {
2603 PyErr_SetString(PyExc_NotImplementedError,
2604 "sub-views are not implemented");
2605 return -1;
2606 }
2607 ptr = ptr_from_tuple(view, key);
2608 if (ptr == NULL)
2609 return -1;
2610 return pack_single(ptr, value, fmt);
2611 }
2612 if (PySlice_Check(key) || is_multislice(key)) {
2613 /* Call memory_subscript() to produce a sliced lvalue, then copy
2614 rvalue into lvalue. This is already implemented in _testbuffer.c. */
2615 PyErr_SetString(PyExc_NotImplementedError,
2616 "memoryview slice assignments are currently restricted "
2617 "to ndim = 1");
2618 return -1;
2619 }
2620
2621 PyErr_SetString(PyExc_TypeError, "memoryview: invalid slice key");
2622 return -1;
2623 }
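
/* Python-level sketch of assignment: scalar stores go through pack_single(),
and 1-D slice assignment copies from any buffer exporter of matching shape.

>>> b = bytearray(b'hello')
>>> m = memoryview(b)
>>> m[0] = ord('H')
>>> m[1:3] = b'EL'
>>> b
bytearray(b'HELlo')
>>> memoryview(b'x')[0] = 0    # read-only exporter -> TypeError
*/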
2624
2625 static Py_ssize_t
2626 memory_length(PyMemoryViewObject *self)
2627 {
2628 CHECK_RELEASED_INT(self);
2629 return self->view.ndim == 0 ? 1 : self->view.shape[0];
2630 }
2631
2632 /* As mapping */
2633 static PyMappingMethods memory_as_mapping = {
2634 (lenfunc)memory_length, /* mp_length */
2635 (binaryfunc)memory_subscript, /* mp_subscript */
2636 (objobjargproc)memory_ass_sub, /* mp_ass_subscript */
2637 };
2638
2639 /* As sequence */
2640 static PySequenceMethods memory_as_sequence = {
2641 (lenfunc)memory_length, /* sq_length */
2642 0, /* sq_concat */
2643 0, /* sq_repeat */
2644 (ssizeargfunc)memory_item, /* sq_item */
2645 };
2646
2647
2648 /**************************************************************************/
2649 /* Comparisons */
2650 /**************************************************************************/
2651
2652 #define MV_COMPARE_EX -1 /* exception */
2653 #define MV_COMPARE_NOT_IMPL -2 /* not implemented */
2654
2655 /* Translate a StructError to "not equal". Preserve other exceptions. */
2656 static int
2657 fix_struct_error_int(void)
2658 {
2659 assert(PyErr_Occurred());
2660 /* XXX Cannot get at StructError directly? */
2661 if (PyErr_ExceptionMatches(PyExc_ImportError) ||
2662 PyErr_ExceptionMatches(PyExc_MemoryError)) {
2663 return MV_COMPARE_EX;
2664 }
2665 /* StructError: invalid or unknown format -> not equal */
2666 PyErr_Clear();
2667 return 0;
2668 }
2669
2670 /* Unpack and compare single items of p and q using the struct module. */
2671 static int
2672 struct_unpack_cmp(const char *p, const char *q,
2673 struct unpacker *unpack_p, struct unpacker *unpack_q)
2674 {
2675 PyObject *v, *w;
2676 int ret;
2677
2678 /* At this point any exception from the struct module should not be
2679 StructError, since both formats have been accepted already. */
2680 v = struct_unpack_single(p, unpack_p);
2681 if (v == NULL)
2682 return MV_COMPARE_EX;
2683
2684 w = struct_unpack_single(q, unpack_q);
2685 if (w == NULL) {
2686 Py_DECREF(v);
2687 return MV_COMPARE_EX;
2688 }
2689
2690 /* MV_COMPARE_EX == -1: exceptions are preserved */
2691 ret = PyObject_RichCompareBool(v, w, Py_EQ);
2692 Py_DECREF(v);
2693 Py_DECREF(w);
2694
2695 return ret;
2696 }
2697
2698 /* Unpack and compare single items of p and q. If both p and q have the same
2699 single element native format, the comparison uses a fast path (gcc creates
2700 a jump table and converts memcpy into simple assignments on x86/x64).
2701
2702 Otherwise, the comparison is delegated to the struct module, which is
2703 30-60x slower. */
2704 #define CMP_SINGLE(p, q, type) \
2705 do { \
2706 type x; \
2707 type y; \
2708 memcpy((char *)&x, p, sizeof x); \
2709 memcpy((char *)&y, q, sizeof y); \
2710 equal = (x == y); \
2711 } while (0)
2712
2713 static inline int
2714 unpack_cmp(const char *p, const char *q, char fmt,
2715 struct unpacker *unpack_p, struct unpacker *unpack_q)
2716 {
2717 int equal;
2718
2719 switch (fmt) {
2720
2721 /* signed integers and fast path for 'B' */
2722 case 'B': return *((const unsigned char *)p) == *((const unsigned char *)q);
2723 case 'b': return *((const signed char *)p) == *((const signed char *)q);
2724 case 'h': CMP_SINGLE(p, q, short); return equal;
2725 case 'i': CMP_SINGLE(p, q, int); return equal;
2726 case 'l': CMP_SINGLE(p, q, long); return equal;
2727
2728 /* boolean */
2729 case '?': CMP_SINGLE(p, q, _Bool); return equal;
2730
2731 /* unsigned integers */
2732 case 'H': CMP_SINGLE(p, q, unsigned short); return equal;
2733 case 'I': CMP_SINGLE(p, q, unsigned int); return equal;
2734 case 'L': CMP_SINGLE(p, q, unsigned long); return equal;
2735
2736 /* native 64-bit */
2737 case 'q': CMP_SINGLE(p, q, long long); return equal;
2738 case 'Q': CMP_SINGLE(p, q, unsigned long long); return equal;
2739
2740 /* ssize_t and size_t */
2741 case 'n': CMP_SINGLE(p, q, Py_ssize_t); return equal;
2742 case 'N': CMP_SINGLE(p, q, size_t); return equal;
2743
2744 /* floats */
2745 /* XXX DBL_EPSILON? */
2746 case 'f': CMP_SINGLE(p, q, float); return equal;
2747 case 'd': CMP_SINGLE(p, q, double); return equal;
2748
2749 /* bytes object */
2750 case 'c': return *p == *q;
2751
2752 /* pointer */
2753 case 'P': CMP_SINGLE(p, q, void *); return equal;
2754
2755 /* use the struct module */
2756 case '_':
2757 assert(unpack_p);
2758 assert(unpack_q);
2759 return struct_unpack_cmp(p, q, unpack_p, unpack_q);
2760 }
2761
2762 /* NOT REACHED */
2763 PyErr_SetString(PyExc_RuntimeError,
2764 "memoryview: internal error in richcompare");
2765 return MV_COMPARE_EX;
2766 }
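
/* Note that the fast path compares unpacked values rather than raw bytes, so
the usual floating-point semantics apply (a sketch):

>>> from array import array
>>> a = array('d', [float('nan')])
>>> memoryview(a) == a
False
*/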
2767
2768 /* Base case for recursive array comparisons. Assumption: ndim == 1. */
2769 static int
2770 cmp_base(const char *p, const char *q, const Py_ssize_t *shape,
2771 const Py_ssize_t *pstrides, const Py_ssize_t *psuboffsets,
2772 const Py_ssize_t *qstrides, const Py_ssize_t *qsuboffsets,
2773 char fmt, struct unpacker *unpack_p, struct unpacker *unpack_q)
2774 {
2775 Py_ssize_t i;
2776 int equal;
2777
2778 for (i = 0; i < shape[0]; p+=pstrides[0], q+=qstrides[0], i++) {
2779 const char *xp = ADJUST_PTR(p, psuboffsets, 0);
2780 const char *xq = ADJUST_PTR(q, qsuboffsets, 0);
2781 equal = unpack_cmp(xp, xq, fmt, unpack_p, unpack_q);
2782 if (equal <= 0)
2783 return equal;
2784 }
2785
2786 return 1;
2787 }
2788
2789 /* Recursively compare two multi-dimensional arrays that have the same
2790 logical structure. Assumption: ndim >= 1. */
2791 static int
2792 cmp_rec(const char *p, const char *q,
2793 Py_ssize_t ndim, const Py_ssize_t *shape,
2794 const Py_ssize_t *pstrides, const Py_ssize_t *psuboffsets,
2795 const Py_ssize_t *qstrides, const Py_ssize_t *qsuboffsets,
2796 char fmt, struct unpacker *unpack_p, struct unpacker *unpack_q)
2797 {
2798 Py_ssize_t i;
2799 int equal;
2800
2801 assert(ndim >= 1);
2802 assert(shape != NULL);
2803 assert(pstrides != NULL);
2804 assert(qstrides != NULL);
2805
2806 if (ndim == 1) {
2807 return cmp_base(p, q, shape,
2808 pstrides, psuboffsets,
2809 qstrides, qsuboffsets,
2810 fmt, unpack_p, unpack_q);
2811 }
2812
2813 for (i = 0; i < shape[0]; p+=pstrides[0], q+=qstrides[0], i++) {
2814 const char *xp = ADJUST_PTR(p, psuboffsets, 0);
2815 const char *xq = ADJUST_PTR(q, qsuboffsets, 0);
2816 equal = cmp_rec(xp, xq, ndim-1, shape+1,
2817 pstrides+1, psuboffsets ? psuboffsets+1 : NULL,
2818 qstrides+1, qsuboffsets ? qsuboffsets+1 : NULL,
2819 fmt, unpack_p, unpack_q);
2820 if (equal <= 0)
2821 return equal;
2822 }
2823
2824 return 1;
2825 }
2826
2827 static PyObject *
2828 memory_richcompare(PyObject *v, PyObject *w, int op)
2829 {
2830 PyObject *res;
2831 Py_buffer wbuf, *vv;
2832 Py_buffer *ww = NULL;
2833 struct unpacker *unpack_v = NULL;
2834 struct unpacker *unpack_w = NULL;
2835 char vfmt, wfmt;
2836 int equal = MV_COMPARE_NOT_IMPL;
2837
2838 if (op != Py_EQ && op != Py_NE)
2839 goto result; /* Py_NotImplemented */
2840
2841 assert(PyMemoryView_Check(v));
2842 if (BASE_INACCESSIBLE(v)) {
2843 equal = (v == w);
2844 goto result;
2845 }
2846 vv = VIEW_ADDR(v);
2847
2848 if (PyMemoryView_Check(w)) {
2849 if (BASE_INACCESSIBLE(w)) {
2850 equal = (v == w);
2851 goto result;
2852 }
2853 ww = VIEW_ADDR(w);
2854 }
2855 else {
2856 if (PyObject_GetBuffer(w, &wbuf, PyBUF_FULL_RO) < 0) {
2857 PyErr_Clear();
2858 goto result; /* Py_NotImplemented */
2859 }
2860 ww = &wbuf;
2861 }
2862
2863 if (!equiv_shape(vv, ww)) {
2864 PyErr_Clear();
2865 equal = 0;
2866 goto result;
2867 }
2868
2869 /* Use fast unpacking for identical primitive C type formats. */
2870 if (get_native_fmtchar(&vfmt, vv->format) < 0)
2871 vfmt = '_';
2872 if (get_native_fmtchar(&wfmt, ww->format) < 0)
2873 wfmt = '_';
2874 if (vfmt == '_' || wfmt == '_' || vfmt != wfmt) {
2875 /* Use struct module unpacking. NOTE: Even for equal format strings,
2876 memcmp() cannot be used for item comparison since it would give
2877 incorrect results in the case of NaNs or uninitialized padding
2878 bytes. */
2879 vfmt = '_';
2880 unpack_v = struct_get_unpacker(vv->format, vv->itemsize);
2881 if (unpack_v == NULL) {
2882 equal = fix_struct_error_int();
2883 goto result;
2884 }
2885 unpack_w = struct_get_unpacker(ww->format, ww->itemsize);
2886 if (unpack_w == NULL) {
2887 equal = fix_struct_error_int();
2888 goto result;
2889 }
2890 }
2891
2892 if (vv->ndim == 0) {
2893 equal = unpack_cmp(vv->buf, ww->buf,
2894 vfmt, unpack_v, unpack_w);
2895 }
2896 else if (vv->ndim == 1) {
2897 equal = cmp_base(vv->buf, ww->buf, vv->shape,
2898 vv->strides, vv->suboffsets,
2899 ww->strides, ww->suboffsets,
2900 vfmt, unpack_v, unpack_w);
2901 }
2902 else {
2903 equal = cmp_rec(vv->buf, ww->buf, vv->ndim, vv->shape,
2904 vv->strides, vv->suboffsets,
2905 ww->strides, ww->suboffsets,
2906 vfmt, unpack_v, unpack_w);
2907 }
2908
2909 result:
2910 if (equal < 0) {
2911 if (equal == MV_COMPARE_NOT_IMPL)
2912 res = Py_NotImplemented;
2913 else /* exception */
2914 res = NULL;
2915 }
2916 else if ((equal && op == Py_EQ) || (!equal && op == Py_NE))
2917 res = Py_True;
2918 else
2919 res = Py_False;
2920
2921 if (ww == &wbuf)
2922 PyBuffer_Release(ww);
2923
2924 unpacker_free(unpack_v);
2925 unpacker_free(unpack_w);
2926
2927 Py_XINCREF(res);
2928 return res;
2929 }
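
/* Python-level sketch of rich comparison: only == and != are supported, and
any object exporting a buffer with an equivalent shape can be compared.

>>> memoryview(b'abc') == b'abc'
True
>>> memoryview(b'abc') == b'abcd'    # different shape
False
>>> memoryview(b'abc') < b'abc'      # ordering is not supported -> TypeError
*/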
2930
2931 /**************************************************************************/
2932 /* Hash */
2933 /**************************************************************************/
2934
2935 static Py_hash_t
2936 memory_hash(PyMemoryViewObject *self)
2937 {
2938 if (self->hash == -1) {
2939 Py_buffer *view = &self->view;
2940 char *mem = view->buf;
2941 Py_ssize_t ret;
2942 char fmt;
2943
2944 CHECK_RELEASED_INT(self);
2945
2946 if (!view->readonly) {
2947 PyErr_SetString(PyExc_ValueError,
2948 "cannot hash writable memoryview object");
2949 return -1;
2950 }
2951 ret = get_native_fmtchar(&fmt, view->format);
2952 if (ret < 0 || !IS_BYTE_FORMAT(fmt)) {
2953 PyErr_SetString(PyExc_ValueError,
2954 "memoryview: hashing is restricted to formats 'B', 'b' or 'c'");
2955 return -1;
2956 }
2957 if (view->obj != NULL && PyObject_Hash(view->obj) == -1) {
2958 /* Keep the original error message */
2959 return -1;
2960 }
2961
2962 if (!MV_C_CONTIGUOUS(self->flags)) {
2963 mem = PyMem_Malloc(view->len);
2964 if (mem == NULL) {
2965 PyErr_NoMemory();
2966 return -1;
2967 }
2968 if (buffer_to_contiguous(mem, view, 'C') < 0) {
2969 PyMem_Free(mem);
2970 return -1;
2971 }
2972 }
2973
2974 /* Can't fail */
2975 self->hash = _Py_HashBytes(mem, view->len);
2976
2977 if (mem != view->buf)
2978 PyMem_Free(mem);
2979 }
2980
2981 return self->hash;
2982 }
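
/* Python-level sketch of the hashing rules enforced above: the view must be
read-only and have a byte format, and the hash matches that of the
corresponding bytes object.

>>> hash(memoryview(b'abc')) == hash(b'abc')
True
>>> hash(memoryview(bytearray(b'abc')))    # writable -> ValueError
*/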
2983
2984
2985 /**************************************************************************/
2986 /* getters */
2987 /**************************************************************************/
2988
2989 static PyObject *
2990 _IntTupleFromSsizet(int len, Py_ssize_t *vals)
2991 {
2992 int i;
2993 PyObject *o;
2994 PyObject *intTuple;
2995
2996 if (vals == NULL)
2997 return PyTuple_New(0);
2998
2999 intTuple = PyTuple_New(len);
3000 if (!intTuple)
3001 return NULL;
3002 for (i=0; i<len; i++) {
3003 o = PyLong_FromSsize_t(vals[i]);
3004 if (!o) {
3005 Py_DECREF(intTuple);
3006 return NULL;
3007 }
3008 PyTuple_SET_ITEM(intTuple, i, o);
3009 }
3010 return intTuple;
3011 }
3012
3013 static PyObject *
3014 memory_obj_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3015 {
3016 Py_buffer *view = &self->view;
3017
3018 CHECK_RELEASED(self);
3019 if (view->obj == NULL) {
3020 Py_RETURN_NONE;
3021 }
3022 Py_INCREF(view->obj);
3023 return view->obj;
3024 }
3025
3026 static PyObject *
3027 memory_nbytes_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3028 {
3029 CHECK_RELEASED(self);
3030 return PyLong_FromSsize_t(self->view.len);
3031 }
3032
3033 static PyObject *
3034 memory_format_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3035 {
3036 CHECK_RELEASED(self);
3037 return PyUnicode_FromString(self->view.format);
3038 }
3039
3040 static PyObject *
3041 memory_itemsize_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3042 {
3043 CHECK_RELEASED(self);
3044 return PyLong_FromSsize_t(self->view.itemsize);
3045 }
3046
3047 static PyObject *
3048 memory_shape_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3049 {
3050 CHECK_RELEASED(self);
3051 return _IntTupleFromSsizet(self->view.ndim, self->view.shape);
3052 }
3053
3054 static PyObject *
3055 memory_strides_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3056 {
3057 CHECK_RELEASED(self);
3058 return _IntTupleFromSsizet(self->view.ndim, self->view.strides);
3059 }
3060
3061 static PyObject *
3062 memory_suboffsets_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3063 {
3064 CHECK_RELEASED(self);
3065 return _IntTupleFromSsizet(self->view.ndim, self->view.suboffsets);
3066 }
3067
3068 static PyObject *
3069 memory_readonly_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3070 {
3071 CHECK_RELEASED(self);
3072 return PyBool_FromLong(self->view.readonly);
3073 }
3074
3075 static PyObject *
3076 memory_ndim_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
3077 {
3078 CHECK_RELEASED(self);
3079 return PyLong_FromLong(self->view.ndim);
3080 }
3081
3082 static PyObject *
3083 memory_c_contiguous(PyMemoryViewObject *self, PyObject *dummy)
3084 {
3085 CHECK_RELEASED(self);
3086 return PyBool_FromLong(MV_C_CONTIGUOUS(self->flags));
3087 }
3088
3089 static PyObject *
3090 memory_f_contiguous(PyMemoryViewObject *self, PyObject *dummy)
3091 {
3092 CHECK_RELEASED(self);
3093 return PyBool_FromLong(MV_F_CONTIGUOUS(self->flags));
3094 }
3095
3096 static PyObject *
3097 memory_contiguous(PyMemoryViewObject *self, PyObject *dummy)
3098 {
3099 CHECK_RELEASED(self);
3100 return PyBool_FromLong(MV_ANY_CONTIGUOUS(self->flags));
3101 }
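
/* Python-level sketch of the getters defined above:

>>> m = memoryview(bytes(6)).cast('B', (2, 3))
>>> m.ndim, m.shape, m.strides, m.itemsize, m.nbytes
(2, (2, 3), (3, 1), 1, 6)
>>> m.c_contiguous, m.f_contiguous, m.readonly
(True, False, True)
*/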
3102
3103 PyDoc_STRVAR(memory_obj_doc,
3104 "The underlying object of the memoryview.");
3105 PyDoc_STRVAR(memory_nbytes_doc,
3106 "The amount of space in bytes that the array would use in\n"
3107 " a contiguous representation.");
3108 PyDoc_STRVAR(memory_readonly_doc,
3109 "A bool indicating whether the memory is read only.");
3110 PyDoc_STRVAR(memory_itemsize_doc,
3111 "The size in bytes of each element of the memoryview.");
3112 PyDoc_STRVAR(memory_format_doc,
3113 "A string containing the format (in struct module style)\n"
3114 " for each element in the view.");
3115 PyDoc_STRVAR(memory_ndim_doc,
3116 "An integer indicating how many dimensions of a multi-dimensional\n"
3117 " array the memory represents.");
3118 PyDoc_STRVAR(memory_shape_doc,
3119 "A tuple of ndim integers giving the shape of the memory\n"
3120 " as an N-dimensional array.");
3121 PyDoc_STRVAR(memory_strides_doc,
3122 "A tuple of ndim integers giving the size in bytes to access\n"
3123 " each element for each dimension of the array.");
3124 PyDoc_STRVAR(memory_suboffsets_doc,
3125 "A tuple of integers used internally for PIL-style arrays.");
3126 PyDoc_STRVAR(memory_c_contiguous_doc,
3127 "A bool indicating whether the memory is C contiguous.");
3128 PyDoc_STRVAR(memory_f_contiguous_doc,
3129 "A bool indicating whether the memory is Fortran contiguous.");
3130 PyDoc_STRVAR(memory_contiguous_doc,
3131 "A bool indicating whether the memory is contiguous.");
3132
3133
3134 static PyGetSetDef memory_getsetlist[] = {
3135 {"obj", (getter)memory_obj_get, NULL, memory_obj_doc},
3136 {"nbytes", (getter)memory_nbytes_get, NULL, memory_nbytes_doc},
3137 {"readonly", (getter)memory_readonly_get, NULL, memory_readonly_doc},
3138 {"itemsize", (getter)memory_itemsize_get, NULL, memory_itemsize_doc},
3139 {"format", (getter)memory_format_get, NULL, memory_format_doc},
3140 {"ndim", (getter)memory_ndim_get, NULL, memory_ndim_doc},
3141 {"shape", (getter)memory_shape_get, NULL, memory_shape_doc},
3142 {"strides", (getter)memory_strides_get, NULL, memory_strides_doc},
3143 {"suboffsets", (getter)memory_suboffsets_get, NULL, memory_suboffsets_doc},
3144 {"c_contiguous", (getter)memory_c_contiguous, NULL, memory_c_contiguous_doc},
3145 {"f_contiguous", (getter)memory_f_contiguous, NULL, memory_f_contiguous_doc},
3146 {"contiguous", (getter)memory_contiguous, NULL, memory_contiguous_doc},
3147 {NULL, NULL, NULL, NULL},
3148 };
3149
3150
3151 static PyMethodDef memory_methods[] = {
3152 MEMORYVIEW_RELEASE_METHODDEF
3153 MEMORYVIEW_TOBYTES_METHODDEF
3154 MEMORYVIEW_HEX_METHODDEF
3155 MEMORYVIEW_TOLIST_METHODDEF
3156 MEMORYVIEW_CAST_METHODDEF
3157 MEMORYVIEW_TOREADONLY_METHODDEF
3158 {"__enter__", memory_enter, METH_NOARGS, NULL},
3159 {"__exit__", memory_exit, METH_VARARGS, NULL},
3160 {NULL, NULL}
3161 };
3162
3163 /**************************************************************************/
3164 /* Memoryview Iterator */
3165 /**************************************************************************/
3166
3167 static PyTypeObject PyMemoryIter_Type;
3168
3169 typedef struct {
3170 PyObject_HEAD
3171 Py_ssize_t it_index;
3172 PyMemoryViewObject *it_seq; // Set to NULL when iterator is exhausted
3173 Py_ssize_t it_length;
3174 const char *it_fmt;
3175 } memoryiterobject;
3176
3177 static void
3178 memoryiter_dealloc(memoryiterobject *it)
3179 {
3180 _PyObject_GC_UNTRACK(it);
3181 Py_XDECREF(it->it_seq);
3182 PyObject_GC_Del(it);
3183 }
3184
3185 static int
3186 memoryiter_traverse(memoryiterobject *it, visitproc visit, void *arg)
3187 {
3188 Py_VISIT(it->it_seq);
3189 return 0;
3190 }
3191
3192 static PyObject *
3193 memoryiter_next(memoryiterobject *it)
3194 {
3195 PyMemoryViewObject *seq;
3196 seq = it->it_seq;
3197 if (seq == NULL) {
3198 return NULL;
3199 }
3200
3201 if (it->it_index < it->it_length) {
3202 CHECK_RELEASED(seq);
3203 Py_buffer *view = &(seq->view);
3204 char *ptr = (char *)seq->view.buf;
3205
3206 ptr += view->strides[0] * it->it_index++;
3207 ptr = ADJUST_PTR(ptr, view->suboffsets, 0);
3208 if (ptr == NULL) {
3209 return NULL;
3210 }
3211 return unpack_single(ptr, it->it_fmt);
3212 }
3213
3214 it->it_seq = NULL;
3215 Py_DECREF(seq);
3216 return NULL;
3217 }
3218
3219 static PyObject *
3220 memory_iter(PyObject *seq)
3221 {
3222 if (!PyMemoryView_Check(seq)) {
3223 PyErr_BadInternalCall();
3224 return NULL;
3225 }
3226 PyMemoryViewObject *obj = (PyMemoryViewObject *)seq;
3227 int ndims = obj->view.ndim;
3228 if (ndims == 0) {
3229 PyErr_SetString(PyExc_TypeError, "invalid indexing of 0-dim memory");
3230 return NULL;
3231 }
3232 if (ndims != 1) {
3233 PyErr_SetString(PyExc_NotImplementedError,
3234 "multi-dimensional sub-views are not implemented");
3235 return NULL;
3236 }
3237
3238 const char *fmt = adjust_fmt(&obj->view);
3239 if (fmt == NULL) {
3240 return NULL;
3241 }
3242
3243 memoryiterobject *it;
3244 it = PyObject_GC_New(memoryiterobject, &PyMemoryIter_Type);
3245 if (it == NULL) {
3246 return NULL;
3247 }
3248 it->it_fmt = fmt;
3249 it->it_length = memory_length(obj);
3250 it->it_index = 0;
3251 Py_INCREF(seq);
3252 it->it_seq = obj;
3253 _PyObject_GC_TRACK(it);
3254 return (PyObject *)it;
3255 }
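
/* Python-level sketch of iteration: only one-dimensional views are iterable;
each step unpacks one element with the view's format.

>>> list(memoryview(b'ab'))
[97, 98]
>>> list(memoryview(bytes(6)).cast('B', (2, 3)))   # -> NotImplementedError
*/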
3256
3257 static PyTypeObject PyMemoryIter_Type = {
3258 PyVarObject_HEAD_INIT(&PyType_Type, 0)
3259 .tp_name = "memory_iterator",
3260 .tp_basicsize = sizeof(memoryiterobject),
3261 // methods
3262 .tp_dealloc = (destructor)memoryiter_dealloc,
3263 .tp_getattro = PyObject_GenericGetAttr,
3264 .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,
3265 .tp_traverse = (traverseproc)memoryiter_traverse,
3266 .tp_iter = PyObject_SelfIter,
3267 .tp_iternext = (iternextfunc)memoryiter_next,
3268 };
3269
3270 PyTypeObject PyMemoryView_Type = {
3271 PyVarObject_HEAD_INIT(&PyType_Type, 0)
3272 "memoryview", /* tp_name */
3273 offsetof(PyMemoryViewObject, ob_array), /* tp_basicsize */
3274 sizeof(Py_ssize_t), /* tp_itemsize */
3275 (destructor)memory_dealloc, /* tp_dealloc */
3276 0, /* tp_vectorcall_offset */
3277 0, /* tp_getattr */
3278 0, /* tp_setattr */
3279 0, /* tp_as_async */
3280 (reprfunc)memory_repr, /* tp_repr */
3281 0, /* tp_as_number */
3282 &memory_as_sequence, /* tp_as_sequence */
3283 &memory_as_mapping, /* tp_as_mapping */
3284 (hashfunc)memory_hash, /* tp_hash */
3285 0, /* tp_call */
3286 0, /* tp_str */
3287 PyObject_GenericGetAttr, /* tp_getattro */
3288 0, /* tp_setattro */
3289 &memory_as_buffer, /* tp_as_buffer */
3290 Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC |
3291 Py_TPFLAGS_SEQUENCE, /* tp_flags */
3292 memoryview__doc__, /* tp_doc */
3293 (traverseproc)memory_traverse, /* tp_traverse */
3294 (inquiry)memory_clear, /* tp_clear */
3295 memory_richcompare, /* tp_richcompare */
3296 offsetof(PyMemoryViewObject, weakreflist),/* tp_weaklistoffset */
3297 memory_iter, /* tp_iter */
3298 0, /* tp_iternext */
3299 memory_methods, /* tp_methods */
3300 0, /* tp_members */
3301 memory_getsetlist, /* tp_getset */
3302 0, /* tp_base */
3303 0, /* tp_dict */
3304 0, /* tp_descr_get */
3305 0, /* tp_descr_set */
3306 0, /* tp_dictoffset */
3307 0, /* tp_init */
3308 0, /* tp_alloc */
3309 memoryview, /* tp_new */
3310 };
3311