aboutsummaryrefslogtreecommitdiffstats
path: root/contrib/tools/python3/Objects/memoryobject.c
diff options
context:
space:
mode:
authorAlexSm <alex@ydb.tech>2024-03-05 10:40:59 +0100
committerGitHub <noreply@github.com>2024-03-05 12:40:59 +0300
commit1ac13c847b5358faba44dbb638a828e24369467b (patch)
tree07672b4dd3604ad3dee540a02c6494cb7d10dc3d /contrib/tools/python3/Objects/memoryobject.c
parentffcca3e7f7958ddc6487b91d3df8c01054bd0638 (diff)
downloadydb-1ac13c847b5358faba44dbb638a828e24369467b.tar.gz
Library import 16 (#2433)
Co-authored-by: robot-piglet <robot-piglet@yandex-team.com> Co-authored-by: deshevoy <deshevoy@yandex-team.com> Co-authored-by: robot-contrib <robot-contrib@yandex-team.com> Co-authored-by: thegeorg <thegeorg@yandex-team.com> Co-authored-by: robot-ya-builder <robot-ya-builder@yandex-team.com> Co-authored-by: svidyuk <svidyuk@yandex-team.com> Co-authored-by: shadchin <shadchin@yandex-team.com> Co-authored-by: robot-ratatosk <robot-ratatosk@yandex-team.com> Co-authored-by: innokentii <innokentii@yandex-team.com> Co-authored-by: arkady-e1ppa <arkady-e1ppa@yandex-team.com> Co-authored-by: snermolaev <snermolaev@yandex-team.com> Co-authored-by: dimdim11 <dimdim11@yandex-team.com> Co-authored-by: kickbutt <kickbutt@yandex-team.com> Co-authored-by: abdullinsaid <abdullinsaid@yandex-team.com> Co-authored-by: korsunandrei <korsunandrei@yandex-team.com> Co-authored-by: petrk <petrk@yandex-team.com> Co-authored-by: miroslav2 <miroslav2@yandex-team.com> Co-authored-by: serjflint <serjflint@yandex-team.com> Co-authored-by: akhropov <akhropov@yandex-team.com> Co-authored-by: prettyboy <prettyboy@yandex-team.com> Co-authored-by: ilikepugs <ilikepugs@yandex-team.com> Co-authored-by: hiddenpath <hiddenpath@yandex-team.com> Co-authored-by: mikhnenko <mikhnenko@yandex-team.com> Co-authored-by: spreis <spreis@yandex-team.com> Co-authored-by: andreyshspb <andreyshspb@yandex-team.com> Co-authored-by: dimaandreev <dimaandreev@yandex-team.com> Co-authored-by: rashid <rashid@yandex-team.com> Co-authored-by: robot-ydb-importer <robot-ydb-importer@yandex-team.com> Co-authored-by: r-vetrov <r-vetrov@yandex-team.com> Co-authored-by: ypodlesov <ypodlesov@yandex-team.com> Co-authored-by: zaverden <zaverden@yandex-team.com> Co-authored-by: vpozdyayev <vpozdyayev@yandex-team.com> Co-authored-by: robot-cozmo <robot-cozmo@yandex-team.com> Co-authored-by: v-korovin <v-korovin@yandex-team.com> Co-authored-by: arikon <arikon@yandex-team.com> Co-authored-by: khoden <khoden@yandex-team.com> 
Co-authored-by: psydmm <psydmm@yandex-team.com> Co-authored-by: robot-javacom <robot-javacom@yandex-team.com> Co-authored-by: dtorilov <dtorilov@yandex-team.com> Co-authored-by: sennikovmv <sennikovmv@yandex-team.com> Co-authored-by: hcpp <hcpp@ydb.tech>
Diffstat (limited to 'contrib/tools/python3/Objects/memoryobject.c')
-rw-r--r--contrib/tools/python3/Objects/memoryobject.c3409
1 file changed, 3409 insertions, 0 deletions
diff --git a/contrib/tools/python3/Objects/memoryobject.c b/contrib/tools/python3/Objects/memoryobject.c
new file mode 100644
index 0000000000..b0168044d9
--- /dev/null
+++ b/contrib/tools/python3/Objects/memoryobject.c
@@ -0,0 +1,3409 @@
+/*
+ * Memoryview object implementation
+ * --------------------------------
+ *
+ * This implementation is a complete rewrite contributed by Stefan Krah in
+ * Python 3.3. Substantial credit goes to Antoine Pitrou (who had already
+ * fortified and rewritten the previous implementation) and Nick Coghlan
+ * (who came up with the idea of the ManagedBuffer) for analyzing the complex
+ * ownership rules.
+ *
+ */
+
+#include "Python.h"
+#include "pycore_abstract.h" // _PyIndex_Check()
+#include "pycore_object.h" // _PyObject_GC_UNTRACK()
+#include "pycore_strhex.h" // _Py_strhex_with_sep()
+#include <stddef.h> // offsetof()
+
+/*[clinic input]
+class memoryview "PyMemoryViewObject *" "&PyMemoryView_Type"
+[clinic start generated code]*/
+/*[clinic end generated code: output=da39a3ee5e6b4b0d input=e2e49d2192835219]*/
+
+#include "clinic/memoryobject.c.h"
+
+/****************************************************************************/
+/* ManagedBuffer Object */
+/****************************************************************************/
+
+/*
+ ManagedBuffer Object:
+ ---------------------
+
+ The purpose of this object is to facilitate the handling of chained
+ memoryviews that have the same underlying exporting object. PEP-3118
+ allows the underlying object to change while a view is exported. This
+ could lead to unexpected results when constructing a new memoryview
+ from an existing memoryview.
+
+ Rather than repeatedly redirecting buffer requests to the original base
+ object, all chained memoryviews use a single buffer snapshot. This
+ snapshot is generated by the constructor _PyManagedBuffer_FromObject().
+
+ Ownership rules:
+ ----------------
+
+ The master buffer inside a managed buffer is filled in by the original
+ base object. shape, strides, suboffsets and format are read-only for
+ all consumers.
+
+ A memoryview's buffer is a private copy of the exporter's buffer. shape,
+ strides and suboffsets belong to the memoryview and are thus writable.
+
+ If a memoryview itself exports several buffers via memory_getbuf(), all
+ buffer copies share shape, strides and suboffsets. In this case, the
+ arrays are NOT writable.
+
+ Reference count assumptions:
+ ----------------------------
+
+ The 'obj' member of a Py_buffer must either be NULL or refer to the
+ exporting base object. In the Python codebase, all getbufferprocs
+ return a new reference to view.obj (example: bytes_buffer_getbuffer()).
+
+ PyBuffer_Release() decrements view.obj (if non-NULL), so the
+ releasebufferprocs must NOT decrement view.obj.
+*/
+
+
+/* Allocate a fresh managed buffer: no exports yet and an empty master
+   buffer. The caller is expected to fill in 'master' afterwards (e.g. via
+   PyObject_GetBuffer()). Returns NULL on allocation failure. */
+static inline _PyManagedBufferObject *
+mbuf_alloc(void)
+{
+    _PyManagedBufferObject *self = (_PyManagedBufferObject *)
+        PyObject_GC_New(_PyManagedBufferObject, &_PyManagedBuffer_Type);
+    if (self == NULL) {
+        return NULL;
+    }
+    self->flags = 0;
+    self->exports = 0;
+    self->master.obj = NULL;
+    _PyObject_GC_TRACK(self);
+    return self;
+}
+
+/* Create a managed buffer whose master buffer is a snapshot of 'base',
+   obtained via PyObject_GetBuffer() with the given request flags. */
+static PyObject *
+_PyManagedBuffer_FromObject(PyObject *base, int flags)
+{
+    _PyManagedBufferObject *mbuf;
+
+    mbuf = mbuf_alloc();
+    if (mbuf == NULL)
+        return NULL;
+
+    if (PyObject_GetBuffer(base, &mbuf->master, flags) < 0) {
+        /* Clear obj so the dealloc path does not release a buffer that
+           was never successfully acquired. */
+        mbuf->master.obj = NULL;
+        Py_DECREF(mbuf);
+        return NULL;
+    }
+
+    return (PyObject *)mbuf;
+}
+
+/* Release the master buffer exactly once; subsequent calls are no-ops. */
+static void
+mbuf_release(_PyManagedBufferObject *self)
+{
+    if (self->flags&_Py_MANAGED_BUFFER_RELEASED)
+        return;
+
+    /* NOTE: at this point self->exports can still be > 0 if this function
+       is called from mbuf_clear() to break up a reference cycle. */
+    self->flags |= _Py_MANAGED_BUFFER_RELEASED;
+
+    /* PyBuffer_Release() decrements master->obj and sets it to NULL. */
+    _PyObject_GC_UNTRACK(self);
+    PyBuffer_Release(&self->master);
+}
+
+static void
+mbuf_dealloc(_PyManagedBufferObject *self)
+{
+    /* No memoryview may still be registered with this managed buffer. */
+    assert(self->exports == 0);
+    mbuf_release(self);
+    /* The format string is owned by this object only when it was copied
+       in by mbuf_copy_format(). */
+    if (self->flags&_Py_MANAGED_BUFFER_FREE_FORMAT)
+        PyMem_Free(self->master.format);
+    PyObject_GC_Del(self);
+}
+
+/* GC traversal: the only owned reference is the exporting base object. */
+static int
+mbuf_traverse(_PyManagedBufferObject *self, visitproc visit, void *arg)
+{
+    Py_VISIT(self->master.obj);
+    return 0;
+}
+
+/* GC clear: break a reference cycle by releasing the master buffer, which
+   drops the reference to master.obj even while exports are still active. */
+static int
+mbuf_clear(_PyManagedBufferObject *self)
+{
+    assert(self->exports >= 0);
+    mbuf_release(self);
+    return 0;
+}
+
+/* Internal helper type. It has no tp_new: instances are created only via
+   mbuf_alloc() and are never exposed to Python code. */
+PyTypeObject _PyManagedBuffer_Type = {
+    PyVarObject_HEAD_INIT(&PyType_Type, 0)
+    "managedbuffer",
+    sizeof(_PyManagedBufferObject),
+    0,
+    (destructor)mbuf_dealloc,                /* tp_dealloc */
+    0,                                       /* tp_vectorcall_offset */
+    0,                                       /* tp_getattr */
+    0,                                       /* tp_setattr */
+    0,                                       /* tp_as_async */
+    0,                                       /* tp_repr */
+    0,                                       /* tp_as_number */
+    0,                                       /* tp_as_sequence */
+    0,                                       /* tp_as_mapping */
+    0,                                       /* tp_hash */
+    0,                                       /* tp_call */
+    0,                                       /* tp_str */
+    PyObject_GenericGetAttr,                 /* tp_getattro */
+    0,                                       /* tp_setattro */
+    0,                                       /* tp_as_buffer */
+    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
+    0,                                       /* tp_doc */
+    (traverseproc)mbuf_traverse,             /* tp_traverse */
+    (inquiry)mbuf_clear                      /* tp_clear */
+};
+
+
+/****************************************************************************/
+/* MemoryView Object */
+/****************************************************************************/
+
+/* In the process of breaking reference cycles mbuf_release() can be
+   called before memory_release(). */
+#define BASE_INACCESSIBLE(mv) \
+    (((PyMemoryViewObject *)mv)->flags&_Py_MEMORYVIEW_RELEASED || \
+     ((PyMemoryViewObject *)mv)->mbuf->flags&_Py_MANAGED_BUFFER_RELEASED)
+
+/* Guards: raise ValueError and return NULL (or -1 for the _INT variant)
+   if the view or its managed buffer has already been released. */
+#define CHECK_RELEASED(mv) \
+    if (BASE_INACCESSIBLE(mv)) { \
+        PyErr_SetString(PyExc_ValueError, \
+            "operation forbidden on released memoryview object"); \
+        return NULL; \
+    }
+
+#define CHECK_RELEASED_INT(mv) \
+    if (BASE_INACCESSIBLE(mv)) { \
+        PyErr_SetString(PyExc_ValueError, \
+            "operation forbidden on released memoryview object"); \
+        return -1; \
+    }
+
+/* A view flagged restricted refuses to serve as the base of new views. */
+#define CHECK_RESTRICTED(mv) \
+    if (((PyMemoryViewObject *)(mv))->flags & _Py_MEMORYVIEW_RESTRICTED) { \
+        PyErr_SetString(PyExc_ValueError, \
+            "cannot create new view on restricted memoryview"); \
+        return NULL; \
+    }
+
+#define CHECK_RESTRICTED_INT(mv) \
+    if (((PyMemoryViewObject *)(mv))->flags & _Py_MEMORYVIEW_RESTRICTED) { \
+        PyErr_SetString(PyExc_ValueError, \
+            "cannot create new view on restricted memoryview"); \
+        return -1; \
+    }
+
+/* See gh-92888. These macros signal that we need to check the memoryview
+   again due to possible read after frees. */
+#define CHECK_RELEASED_AGAIN(mv) CHECK_RELEASED(mv)
+#define CHECK_RELEASED_INT_AGAIN(mv) CHECK_RELEASED_INT(mv)
+
+#define CHECK_LIST_OR_TUPLE(v) \
+    if (!PyList_Check(v) && !PyTuple_Check(v)) { \
+        PyErr_SetString(PyExc_TypeError, \
+            #v " must be a list or a tuple"); \
+        return NULL; \
+    }
+
+#define VIEW_ADDR(mv) (&((PyMemoryViewObject *)mv)->view)
+
+/* Check for the presence of suboffsets in the first dimension. */
+#define HAVE_PTR(suboffsets, dim) (suboffsets && suboffsets[dim] >= 0)
+/* Adjust ptr if suboffsets are present. */
+#define ADJUST_PTR(ptr, suboffsets, dim) \
+    (HAVE_PTR(suboffsets, dim) ? *((char**)ptr) + suboffsets[dim] : ptr)
+
+/* Memoryview buffer properties */
+#define MV_C_CONTIGUOUS(flags) (flags&(_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_C))
+#define MV_F_CONTIGUOUS(flags) \
+    (flags&(_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_FORTRAN))
+#define MV_ANY_CONTIGUOUS(flags) \
+    (flags&(_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_C|_Py_MEMORYVIEW_FORTRAN))
+
+/* Fast contiguity test. Caller must ensure suboffsets==NULL and ndim==1. */
+#define MV_CONTIGUOUS_NDIM1(view) \
+    ((view)->shape[0] == 1 || (view)->strides[0] == (view)->itemsize)
+
+/* getbuffer() requests */
+#define REQ_INDIRECT(flags) ((flags&PyBUF_INDIRECT) == PyBUF_INDIRECT)
+#define REQ_C_CONTIGUOUS(flags) ((flags&PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS)
+#define REQ_F_CONTIGUOUS(flags) ((flags&PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS)
+#define REQ_ANY_CONTIGUOUS(flags) ((flags&PyBUF_ANY_CONTIGUOUS) == PyBUF_ANY_CONTIGUOUS)
+#define REQ_STRIDES(flags) ((flags&PyBUF_STRIDES) == PyBUF_STRIDES)
+#define REQ_SHAPE(flags) ((flags&PyBUF_ND) == PyBUF_ND)
+#define REQ_WRITABLE(flags) (flags&PyBUF_WRITABLE)
+#define REQ_FORMAT(flags) (flags&PyBUF_FORMAT)
+
+/**************************************************************************/
+/* Copy memoryview buffers */
+/**************************************************************************/
+
+/* The functions in this section take a source and a destination buffer
+ with the same logical structure: format, itemsize, ndim and shape
+ are identical, with ndim > 0.
+
+ NOTE: All buffers are assumed to have PyBUF_FULL information, which
+ is the case for memoryviews! */
+
+
+/* Assumptions: ndim >= 1. The macro tests for a corner case that should
+   perhaps be explicitly forbidden in the PEP. */
+/* NOTE: the macro indexes with dest->ndim-1 regardless of its 'view'
+   argument; the caller below guarantees dest->ndim == src->ndim, so the
+   index is valid for both buffers. */
+#define HAVE_SUBOFFSETS_IN_LAST_DIM(view) \
+    (view->suboffsets && view->suboffsets[dest->ndim-1] >= 0)
+
+static inline int
+last_dim_is_contiguous(const Py_buffer *dest, const Py_buffer *src)
+{
+    assert(dest->ndim > 0 && src->ndim > 0);
+    /* Contiguous iff neither buffer is indirect in the last dimension and
+       both step exactly one item at a time there. */
+    return (!HAVE_SUBOFFSETS_IN_LAST_DIM(dest) &&
+            !HAVE_SUBOFFSETS_IN_LAST_DIM(src) &&
+            dest->strides[dest->ndim-1] == dest->itemsize &&
+            src->strides[src->ndim-1] == src->itemsize);
+}
+
+/* This is not a general function for determining format equivalence.
+   It is used in copy_single() and copy_buffer() to weed out non-matching
+   formats. Skipping the '@' character is specifically used in slice
+   assignments, where the lvalue is already known to have a single character
+   format. This is a performance hack that could be rewritten (if properly
+   benchmarked). */
+static inline int
+equiv_format(const Py_buffer *dest, const Py_buffer *src)
+{
+    assert(dest->format && src->format);
+
+    /* A leading '@' (native size/alignment marker) is insignificant here. */
+    const char *dfmt = dest->format;
+    if (*dfmt == '@')
+        dfmt++;
+    const char *sfmt = src->format;
+    if (*sfmt == '@')
+        sfmt++;
+
+    return dest->itemsize == src->itemsize && strcmp(dfmt, sfmt) == 0;
+}
+
+/* Two shapes are equivalent if they are either equal or identical up
+   to a zero element at the same position. For example, in NumPy arrays
+   the shapes [1, 0, 5] and [1, 0, 7] are equivalent. */
+static inline int
+equiv_shape(const Py_buffer *dest, const Py_buffer *src)
+{
+    if (dest->ndim != src->ndim)
+        return 0;
+
+    for (int i = 0; i < dest->ndim; i++) {
+        Py_ssize_t extent = dest->shape[i];
+        if (extent != src->shape[i])
+            return 0;
+        if (extent == 0)
+            break;  /* empty array: trailing dimensions are irrelevant */
+    }
+
+    return 1;
+}
+
+/* Check that the logical structure of the destination and source buffers
+   is identical; set ValueError on mismatch. */
+static int
+equiv_structure(const Py_buffer *dest, const Py_buffer *src)
+{
+    if (equiv_format(dest, src) && equiv_shape(dest, src)) {
+        return 1;
+    }
+
+    PyErr_SetString(PyExc_ValueError,
+        "memoryview assignment: lvalue and rvalue have different "
+        "structures");
+    return 0;
+}
+
+/* Base case for recursive multi-dimensional copying. Contiguous arrays are
+ copied with very little overhead. Assumptions: ndim == 1, mem == NULL or
+ sizeof(mem) == shape[0] * itemsize. */
+static void
+copy_base(const Py_ssize_t *shape, Py_ssize_t itemsize,
+          char *dptr, const Py_ssize_t *dstrides, const Py_ssize_t *dsuboffsets,
+          char *sptr, const Py_ssize_t *sstrides, const Py_ssize_t *ssuboffsets,
+          char *mem)
+{
+    if (mem == NULL) { /* contiguous */
+        Py_ssize_t size = shape[0] * itemsize;
+        /* memcpy() only when the two ranges provably do not overlap;
+           otherwise use memmove(), which tolerates overlap. */
+        if (dptr + size < sptr || sptr + size < dptr)
+            memcpy(dptr, sptr, size); /* no overlapping */
+        else
+            memmove(dptr, sptr, size);
+    }
+    else {
+        char *p;
+        Py_ssize_t i;
+        /* Non-contiguous: stage all source items in 'mem' first, then
+           scatter them into the destination, so overlapping strided
+           self-assignment still behaves like an atomic copy. */
+        for (i=0, p=mem; i < shape[0]; p+=itemsize, sptr+=sstrides[0], i++) {
+            char *xsptr = ADJUST_PTR(sptr, ssuboffsets, 0);
+            memcpy(p, xsptr, itemsize);
+        }
+        for (i=0, p=mem; i < shape[0]; p+=itemsize, dptr+=dstrides[0], i++) {
+            char *xdptr = ADJUST_PTR(dptr, dsuboffsets, 0);
+            memcpy(xdptr, p, itemsize);
+        }
+    }
+
+}
+
+/* Recursively copy a source buffer to a destination buffer. The two buffers
+   have the same ndim, shape and itemsize. */
+static void
+copy_rec(const Py_ssize_t *shape, Py_ssize_t ndim, Py_ssize_t itemsize,
+         char *dptr, const Py_ssize_t *dstrides, const Py_ssize_t *dsuboffsets,
+         char *sptr, const Py_ssize_t *sstrides, const Py_ssize_t *ssuboffsets,
+         char *mem)
+{
+    Py_ssize_t i;
+
+    assert(ndim >= 1);
+
+    /* One-dimensional: defer to the (possibly staged) base copy. */
+    if (ndim == 1) {
+        copy_base(shape, itemsize,
+                  dptr, dstrides, dsuboffsets,
+                  sptr, sstrides, ssuboffsets,
+                  mem);
+        return;
+    }
+
+    /* Walk the outermost dimension, recursing with every descriptor array
+       advanced past that dimension. */
+    for (i = 0; i < shape[0]; dptr+=dstrides[0], sptr+=sstrides[0], i++) {
+        char *xdptr = ADJUST_PTR(dptr, dsuboffsets, 0);
+        char *xsptr = ADJUST_PTR(sptr, ssuboffsets, 0);
+
+        copy_rec(shape+1, ndim-1, itemsize,
+                 xdptr, dstrides+1, dsuboffsets ? dsuboffsets+1 : NULL,
+                 xsptr, sstrides+1, ssuboffsets ? ssuboffsets+1 : NULL,
+                 mem);
+    }
+}
+
+/* Faster copying of one-dimensional arrays. */
+static int
+copy_single(PyMemoryViewObject *self, const Py_buffer *dest, const Py_buffer *src)
+{
+    /* gh-92888: the buffer may have been released by a reentrant call in
+       the meantime; re-check before touching memory. */
+    CHECK_RELEASED_INT_AGAIN(self);
+    char *mem = NULL;
+
+    assert(dest->ndim == 1);
+
+    if (!equiv_structure(dest, src))
+        return -1;
+
+    /* Scratch space is only needed when the last dimension is strided or
+       indirect; see copy_base(). */
+    if (!last_dim_is_contiguous(dest, src)) {
+        mem = PyMem_Malloc(dest->shape[0] * dest->itemsize);
+        if (mem == NULL) {
+            PyErr_NoMemory();
+            return -1;
+        }
+    }
+
+    copy_base(dest->shape, dest->itemsize,
+              dest->buf, dest->strides, dest->suboffsets,
+              src->buf, src->strides, src->suboffsets,
+              mem);
+
+    if (mem)
+        PyMem_Free(mem);
+
+    return 0;
+}
+
+/* Recursively copy src to dest. Both buffers must have the same basic
+   structure. Copying is atomic, the function never fails with a partial
+   copy. */
+static int
+copy_buffer(const Py_buffer *dest, const Py_buffer *src)
+{
+    char *mem = NULL;
+
+    assert(dest->ndim > 0);
+
+    if (!equiv_structure(dest, src))
+        return -1;
+
+    /* If the innermost dimension is not contiguous, allocate one row's
+       worth of scratch space for the staged copy in copy_base(). */
+    if (!last_dim_is_contiguous(dest, src)) {
+        mem = PyMem_Malloc(dest->shape[dest->ndim-1] * dest->itemsize);
+        if (mem == NULL) {
+            PyErr_NoMemory();
+            return -1;
+        }
+    }
+
+    copy_rec(dest->shape, dest->ndim, dest->itemsize,
+             dest->buf, dest->strides, dest->suboffsets,
+             src->buf, src->strides, src->suboffsets,
+             mem);
+
+    if (mem)
+        PyMem_Free(mem);
+
+    return 0;
+}
+
+/* Initialize strides for a C-contiguous array. */
+static inline void
+init_strides_from_shape(Py_buffer *view)
+{
+    assert(view->ndim > 0);
+
+    /* Row-major: the last dimension steps by one item; each earlier
+       stride is the following stride scaled by the following extent. */
+    Py_ssize_t stride = view->itemsize;
+    for (Py_ssize_t i = view->ndim - 1; i >= 0; i--) {
+        view->strides[i] = stride;
+        stride *= view->shape[i];
+    }
+}
+
+/* Initialize strides for a Fortran-contiguous array. */
+static inline void
+init_fortran_strides_from_shape(Py_buffer *view)
+{
+    assert(view->ndim > 0);
+
+    /* Column-major: the first dimension steps by one item; each later
+       stride is the previous stride scaled by the previous extent. */
+    Py_ssize_t stride = view->itemsize;
+    for (Py_ssize_t i = 0; i < view->ndim; i++) {
+        view->strides[i] = stride;
+        stride *= view->shape[i];
+    }
+}
+
+/* Copy src to a contiguous representation. order is one of 'C', 'F' (Fortran)
+   or 'A' (Any). Assumptions: src has PyBUF_FULL information, src->ndim >= 1,
+   len(mem) == src->len. */
+static int
+buffer_to_contiguous(char *mem, const Py_buffer *src, char order)
+{
+    Py_buffer dest;
+    Py_ssize_t *strides;
+    int ret;
+
+    assert(src->ndim >= 1);
+    assert(src->shape != NULL);
+    assert(src->strides != NULL);
+
+    /* Temporary strides array for the contiguous destination view. */
+    strides = PyMem_Malloc(src->ndim * (sizeof *src->strides));
+    if (strides == NULL) {
+        PyErr_NoMemory();
+        return -1;
+    }
+
+    /* initialize dest */
+    dest = *src;
+    dest.buf = mem;
+    /* shape is constant and shared: the logical representation of the
+       array is unaltered. */
+
+    /* The physical representation determined by strides (and possibly
+       suboffsets) may change. */
+    dest.strides = strides;
+    if (order == 'C' || order == 'A') {
+        init_strides_from_shape(&dest);
+    }
+    else {
+        init_fortran_strides_from_shape(&dest);
+    }
+
+    /* A contiguous destination has no suboffsets. */
+    dest.suboffsets = NULL;
+
+    ret = copy_buffer(&dest, src);
+
+    PyMem_Free(strides);
+    return ret;
+}
+
+
+/****************************************************************************/
+/* Constructors */
+/****************************************************************************/
+
+/* Initialize values that are shared with the managed buffer. */
+static inline void
+init_shared_values(Py_buffer *dest, const Py_buffer *src)
+{
+    dest->obj = src->obj;
+    dest->buf = src->buf;
+    dest->len = src->len;
+    dest->itemsize = src->itemsize;
+    dest->readonly = src->readonly;
+    dest->internal = src->internal;
+    /* A missing format means unsigned bytes: normalize to "B". */
+    if (src->format != NULL) {
+        dest->format = src->format;
+    }
+    else {
+        dest->format = "B";
+    }
+}
+
+/* Copy shape and strides. Reconstruct missing values. */
+static void
+init_shape_strides(Py_buffer *dest, const Py_buffer *src)
+{
+    Py_ssize_t i;
+
+    /* Scalar (0-d) view: no shape or strides arrays at all. */
+    if (src->ndim == 0) {
+        dest->shape = NULL;
+        dest->strides = NULL;
+        return;
+    }
+    /* 1-d: a missing shape means len/itemsize items; a missing stride
+       means one item per step (contiguous). */
+    if (src->ndim == 1) {
+        dest->shape[0] = src->shape ? src->shape[0] : src->len / src->itemsize;
+        dest->strides[0] = src->strides ? src->strides[0] : src->itemsize;
+        return;
+    }
+
+    for (i = 0; i < src->ndim; i++)
+        dest->shape[i] = src->shape[i];
+    if (src->strides) {
+        for (i = 0; i < src->ndim; i++)
+            dest->strides[i] = src->strides[i];
+    }
+    else {
+        /* Missing strides for ndim > 1 are reconstructed assuming a
+           C-contiguous layout. */
+        init_strides_from_shape(dest);
+    }
+}
+
+/* Copy the suboffsets array, or mark the view as having none. */
+static inline void
+init_suboffsets(Py_buffer *dest, const Py_buffer *src)
+{
+    if (src->suboffsets == NULL) {
+        dest->suboffsets = NULL;
+    }
+    else {
+        for (Py_ssize_t i = 0; i < src->ndim; i++) {
+            dest->suboffsets[i] = src->suboffsets[i];
+        }
+    }
+}
+
+/* len = product(shape) * itemsize */
+static inline void
+init_len(Py_buffer *view)
+{
+    /* Seed with itemsize and fold in every extent. */
+    Py_ssize_t len = view->itemsize;
+    for (Py_ssize_t i = 0; i < view->ndim; i++) {
+        len *= view->shape[i];
+    }
+
+    view->len = len;
+}
+
+/* Initialize memoryview buffer properties. */
+static void
+init_flags(PyMemoryViewObject *mv)
+{
+    const Py_buffer *view = &mv->view;
+    int flags = 0;
+
+    switch (view->ndim) {
+    case 0:
+        /* Scalars count as both C- and Fortran-contiguous. */
+        flags |= (_Py_MEMORYVIEW_SCALAR|_Py_MEMORYVIEW_C|
+                  _Py_MEMORYVIEW_FORTRAN);
+        break;
+    case 1:
+        /* Cheap 1-d test, avoiding a PyBuffer_IsContiguous() call. */
+        if (MV_CONTIGUOUS_NDIM1(view))
+            flags |= (_Py_MEMORYVIEW_C|_Py_MEMORYVIEW_FORTRAN);
+        break;
+    default:
+        if (PyBuffer_IsContiguous(view, 'C'))
+            flags |= _Py_MEMORYVIEW_C;
+        if (PyBuffer_IsContiguous(view, 'F'))
+            flags |= _Py_MEMORYVIEW_FORTRAN;
+        break;
+    }
+
+    /* Suboffsets (PIL-style indirect buffers) rule out contiguity. */
+    if (view->suboffsets) {
+        flags |= _Py_MEMORYVIEW_PIL;
+        flags &= ~(_Py_MEMORYVIEW_C|_Py_MEMORYVIEW_FORTRAN);
+    }
+
+    mv->flags = flags;
+}
+
+/* Allocate a new memoryview and perform basic initialization. New memoryviews
+   are exclusively created through the mbuf_add functions. */
+static inline PyMemoryViewObject *
+memory_alloc(int ndim)
+{
+    PyMemoryViewObject *mv;
+
+    /* The variable part (3*ndim Py_ssize_t slots in ob_array) stores
+       shape, strides and suboffsets inline. */
+    mv = (PyMemoryViewObject *)
+        PyObject_GC_NewVar(PyMemoryViewObject, &PyMemoryView_Type, 3*ndim);
+    if (mv == NULL)
+        return NULL;
+
+    mv->mbuf = NULL;
+    mv->hash = -1;      /* hash not computed yet */
+    mv->flags = 0;
+    mv->exports = 0;
+    mv->view.ndim = ndim;
+    mv->view.shape = mv->ob_array;
+    mv->view.strides = mv->ob_array + ndim;
+    mv->view.suboffsets = mv->ob_array + 2 * ndim;
+    mv->weakreflist = NULL;
+
+    _PyObject_GC_TRACK(mv);
+    return mv;
+}
+
+/*
+   Return a new memoryview that is registered with mbuf. If src is NULL,
+   use mbuf->master as the underlying buffer. Otherwise, use src.
+
+   The new memoryview has full buffer information: shape and strides
+   are always present, suboffsets as needed. Arrays are copied to
+   the memoryview's ob_array field.
+ */
+static PyObject *
+mbuf_add_view(_PyManagedBufferObject *mbuf, const Py_buffer *src)
+{
+    PyMemoryViewObject *mv;
+    Py_buffer *dest;
+
+    if (src == NULL)
+        src = &mbuf->master;
+
+    if (src->ndim > PyBUF_MAX_NDIM) {
+        PyErr_SetString(PyExc_ValueError,
+            "memoryview: number of dimensions must not exceed "
+            Py_STRINGIFY(PyBUF_MAX_NDIM));
+        return NULL;
+    }
+
+    mv = memory_alloc(src->ndim);
+    if (mv == NULL)
+        return NULL;
+
+    dest = &mv->view;
+    init_shared_values(dest, src);
+    init_shape_strides(dest, src);
+    init_suboffsets(dest, src);
+    init_flags(mv);
+
+    /* Keep the managed buffer alive and record this view as an export. */
+    mv->mbuf = (_PyManagedBufferObject*)Py_NewRef(mbuf);
+    mbuf->exports++;
+
+    return (PyObject *)mv;
+}
+
+/* Register an incomplete view: shape, strides, suboffsets and flags still
+   need to be initialized. Use 'ndim' instead of src->ndim to determine the
+   size of the memoryview's ob_array.
+
+   Assumption: ndim <= PyBUF_MAX_NDIM. */
+static PyObject *
+mbuf_add_incomplete_view(_PyManagedBufferObject *mbuf, const Py_buffer *src,
+                         int ndim)
+{
+    PyMemoryViewObject *mv;
+    Py_buffer *dest;
+
+    if (src == NULL)
+        src = &mbuf->master;
+
+    assert(ndim <= PyBUF_MAX_NDIM);
+
+    mv = memory_alloc(ndim);
+    if (mv == NULL)
+        return NULL;
+
+    /* Only the scalar fields are initialized here; the caller must fill
+       in shape, strides and suboffsets and then call init_flags(). */
+    dest = &mv->view;
+    init_shared_values(dest, src);
+
+    /* Keep the managed buffer alive and record this view as an export. */
+    mv->mbuf = (_PyManagedBufferObject*)Py_NewRef(mbuf);
+    mbuf->exports++;
+
+    return (PyObject *)mv;
+}
+
+/* Expose a raw memory area as a view of contiguous bytes. flags can be
+   PyBUF_READ or PyBUF_WRITE. view->format is set to "B" (unsigned bytes).
+   The memoryview has complete buffer information. */
+PyObject *
+PyMemoryView_FromMemory(char *mem, Py_ssize_t size, int flags)
+{
+    _PyManagedBufferObject *mbuf;
+    PyObject *mv;
+    int readonly;
+
+    assert(mem != NULL);
+    assert(flags == PyBUF_READ || flags == PyBUF_WRITE);
+
+    mbuf = mbuf_alloc();
+    if (mbuf == NULL)
+        return NULL;
+
+    readonly = (flags == PyBUF_WRITE) ? 0 : 1;
+    /* The return value is ignored: with obj == NULL and a PyBUF_FULL_RO
+       request this fill-in has no failure path to report. */
+    (void)PyBuffer_FillInfo(&mbuf->master, NULL, mem, size, readonly,
+                            PyBUF_FULL_RO);
+
+    mv = mbuf_add_view(mbuf, NULL);
+    Py_DECREF(mbuf);    /* the view (if created) now holds the reference */
+
+    return mv;
+}
+
+/* Create a memoryview from a given Py_buffer. For simple byte views,
+   PyMemoryView_FromMemory() should be used instead.
+   This function is the only entry point that can create a master buffer
+   without full information. Because of this fact init_shape_strides()
+   must be able to reconstruct missing values. */
+PyObject *
+PyMemoryView_FromBuffer(const Py_buffer *info)
+{
+    _PyManagedBufferObject *mbuf;
+    PyObject *mv;
+
+    if (info->buf == NULL) {
+        PyErr_SetString(PyExc_ValueError,
+            "PyMemoryView_FromBuffer(): info->buf must not be NULL");
+        return NULL;
+    }
+
+    mbuf = mbuf_alloc();
+    if (mbuf == NULL)
+        return NULL;
+
+    /* info->obj is either NULL or a borrowed reference. This reference
+       should not be decremented in PyBuffer_Release(). */
+    mbuf->master = *info;
+    mbuf->master.obj = NULL;    /* never decref the borrowed reference */
+
+    mv = mbuf_add_view(mbuf, NULL);
+    Py_DECREF(mbuf);
+
+    return mv;
+}
+
+/* Create a memoryview from an object that implements the buffer protocol,
+   using the given flags.
+   If the object is a memoryview, the new memoryview must be registered
+   with the same managed buffer. Otherwise, a new managed buffer is created. */
+static PyObject *
+PyMemoryView_FromObjectAndFlags(PyObject *v, int flags)
+{
+    _PyManagedBufferObject *mbuf;
+
+    if (PyMemoryView_Check(v)) {
+        /* Chained view: share the existing managed buffer instead of
+           re-requesting the base object's buffer. */
+        PyMemoryViewObject *mv = (PyMemoryViewObject *)v;
+        CHECK_RELEASED(mv);
+        CHECK_RESTRICTED(mv);
+        return mbuf_add_view(mv->mbuf, &mv->view);
+    }
+    else if (PyObject_CheckBuffer(v)) {
+        PyObject *ret;
+        mbuf = (_PyManagedBufferObject *)_PyManagedBuffer_FromObject(v, flags);
+        if (mbuf == NULL)
+            return NULL;
+        ret = mbuf_add_view(mbuf, NULL);
+        Py_DECREF(mbuf);
+        return ret;
+    }
+
+    PyErr_Format(PyExc_TypeError,
+        "memoryview: a bytes-like object is required, not '%.200s'",
+        Py_TYPE(v)->tp_name);
+    return NULL;
+}
+
+/* Create a memoryview from an object that implements the buffer protocol,
+   using the given flags.
+   If the object is a memoryview, the new memoryview must be registered
+   with the same managed buffer. Otherwise, a new managed buffer is created. */
+PyObject *
+_PyMemoryView_FromBufferProc(PyObject *v, int flags, getbufferproc bufferproc)
+{
+    _PyManagedBufferObject *mbuf = mbuf_alloc();
+    if (mbuf == NULL)
+        return NULL;
+
+    /* Fill the master buffer through the caller-supplied getbuffer slot
+       instead of PyObject_GetBuffer(). */
+    int res = bufferproc(v, &mbuf->master, flags);
+    if (res < 0) {
+        /* Clear obj so the dealloc path does not release a buffer that
+           was never successfully acquired. */
+        mbuf->master.obj = NULL;
+        Py_DECREF(mbuf);
+        return NULL;
+    }
+
+    PyObject *ret = mbuf_add_view(mbuf, NULL);
+    Py_DECREF(mbuf);
+    return ret;
+}
+
+/* Create a memoryview from an object that implements the buffer protocol.
+   If the object is a memoryview, the new memoryview must be registered
+   with the same managed buffer. Otherwise, a new managed buffer is created. */
+PyObject *
+PyMemoryView_FromObject(PyObject *v)
+{
+    /* Default entry point: request full, read-only buffer information. */
+    return PyMemoryView_FromObjectAndFlags(v, PyBUF_FULL_RO);
+}
+
+/* Copy the format string from a base object that might vanish. A NULL
+   'fmt' is permitted and leaves the master buffer untouched. Returns 0 on
+   success, -1 (with MemoryError set) on allocation failure. */
+static int
+mbuf_copy_format(_PyManagedBufferObject *mbuf, const char *fmt)
+{
+    if (fmt == NULL) {
+        return 0;
+    }
+
+    size_t size = strlen(fmt) + 1;
+    char *cp = PyMem_Malloc(size);
+    if (cp == NULL) {
+        PyErr_NoMemory();
+        return -1;
+    }
+    memcpy(cp, fmt, size);
+    mbuf->master.format = cp;
+    /* Mark the format as owned so mbuf_dealloc() frees it. */
+    mbuf->flags |= _Py_MANAGED_BUFFER_FREE_FORMAT;
+
+    return 0;
+}
+
+/*
+   Return a memoryview that is based on a contiguous copy of src.
+   Assumptions: src has PyBUF_FULL_RO information, src->ndim > 0.
+
+   Ownership rules:
+   1) As usual, the returned memoryview has a private copy
+      of src->shape, src->strides and src->suboffsets.
+   2) src->format is copied to the master buffer and released
+      in mbuf_dealloc(). The releasebufferproc of the bytes
+      object is NULL, so it does not matter that mbuf_release()
+      passes the altered format pointer to PyBuffer_Release().
+*/
+static PyObject *
+memory_from_contiguous_copy(const Py_buffer *src, char order)
+{
+    _PyManagedBufferObject *mbuf;
+    PyMemoryViewObject *mv;
+    PyObject *bytes;
+    Py_buffer *dest;
+    int i;
+
+    assert(src->ndim > 0);
+    assert(src->shape != NULL);
+
+    /* The backing store for the copy is a fresh bytes object of the same
+       total size. */
+    bytes = PyBytes_FromStringAndSize(NULL, src->len);
+    if (bytes == NULL)
+        return NULL;
+
+    mbuf = (_PyManagedBufferObject *)_PyManagedBuffer_FromObject(bytes, PyBUF_FULL_RO);
+    Py_DECREF(bytes);
+    if (mbuf == NULL)
+        return NULL;
+
+    if (mbuf_copy_format(mbuf, src->format) < 0) {
+        Py_DECREF(mbuf);
+        return NULL;
+    }
+
+    mv = (PyMemoryViewObject *)mbuf_add_incomplete_view(mbuf, NULL, src->ndim);
+    Py_DECREF(mbuf);
+    if (mv == NULL)
+        return NULL;
+
+    dest = &mv->view;
+
+    /* shared values are initialized correctly except for itemsize */
+    dest->itemsize = src->itemsize;
+
+    /* shape and strides */
+    for (i = 0; i < src->ndim; i++) {
+        dest->shape[i] = src->shape[i];
+    }
+    if (order == 'C' || order == 'A') {
+        init_strides_from_shape(dest);
+    }
+    else {
+        init_fortran_strides_from_shape(dest);
+    }
+    /* suboffsets */
+    dest->suboffsets = NULL;
+
+    /* flags */
+    init_flags(mv);
+
+    /* Populate the bytes backing store with a contiguous copy of src. */
+    if (copy_buffer(dest, src) < 0) {
+        Py_DECREF(mv);
+        return NULL;
+    }
+
+    return (PyObject *)mv;
+}
+
+/*
+   Return a new memoryview object based on a contiguous exporter with
+   buffertype={PyBUF_READ, PyBUF_WRITE} and order={'C', 'F'ortran, or 'A'ny}.
+   The logical structure of the input and output buffers is the same
+   (i.e. tolist(input) == tolist(output)), but the physical layout in
+   memory can be explicitly chosen.
+
+   As usual, if buffertype=PyBUF_WRITE, the exporter's buffer must be writable,
+   otherwise it may be writable or read-only.
+
+   If the exporter is already contiguous with the desired target order,
+   the memoryview will be directly based on the exporter.
+
+   Otherwise, if the buffertype is PyBUF_READ, the memoryview will be
+   based on a new bytes object. If order={'C', 'A'ny}, use 'C' order,
+   'F'ortran order otherwise.
+*/
+PyObject *
+PyMemoryView_GetContiguous(PyObject *obj, int buffertype, char order)
+{
+    PyMemoryViewObject *mv;
+    PyObject *ret;
+    Py_buffer *view;
+
+    assert(buffertype == PyBUF_READ || buffertype == PyBUF_WRITE);
+    assert(order == 'C' || order == 'F' || order == 'A');
+
+    mv = (PyMemoryViewObject *)PyMemoryView_FromObject(obj);
+    if (mv == NULL)
+        return NULL;
+
+    view = &mv->view;
+    if (buffertype == PyBUF_WRITE && view->readonly) {
+        PyErr_SetString(PyExc_BufferError,
+            "underlying buffer is not writable");
+        Py_DECREF(mv);
+        return NULL;
+    }
+
+    /* Already contiguous in the requested order: return the direct view. */
+    if (PyBuffer_IsContiguous(view, order))
+        return (PyObject *)mv;
+
+    /* A non-contiguous exporter can only be served by copying, and the
+       copy path below is read-only. */
+    if (buffertype == PyBUF_WRITE) {
+        PyErr_SetString(PyExc_BufferError,
+            "writable contiguous buffer requested "
+            "for a non-contiguous object.");
+        Py_DECREF(mv);
+        return NULL;
+    }
+
+    ret = memory_from_contiguous_copy(view, order);
+    Py_DECREF(mv);
+    return ret;
+}
+
+
+/*[clinic input]
+@classmethod
+memoryview.__new__
+
+ object: object
+
+Create a new memoryview object which references the given object.
+[clinic start generated code]*/
+
+static PyObject *
+memoryview_impl(PyTypeObject *type, PyObject *object)
+/*[clinic end generated code: output=7de78e184ed66db8 input=f04429eb0bdf8c6e]*/
+{
+    /* NOTE(review): 'type' is deliberately ignored — presumably memoryview
+       construction never dispatches to a subclass here; confirm upstream. */
+    return PyMemoryView_FromObject(object);
+}
+
+
+/*[clinic input]
+@classmethod
+memoryview._from_flags
+
+ object: object
+ flags: int
+
+Create a new memoryview object which references the given object.
+[clinic start generated code]*/
+
+static PyObject *
+memoryview__from_flags_impl(PyTypeObject *type, PyObject *object, int flags)
+/*[clinic end generated code: output=bf71f9906c266ee2 input=f5f82fd0e744356b]*/
+{
+    /* Internal constructor: forwards the caller-supplied getbuffer flags. */
+    return PyMemoryView_FromObjectAndFlags(object, flags);
+}
+
+
+/****************************************************************************/
+/* Previously in abstract.c */
+/****************************************************************************/
+
+/* A Py_buffer with inline storage for shape/strides/suboffsets. The struct
+   is over-allocated (see PyBuffer_ToContiguous) so that 'array' actually
+   holds 3 * ndim entries. */
+typedef struct {
+    Py_buffer view;
+    Py_ssize_t array[1];
+} Py_buffer_full;
+
+int
+PyBuffer_ToContiguous(void *buf, const Py_buffer *src, Py_ssize_t len, char order)
+{
+    Py_buffer_full *fb = NULL;
+    int ret;
+
+    assert(order == 'C' || order == 'F' || order == 'A');
+
+    if (len != src->len) {
+        PyErr_SetString(PyExc_ValueError,
+            "PyBuffer_ToContiguous: len != view->len");
+        return -1;
+    }
+
+    /* Fast path: src is already contiguous in the requested order. */
+    if (PyBuffer_IsContiguous(src, order)) {
+        memcpy((char *)buf, src->buf, len);
+        return 0;
+    }
+
+    /* buffer_to_contiguous() assumes PyBUF_FULL */
+    /* Build a full snapshot of src with shape/strides/suboffsets stored
+       inline after the Py_buffer struct. */
+    fb = PyMem_Malloc(sizeof *fb + 3 * src->ndim * (sizeof *fb->array));
+    if (fb == NULL) {
+        PyErr_NoMemory();
+        return -1;
+    }
+    fb->view.ndim = src->ndim;
+    fb->view.shape = fb->array;
+    fb->view.strides = fb->array + src->ndim;
+    fb->view.suboffsets = fb->array + 2 * src->ndim;
+
+    init_shared_values(&fb->view, src);
+    init_shape_strides(&fb->view, src);
+    init_suboffsets(&fb->view, src);
+
+    src = &fb->view;
+
+    ret = buffer_to_contiguous(buf, src, order);
+    PyMem_Free(fb);
+    return ret;
+}
+
+
+/****************************************************************************/
+/* Release/GC management */
+/****************************************************************************/
+
+/* Inform the managed buffer that this particular memoryview will not access
+ the underlying buffer again. If no other memoryviews are registered with
+ the managed buffer, the underlying buffer is released instantly and
+ marked as inaccessible for both the memoryview and the managed buffer.
+
+ This function fails if the memoryview itself has exported buffers. */
static int
_memory_release(PyMemoryViewObject *self)
{
    /* Already released: releasing again is a no-op. */
    if (self->flags & _Py_MEMORYVIEW_RELEASED)
        return 0;

    if (self->exports == 0) {
        self->flags |= _Py_MEMORYVIEW_RELEASED;
        assert(self->mbuf->exports > 0);
        /* Last view on the managed buffer: release the underlying buffer. */
        if (--self->mbuf->exports == 0)
            mbuf_release(self->mbuf);
        return 0;
    }
    if (self->exports > 0) {
        /* Consumers still hold buffers exported by this view. */
        PyErr_Format(PyExc_BufferError,
            "memoryview has %zd exported buffer%s", self->exports,
            self->exports==1 ? "" : "s");
        return -1;
    }

    /* exports < 0 indicates internal bookkeeping corruption. */
    PyErr_SetString(PyExc_SystemError,
                    "_memory_release(): negative export count");
    return -1;
}
+
+/*[clinic input]
+memoryview.release
+
+Release the underlying buffer exposed by the memoryview object.
+[clinic start generated code]*/
+
+static PyObject *
+memoryview_release_impl(PyMemoryViewObject *self)
+/*[clinic end generated code: output=d0b7e3ba95b7fcb9 input=bc71d1d51f4a52f0]*/
+{
+ if (_memory_release(self) < 0)
+ return NULL;
+ Py_RETURN_NONE;
+}
+
static void
memory_dealloc(PyMemoryViewObject *self)
{
    assert(self->exports == 0);
    _PyObject_GC_UNTRACK(self);
    /* With exports == 0 this either succeeds or is a no-op, so the
       return value can be safely ignored. */
    (void)_memory_release(self);
    Py_CLEAR(self->mbuf);
    if (self->weakreflist != NULL)
        PyObject_ClearWeakRefs((PyObject *) self);
    PyObject_GC_Del(self);
}
+
/* GC traversal: the managed buffer is the only PyObject reference held. */
static int
memory_traverse(PyMemoryViewObject *self, visitproc visit, void *arg)
{
    Py_VISIT(self->mbuf);
    return 0;
}
+
/* GC clear: force-release the buffer and drop the managed-buffer reference. */
static int
memory_clear(PyMemoryViewObject *self)
{
    (void)_memory_release(self);
    Py_CLEAR(self->mbuf);
    return 0;
}
+
+static PyObject *
+memory_enter(PyObject *self, PyObject *args)
+{
+ CHECK_RELEASED(self);
+ return Py_NewRef(self);
+}
+
+static PyObject *
+memory_exit(PyObject *self, PyObject *args)
+{
+ return memoryview_release_impl((PyMemoryViewObject *)self);
+}
+
+
+/****************************************************************************/
+/* Casting format and shape */
+/****************************************************************************/
+
+#define IS_BYTE_FORMAT(f) (f == 'b' || f == 'B' || f == 'c')
+
+static inline Py_ssize_t
+get_native_fmtchar(char *result, const char *fmt)
+{
+ Py_ssize_t size = -1;
+
+ if (fmt[0] == '@') fmt++;
+
+ switch (fmt[0]) {
+ case 'c': case 'b': case 'B': size = sizeof(char); break;
+ case 'h': case 'H': size = sizeof(short); break;
+ case 'i': case 'I': size = sizeof(int); break;
+ case 'l': case 'L': size = sizeof(long); break;
+ case 'q': case 'Q': size = sizeof(long long); break;
+ case 'n': case 'N': size = sizeof(Py_ssize_t); break;
+ case 'f': size = sizeof(float); break;
+ case 'd': size = sizeof(double); break;
+ case 'e': size = sizeof(float) / 2; break;
+ case '?': size = sizeof(_Bool); break;
+ case 'P': size = sizeof(void *); break;
+ }
+
+ if (size > 0 && fmt[1] == '\0') {
+ *result = fmt[0];
+ return size;
+ }
+
+ return -1;
+}
+
/* Return a canonical, statically-allocated spelling of a single native
   format character, preserving an optional '@' prefix. Returns NULL for
   anything that is not a recognized one-character native format. The
   static strings let view->format point at memory that never needs
   freeing. */
static inline const char *
get_native_fmtstr(const char *fmt)
{
    int at = 0;

    if (fmt[0] == '@') {
        at = 1;
        fmt++;
    }
    if (fmt[0] == '\0' || fmt[1] != '\0') {
        return NULL;
    }

#define RETURN(s) do { return at ? "@" s : s; } while (0)

    switch (fmt[0]) {
    case 'c': RETURN("c");
    case 'b': RETURN("b");
    case 'B': RETURN("B");
    case 'h': RETURN("h");
    case 'H': RETURN("H");
    case 'i': RETURN("i");
    case 'I': RETURN("I");
    case 'l': RETURN("l");
    case 'L': RETURN("L");
    case 'q': RETURN("q");
    case 'Q': RETURN("Q");
    case 'n': RETURN("n");
    case 'N': RETURN("N");
    case 'f': RETURN("f");
    case 'd': RETURN("d");
    case 'e': RETURN("e");
    case '?': RETURN("?");
    case 'P': RETURN("P");
    }

    return NULL;
}
+
+
+/* Cast a memoryview's data type to 'format'. The input array must be
+ C-contiguous. At least one of input-format, output-format must have
+ byte size. The output array is 1-D, with the same byte length as the
+ input array. Thus, view->len must be a multiple of the new itemsize. */
static int
cast_to_1D(PyMemoryViewObject *mv, PyObject *format)
{
    Py_buffer *view = &mv->view;
    PyObject *asciifmt;
    char srcchar, destchar;
    Py_ssize_t itemsize;
    int ret = -1;

    /* The view must still own its inline shape/strides/suboffsets arrays. */
    assert(view->ndim >= 1);
    assert(Py_SIZE(mv) == 3*view->ndim);
    assert(view->shape == mv->ob_array);
    assert(view->strides == mv->ob_array + view->ndim);
    assert(view->suboffsets == mv->ob_array + 2*view->ndim);

    asciifmt = PyUnicode_AsASCIIString(format);
    if (asciifmt == NULL)
        return ret;

    itemsize = get_native_fmtchar(&destchar, PyBytes_AS_STRING(asciifmt));
    if (itemsize < 0) {
        PyErr_SetString(PyExc_ValueError,
            "memoryview: destination format must be a native single "
            "character format prefixed with an optional '@'");
        goto out;
    }

    /* At least one of the two formats must have byte size. */
    if ((get_native_fmtchar(&srcchar, view->format) < 0 ||
         !IS_BYTE_FORMAT(srcchar)) && !IS_BYTE_FORMAT(destchar)) {
        PyErr_SetString(PyExc_TypeError,
            "memoryview: cannot cast between two non-byte formats");
        goto out;
    }
    if (view->len % itemsize) {
        PyErr_SetString(PyExc_TypeError,
            "memoryview: length is not a multiple of itemsize");
        goto out;
    }

    /* Point at a static canonical format string (never freed). */
    view->format = (char *)get_native_fmtstr(PyBytes_AS_STRING(asciifmt));
    if (view->format == NULL) {
        /* NOT_REACHED: get_native_fmtchar() already validates the format. */
        PyErr_SetString(PyExc_RuntimeError,
            "memoryview: internal error");
        goto out;
    }
    view->itemsize = itemsize;

    /* Collapse to a 1-D C-contiguous view of the same byte length. */
    view->ndim = 1;
    view->shape[0] = view->len / view->itemsize;
    view->strides[0] = view->itemsize;
    view->suboffsets = NULL;

    init_flags(mv);

    ret = 0;

out:
    Py_DECREF(asciifmt);
    return ret;
}
+
+/* The memoryview must have space for 3*len(seq) elements. */
static Py_ssize_t
copy_shape(Py_ssize_t *shape, const PyObject *seq, Py_ssize_t ndim,
           Py_ssize_t itemsize)
{
    Py_ssize_t x, i;
    Py_ssize_t len = itemsize;  /* running product: itemsize * shape[0..i] */

    for (i = 0; i < ndim; i++) {
        PyObject *tmp = PySequence_Fast_GET_ITEM(seq, i);
        if (!PyLong_Check(tmp)) {
            PyErr_SetString(PyExc_TypeError,
                "memoryview.cast(): elements of shape must be integers");
            return -1;
        }
        x = PyLong_AsSsize_t(tmp);
        if (x == -1 && PyErr_Occurred()) {
            return -1;
        }
        if (x <= 0) {
            /* In general elements of shape may be 0, but not for casting. */
            PyErr_Format(PyExc_ValueError,
                "memoryview.cast(): elements of shape must be integers > 0");
            return -1;
        }
        /* Overflow check before multiplying into the running product. */
        if (x > PY_SSIZE_T_MAX / len) {
            PyErr_Format(PyExc_ValueError,
                "memoryview.cast(): product(shape) > SSIZE_MAX");
            return -1;
        }
        len *= x;
        shape[i] = x;
    }

    /* Total byte length implied by the requested shape. */
    return len;
}
+
+/* Cast a 1-D array to a new shape. The result array will be C-contiguous.
+ If the result array does not have exactly the same byte length as the
+ input array, raise ValueError. */
static int
cast_to_ND(PyMemoryViewObject *mv, const PyObject *shape, int ndim)
{
    Py_buffer *view = &mv->view;
    Py_ssize_t len;

    assert(view->ndim == 1); /* ndim from cast_to_1D() */
    assert(Py_SIZE(mv) == 3*(ndim==0?1:ndim)); /* ndim of result array */
    assert(view->shape == mv->ob_array);
    assert(view->strides == mv->ob_array + (ndim==0?1:ndim));
    assert(view->suboffsets == NULL);

    view->ndim = ndim;
    if (view->ndim == 0) {
        /* 0-d view: no shape/strides; length is the single item. */
        view->shape = NULL;
        view->strides = NULL;
        len = view->itemsize;
    }
    else {
        len = copy_shape(view->shape, shape, ndim, view->itemsize);
        if (len < 0)
            return -1;
        init_strides_from_shape(view);
    }

    /* The cast must preserve the exact byte length of the input. */
    if (view->len != len) {
        PyErr_SetString(PyExc_TypeError,
            "memoryview: product(shape) * itemsize != buffer size");
        return -1;
    }

    init_flags(mv);

    return 0;
}
+
+static int
+zero_in_shape(PyMemoryViewObject *mv)
+{
+ Py_buffer *view = &mv->view;
+ Py_ssize_t i;
+
+ for (i = 0; i < view->ndim; i++)
+ if (view->shape[i] == 0)
+ return 1;
+
+ return 0;
+}
+
+/*
+ Cast a copy of 'self' to a different view. The input view must
+ be C-contiguous. The function always casts the input view to a
+ 1-D output according to 'format'. At least one of input-format,
+ output-format must have byte size.
+
+ If 'shape' is given, the 1-D view from the previous step will
+ be cast to a C-contiguous view with new shape and strides.
+
+ All casts must result in views that will have the exact byte
+ size of the original input. Otherwise, an error is raised.
+*/
+/*[clinic input]
+memoryview.cast
+
+ format: unicode
+ shape: object = NULL
+
+Cast a memoryview to a new format or shape.
+[clinic start generated code]*/
+
static PyObject *
memoryview_cast_impl(PyMemoryViewObject *self, PyObject *format,
                     PyObject *shape)
/*[clinic end generated code: output=bae520b3a389cbab input=138936cc9041b1a3]*/
{
    PyMemoryViewObject *mv = NULL;
    Py_ssize_t ndim = 1;

    CHECK_RELEASED(self);
    CHECK_RESTRICTED(self);

    if (!MV_C_CONTIGUOUS(self->flags)) {
        PyErr_SetString(PyExc_TypeError,
            "memoryview: casts are restricted to C-contiguous views");
        return NULL;
    }
    /* A 1-D cast of a 1-D view tolerates zeros in shape; anything else
       does not. */
    if ((shape || self->view.ndim != 1) && zero_in_shape(self)) {
        PyErr_SetString(PyExc_TypeError,
            "memoryview: cannot cast view with zeros in shape or strides");
        return NULL;
    }
    if (shape) {
        CHECK_LIST_OR_TUPLE(shape)
        ndim = PySequence_Fast_GET_SIZE(shape);
        if (ndim > PyBUF_MAX_NDIM) {
            PyErr_SetString(PyExc_ValueError,
                "memoryview: number of dimensions must not exceed "
                Py_STRINGIFY(PyBUF_MAX_NDIM));
            return NULL;
        }
        if (self->view.ndim != 1 && ndim != 1) {
            PyErr_SetString(PyExc_TypeError,
                "memoryview: cast must be 1D -> ND or ND -> 1D");
            return NULL;
        }
    }

    /* A 0-d result still needs one slot of shape/strides storage. */
    mv = (PyMemoryViewObject *)
        mbuf_add_incomplete_view(self->mbuf, &self->view, ndim==0 ? 1 : (int)ndim);
    if (mv == NULL)
        return NULL;

    /* First flatten to 1-D with the new format, then (optionally)
       reshape to the requested N-D form. */
    if (cast_to_1D(mv, format) < 0)
        goto error;
    if (shape && cast_to_ND(mv, shape, (int)ndim) < 0)
        goto error;

    return (PyObject *)mv;

error:
    Py_DECREF(mv);
    return NULL;
}
+
+/*[clinic input]
+memoryview.toreadonly
+
+Return a readonly version of the memoryview.
+[clinic start generated code]*/
+
+static PyObject *
+memoryview_toreadonly_impl(PyMemoryViewObject *self)
+/*[clinic end generated code: output=2c7e056f04c99e62 input=dc06d20f19ba236f]*/
+{
+ CHECK_RELEASED(self);
+ CHECK_RESTRICTED(self);
+ /* Even if self is already readonly, we still need to create a new
+ * object for .release() to work correctly.
+ */
+ self = (PyMemoryViewObject *) mbuf_add_view(self->mbuf, &self->view);
+ if (self != NULL) {
+ self->view.readonly = 1;
+ };
+ return (PyObject *) self;
+}
+
+
+/**************************************************************************/
+/* getbuffer */
+/**************************************************************************/
+
/* bf_getbuffer: export this view's buffer into 'view', restricted to what
   'flags' requests. On success registers one export on self. */
static int
memory_getbuf(PyMemoryViewObject *self, Py_buffer *view, int flags)
{
    Py_buffer *base = &self->view;
    int baseflags = self->flags;

    CHECK_RELEASED_INT(self);
    CHECK_RESTRICTED_INT(self);

    /* start with complete information */
    *view = *base;
    view->obj = NULL;

    if (REQ_WRITABLE(flags) && base->readonly) {
        PyErr_SetString(PyExc_BufferError,
            "memoryview: underlying buffer is not writable");
        return -1;
    }
    if (!REQ_FORMAT(flags)) {
        /* NULL indicates that the buffer's data type has been cast to 'B'.
           view->itemsize is the _previous_ itemsize. If shape is present,
           the equality product(shape) * itemsize = len still holds at this
           point. The equality calcsize(format) = itemsize does _not_ hold
           from here on! */
        view->format = NULL;
    }

    if (REQ_C_CONTIGUOUS(flags) && !MV_C_CONTIGUOUS(baseflags)) {
        PyErr_SetString(PyExc_BufferError,
            "memoryview: underlying buffer is not C-contiguous");
        return -1;
    }
    if (REQ_F_CONTIGUOUS(flags) && !MV_F_CONTIGUOUS(baseflags)) {
        PyErr_SetString(PyExc_BufferError,
            "memoryview: underlying buffer is not Fortran contiguous");
        return -1;
    }
    if (REQ_ANY_CONTIGUOUS(flags) && !MV_ANY_CONTIGUOUS(baseflags)) {
        PyErr_SetString(PyExc_BufferError,
            "memoryview: underlying buffer is not contiguous");
        return -1;
    }
    if (!REQ_INDIRECT(flags) && (baseflags & _Py_MEMORYVIEW_PIL)) {
        PyErr_SetString(PyExc_BufferError,
            "memoryview: underlying buffer requires suboffsets");
        return -1;
    }
    if (!REQ_STRIDES(flags)) {
        /* Without strides the consumer can only handle C-contiguous data. */
        if (!MV_C_CONTIGUOUS(baseflags)) {
            PyErr_SetString(PyExc_BufferError,
                "memoryview: underlying buffer is not C-contiguous");
            return -1;
        }
        view->strides = NULL;
    }
    if (!REQ_SHAPE(flags)) {
        /* PyBUF_SIMPLE or PyBUF_WRITABLE: at this point buf is C-contiguous,
           so base->buf = ndbuf->data. */
        if (view->format != NULL) {
            /* PyBUF_SIMPLE|PyBUF_FORMAT and PyBUF_WRITABLE|PyBUF_FORMAT do
               not make sense. */
            PyErr_Format(PyExc_BufferError,
                "memoryview: cannot cast to unsigned bytes if the format flag "
                "is present");
            return -1;
        }
        /* product(shape) * itemsize = len and calcsize(format) = itemsize
           do _not_ hold from here on! */
        view->ndim = 1;
        view->shape = NULL;
    }


    view->obj = Py_NewRef(self);
    self->exports++;

    return 0;
}
+
+static void
+memory_releasebuf(PyMemoryViewObject *self, Py_buffer *view)
+{
+ self->exports--;
+ return;
+ /* PyBuffer_Release() decrements view->obj after this function returns. */
+}
+
/* Buffer methods: the buffer-protocol implementation for memoryview. */
static PyBufferProcs memory_as_buffer = {
    (getbufferproc)memory_getbuf,         /* bf_getbuffer */
    (releasebufferproc)memory_releasebuf, /* bf_releasebuffer */
};
+
+
+/****************************************************************************/
+/* Optimized pack/unpack for all native format specifiers */
+/****************************************************************************/
+
+/*
+ Fix exceptions:
+ 1) Include format string in the error message.
+ 2) OverflowError -> ValueError.
+ 3) The error message from PyNumber_Index() is not ideal.
+*/
/* Raise TypeError naming the offending format; always returns -1. */
static int
type_error_int(const char *fmt)
{
    PyErr_Format(PyExc_TypeError,
        "memoryview: invalid type for format '%s'", fmt);
    return -1;
}
+
/* Raise ValueError naming the offending format; always returns -1. */
static int
value_error_int(const char *fmt)
{
    PyErr_Format(PyExc_ValueError,
        "memoryview: invalid value for format '%s'", fmt);
    return -1;
}
+
+static int
+fix_error_int(const char *fmt)
+{
+ assert(PyErr_Occurred());
+ if (PyErr_ExceptionMatches(PyExc_TypeError)) {
+ PyErr_Clear();
+ return type_error_int(fmt);
+ }
+ else if (PyErr_ExceptionMatches(PyExc_OverflowError) ||
+ PyErr_ExceptionMatches(PyExc_ValueError)) {
+ PyErr_Clear();
+ return value_error_int(fmt);
+ }
+
+ return -1;
+}
+
+/* Accept integer objects or objects with an __index__() method. */
+static long
+pylong_as_ld(PyObject *item)
+{
+ PyObject *tmp;
+ long ld;
+
+ tmp = _PyNumber_Index(item);
+ if (tmp == NULL)
+ return -1;
+
+ ld = PyLong_AsLong(tmp);
+ Py_DECREF(tmp);
+ return ld;
+}
+
+static unsigned long
+pylong_as_lu(PyObject *item)
+{
+ PyObject *tmp;
+ unsigned long lu;
+
+ tmp = _PyNumber_Index(item);
+ if (tmp == NULL)
+ return (unsigned long)-1;
+
+ lu = PyLong_AsUnsignedLong(tmp);
+ Py_DECREF(tmp);
+ return lu;
+}
+
+static long long
+pylong_as_lld(PyObject *item)
+{
+ PyObject *tmp;
+ long long lld;
+
+ tmp = _PyNumber_Index(item);
+ if (tmp == NULL)
+ return -1;
+
+ lld = PyLong_AsLongLong(tmp);
+ Py_DECREF(tmp);
+ return lld;
+}
+
+static unsigned long long
+pylong_as_llu(PyObject *item)
+{
+ PyObject *tmp;
+ unsigned long long llu;
+
+ tmp = _PyNumber_Index(item);
+ if (tmp == NULL)
+ return (unsigned long long)-1;
+
+ llu = PyLong_AsUnsignedLongLong(tmp);
+ Py_DECREF(tmp);
+ return llu;
+}
+
+static Py_ssize_t
+pylong_as_zd(PyObject *item)
+{
+ PyObject *tmp;
+ Py_ssize_t zd;
+
+ tmp = _PyNumber_Index(item);
+ if (tmp == NULL)
+ return -1;
+
+ zd = PyLong_AsSsize_t(tmp);
+ Py_DECREF(tmp);
+ return zd;
+}
+
+static size_t
+pylong_as_zu(PyObject *item)
+{
+ PyObject *tmp;
+ size_t zu;
+
+ tmp = _PyNumber_Index(item);
+ if (tmp == NULL)
+ return (size_t)-1;
+
+ zu = PyLong_AsSize_t(tmp);
+ Py_DECREF(tmp);
+ return zu;
+}
+
/* Timings with the ndarray from _testbuffer.c indicate that using the
   struct module is around 15x slower than the two functions below. */

/* Read one item of 'type' from 'ptr' into 'dest'. The memcpy through a
   local makes the read safe for unaligned source addresses. */
#define UNPACK_SINGLE(dest, ptr, type) \
    do {                                   \
        type x;                            \
        memcpy((char *)&x, ptr, sizeof x); \
        dest = x;                          \
    } while (0)
+
+/* Unpack a single item. 'fmt' can be any native format character in struct
+ module syntax. This function is very sensitive to small changes. With this
+ layout gcc automatically generates a fast jump table. */
static inline PyObject *
unpack_single(PyMemoryViewObject *self, const char *ptr, const char *fmt)
{
    unsigned long long llu;
    unsigned long lu;
    size_t zu;
    long long lld;
    long ld;
    Py_ssize_t zd;
    double d;
    unsigned char uc;
    void *p;

    /* The buffer may have been released by a re-entrant callback. */
    CHECK_RELEASED_AGAIN(self);

#if PY_LITTLE_ENDIAN
    int endian = 1;
#else
    int endian = 0;
#endif

    switch (fmt[0]) {

    /* signed integers and fast path for 'B' */
    case 'B': uc = *((const unsigned char *)ptr); goto convert_uc;
    case 'b': ld = *((const signed char *)ptr); goto convert_ld;
    case 'h': UNPACK_SINGLE(ld, ptr, short); goto convert_ld;
    case 'i': UNPACK_SINGLE(ld, ptr, int); goto convert_ld;
    case 'l': UNPACK_SINGLE(ld, ptr, long); goto convert_ld;

    /* boolean */
    case '?': UNPACK_SINGLE(ld, ptr, _Bool); goto convert_bool;

    /* unsigned integers */
    case 'H': UNPACK_SINGLE(lu, ptr, unsigned short); goto convert_lu;
    case 'I': UNPACK_SINGLE(lu, ptr, unsigned int); goto convert_lu;
    case 'L': UNPACK_SINGLE(lu, ptr, unsigned long); goto convert_lu;

    /* native 64-bit */
    case 'q': UNPACK_SINGLE(lld, ptr, long long); goto convert_lld;
    case 'Q': UNPACK_SINGLE(llu, ptr, unsigned long long); goto convert_llu;

    /* ssize_t and size_t */
    case 'n': UNPACK_SINGLE(zd, ptr, Py_ssize_t); goto convert_zd;
    case 'N': UNPACK_SINGLE(zu, ptr, size_t); goto convert_zu;

    /* floats */
    case 'f': UNPACK_SINGLE(d, ptr, float); goto convert_double;
    case 'd': UNPACK_SINGLE(d, ptr, double); goto convert_double;
    case 'e': d = PyFloat_Unpack2(ptr, endian); goto convert_double;

    /* bytes object */
    case 'c': goto convert_bytes;

    /* pointer */
    case 'P': UNPACK_SINGLE(p, ptr, void *); goto convert_pointer;

    /* default */
    default: goto err_format;
    }

convert_uc:
    /* PyLong_FromUnsignedLong() is slower */
    return PyLong_FromLong(uc);
convert_ld:
    return PyLong_FromLong(ld);
convert_lu:
    return PyLong_FromUnsignedLong(lu);
convert_lld:
    return PyLong_FromLongLong(lld);
convert_llu:
    return PyLong_FromUnsignedLongLong(llu);
convert_zd:
    return PyLong_FromSsize_t(zd);
convert_zu:
    return PyLong_FromSize_t(zu);
convert_double:
    return PyFloat_FromDouble(d);
convert_bool:
    return PyBool_FromLong(ld);
convert_bytes:
    return PyBytes_FromStringAndSize(ptr, 1);
convert_pointer:
    return PyLong_FromVoidPtr(p);
err_format:
    PyErr_Format(PyExc_NotImplementedError,
        "memoryview: format %s not supported", fmt);
    return NULL;
}
+
/* Convert 'src' to 'type' and store it at 'ptr'. The memcpy through a
   local makes the write safe for unaligned destination addresses. */
#define PACK_SINGLE(ptr, src, type) \
    do {                                   \
        type x;                            \
        x = (type)src;                     \
        memcpy(ptr, (char *)&x, sizeof x); \
    } while (0)
+
+/* Pack a single item. 'fmt' can be any native format character in
+ struct module syntax. */
static int
pack_single(PyMemoryViewObject *self, char *ptr, PyObject *item, const char *fmt)
{
    unsigned long long llu;
    unsigned long lu;
    size_t zu;
    long long lld;
    long ld;
    Py_ssize_t zd;
    double d;
    void *p;

#if PY_LITTLE_ENDIAN
    int endian = 1;
#else
    int endian = 0;
#endif
    switch (fmt[0]) {
    /* signed integers */
    case 'b': case 'h': case 'i': case 'l':
        ld = pylong_as_ld(item);
        if (ld == -1 && PyErr_Occurred())
            goto err_occurred;
        /* The conversion above can run arbitrary Python code, which may
           release the buffer; re-check before writing through ptr. */
        CHECK_RELEASED_INT_AGAIN(self);
        switch (fmt[0]) {
        case 'b':
            if (ld < SCHAR_MIN || ld > SCHAR_MAX) goto err_range;
            *((signed char *)ptr) = (signed char)ld; break;
        case 'h':
            if (ld < SHRT_MIN || ld > SHRT_MAX) goto err_range;
            PACK_SINGLE(ptr, ld, short); break;
        case 'i':
            if (ld < INT_MIN || ld > INT_MAX) goto err_range;
            PACK_SINGLE(ptr, ld, int); break;
        default: /* 'l' */
            PACK_SINGLE(ptr, ld, long); break;
        }
        break;

    /* unsigned integers */
    case 'B': case 'H': case 'I': case 'L':
        lu = pylong_as_lu(item);
        if (lu == (unsigned long)-1 && PyErr_Occurred())
            goto err_occurred;
        CHECK_RELEASED_INT_AGAIN(self);
        switch (fmt[0]) {
        case 'B':
            if (lu > UCHAR_MAX) goto err_range;
            *((unsigned char *)ptr) = (unsigned char)lu; break;
        case 'H':
            if (lu > USHRT_MAX) goto err_range;
            PACK_SINGLE(ptr, lu, unsigned short); break;
        case 'I':
            if (lu > UINT_MAX) goto err_range;
            PACK_SINGLE(ptr, lu, unsigned int); break;
        default: /* 'L' */
            PACK_SINGLE(ptr, lu, unsigned long); break;
        }
        break;

    /* native 64-bit */
    case 'q':
        lld = pylong_as_lld(item);
        if (lld == -1 && PyErr_Occurred())
            goto err_occurred;
        CHECK_RELEASED_INT_AGAIN(self);
        PACK_SINGLE(ptr, lld, long long);
        break;
    case 'Q':
        llu = pylong_as_llu(item);
        if (llu == (unsigned long long)-1 && PyErr_Occurred())
            goto err_occurred;
        CHECK_RELEASED_INT_AGAIN(self);
        PACK_SINGLE(ptr, llu, unsigned long long);
        break;

    /* ssize_t and size_t */
    case 'n':
        zd = pylong_as_zd(item);
        if (zd == -1 && PyErr_Occurred())
            goto err_occurred;
        CHECK_RELEASED_INT_AGAIN(self);
        PACK_SINGLE(ptr, zd, Py_ssize_t);
        break;
    case 'N':
        zu = pylong_as_zu(item);
        if (zu == (size_t)-1 && PyErr_Occurred())
            goto err_occurred;
        CHECK_RELEASED_INT_AGAIN(self);
        PACK_SINGLE(ptr, zu, size_t);
        break;

    /* floats */
    case 'f': case 'd': case 'e':
        d = PyFloat_AsDouble(item);
        if (d == -1.0 && PyErr_Occurred())
            goto err_occurred;
        CHECK_RELEASED_INT_AGAIN(self);
        if (fmt[0] == 'f') {
            PACK_SINGLE(ptr, d, float);
        }
        else if (fmt[0] == 'd') {
            PACK_SINGLE(ptr, d, double);
        }
        else {
            if (PyFloat_Pack2(d, ptr, endian) < 0) {
                goto err_occurred;
            }
        }
        break;

    /* bool */
    case '?':
        ld = PyObject_IsTrue(item);
        if (ld < 0)
            return -1; /* preserve original error */
        CHECK_RELEASED_INT_AGAIN(self);
        PACK_SINGLE(ptr, ld, _Bool);
        break;

    /* bytes object */
    case 'c':
        if (!PyBytes_Check(item))
            return type_error_int(fmt);
        if (PyBytes_GET_SIZE(item) != 1)
            return value_error_int(fmt);
        *ptr = PyBytes_AS_STRING(item)[0];
        break;

    /* pointer */
    case 'P':
        p = PyLong_AsVoidPtr(item);
        if (p == NULL && PyErr_Occurred())
            goto err_occurred;
        CHECK_RELEASED_INT_AGAIN(self);
        PACK_SINGLE(ptr, p, void *);
        break;

    /* default */
    default: goto err_format;
    }

    return 0;

err_occurred:
    return fix_error_int(fmt);
err_range:
    return value_error_int(fmt);
err_format:
    PyErr_Format(PyExc_NotImplementedError,
        "memoryview: format %s not supported", fmt);
    return -1;
}
+
+
+/****************************************************************************/
+/* unpack using the struct module */
+/****************************************************************************/
+
+/* For reasonable performance it is necessary to cache all objects required
+ for unpacking. An unpacker can handle the format passed to unpack_from().
+ Invariant: All pointer fields of the struct should either be NULL or valid
+ pointers. */
struct unpacker {
    PyObject *unpack_from; /* bound Struct(format).unpack_from method */
    PyObject *mview;       /* cached writable memoryview over 'item' */
    char *item;            /* scratch buffer backing 'mview' */
    Py_ssize_t itemsize;   /* len(item) */
};
+
+static struct unpacker *
+unpacker_new(void)
+{
+ struct unpacker *x = PyMem_Malloc(sizeof *x);
+
+ if (x == NULL) {
+ PyErr_NoMemory();
+ return NULL;
+ }
+
+ x->unpack_from = NULL;
+ x->mview = NULL;
+ x->item = NULL;
+ x->itemsize = 0;
+
+ return x;
+}
+
+static void
+unpacker_free(struct unpacker *x)
+{
+ if (x) {
+ Py_XDECREF(x->unpack_from);
+ Py_XDECREF(x->mview);
+ PyMem_Free(x->item);
+ PyMem_Free(x);
+ }
+}
+
+/* Return a new unpacker for the given format. */
/* Return a new unpacker for the given format. */
static struct unpacker *
struct_get_unpacker(const char *fmt, Py_ssize_t itemsize)
{
    PyObject *Struct = NULL;    /* XXX cache it in globals? */
    PyObject *structobj = NULL;
    PyObject *format = NULL;
    struct unpacker *x = NULL;

    Struct = _PyImport_GetModuleAttrString("struct", "Struct");
    if (Struct == NULL)
        return NULL;

    x = unpacker_new();
    if (x == NULL)
        goto error;

    format = PyBytes_FromString(fmt);
    if (format == NULL)
        goto error;

    /* struct.Struct(format): validates 'fmt' as a side effect. */
    structobj = PyObject_CallOneArg(Struct, format);
    if (structobj == NULL)
        goto error;

    x->unpack_from = PyObject_GetAttrString(structobj, "unpack_from");
    if (x->unpack_from == NULL)
        goto error;

    x->item = PyMem_Malloc(itemsize);
    if (x->item == NULL) {
        PyErr_NoMemory();
        goto error;
    }
    x->itemsize = itemsize;

    /* Writable view over x->item; struct_unpack_single() copies the raw
       item into it before calling unpack_from. */
    x->mview = PyMemoryView_FromMemory(x->item, itemsize, PyBUF_WRITE);
    if (x->mview == NULL)
        goto error;


out:
    Py_XDECREF(Struct);
    Py_XDECREF(format);
    Py_XDECREF(structobj);
    return x;

error:
    unpacker_free(x);
    x = NULL;
    goto out;
}
+
+/* unpack a single item */
+static PyObject *
+struct_unpack_single(const char *ptr, struct unpacker *x)
+{
+ PyObject *v;
+
+ memcpy(x->item, ptr, x->itemsize);
+ v = PyObject_CallOneArg(x->unpack_from, x->mview);
+ if (v == NULL)
+ return NULL;
+
+ if (PyTuple_GET_SIZE(v) == 1) {
+ PyObject *res = Py_NewRef(PyTuple_GET_ITEM(v, 0));
+ Py_DECREF(v);
+ return res;
+ }
+
+ return v;
+}
+
+
+/****************************************************************************/
+/* Representations */
+/****************************************************************************/
+
+/* allow explicit form of native format */
+static inline const char *
+adjust_fmt(const Py_buffer *view)
+{
+ const char *fmt;
+
+ fmt = (view->format[0] == '@') ? view->format+1 : view->format;
+ if (fmt[0] && fmt[1] == '\0')
+ return fmt;
+
+ PyErr_Format(PyExc_NotImplementedError,
+ "memoryview: unsupported format %s", view->format);
+ return NULL;
+}
+
+/* Base case for multi-dimensional unpacking. Assumption: ndim == 1. */
static PyObject *
tolist_base(PyMemoryViewObject *self, const char *ptr, const Py_ssize_t *shape,
            const Py_ssize_t *strides, const Py_ssize_t *suboffsets,
            const char *fmt)
{
    PyObject *lst, *item;
    Py_ssize_t i;

    lst = PyList_New(shape[0]);
    if (lst == NULL)
        return NULL;

    /* Walk the single dimension by its stride, honoring suboffsets. */
    for (i = 0; i < shape[0]; ptr+=strides[0], i++) {
        const char *xptr = ADJUST_PTR(ptr, suboffsets, 0);
        item = unpack_single(self, xptr, fmt);
        if (item == NULL) {
            Py_DECREF(lst);
            return NULL;
        }
        PyList_SET_ITEM(lst, i, item);
    }

    return lst;
}
+
+/* Unpack a multi-dimensional array into a nested list.
+ Assumption: ndim >= 1. */
static PyObject *
tolist_rec(PyMemoryViewObject *self, const char *ptr, Py_ssize_t ndim, const Py_ssize_t *shape,
           const Py_ssize_t *strides, const Py_ssize_t *suboffsets,
           const char *fmt)
{
    PyObject *lst, *item;
    Py_ssize_t i;

    assert(ndim >= 1);
    assert(shape != NULL);
    assert(strides != NULL);

    if (ndim == 1)
        return tolist_base(self, ptr, shape, strides, suboffsets, fmt);

    lst = PyList_New(shape[0]);
    if (lst == NULL)
        return NULL;

    /* Recurse one dimension down for each element of this dimension. */
    for (i = 0; i < shape[0]; ptr+=strides[0], i++) {
        const char *xptr = ADJUST_PTR(ptr, suboffsets, 0);
        item = tolist_rec(self, xptr, ndim-1, shape+1,
                          strides+1, suboffsets ? suboffsets+1 : NULL,
                          fmt);
        if (item == NULL) {
            Py_DECREF(lst);
            return NULL;
        }
        PyList_SET_ITEM(lst, i, item);
    }

    return lst;
}
+
+/* Return a list representation of the memoryview. Currently only buffers
+ with native format strings are supported. */
+/*[clinic input]
+memoryview.tolist
+
+Return the data in the buffer as a list of elements.
+[clinic start generated code]*/
+
static PyObject *
memoryview_tolist_impl(PyMemoryViewObject *self)
/*[clinic end generated code: output=a6cda89214fd5a1b input=21e7d0c1860b211a]*/
{
    const Py_buffer *view = &self->view;
    const char *fmt;

    CHECK_RELEASED(self);

    fmt = adjust_fmt(view);
    if (fmt == NULL)
        return NULL;
    /* 0-d: a single scalar; 1-d: a flat list; N-d: nested lists. */
    if (view->ndim == 0) {
        return unpack_single(self, view->buf, fmt);
    }
    else if (view->ndim == 1) {
        return tolist_base(self, view->buf, view->shape,
                           view->strides, view->suboffsets,
                           fmt);
    }
    else {
        return tolist_rec(self, view->buf, view->ndim, view->shape,
                          view->strides, view->suboffsets,
                          fmt);
    }
}
+
+/*[clinic input]
+memoryview.tobytes
+
+ order: str(accept={str, NoneType}, c_default="NULL") = 'C'
+
+Return the data in the buffer as a byte string.
+
+Order can be {'C', 'F', 'A'}. When order is 'C' or 'F', the data of the
+original array is converted to C or Fortran order. For contiguous views,
+'A' returns an exact copy of the physical memory. In particular, in-memory
+Fortran order is preserved. For non-contiguous views, the data is converted
+to C first. order=None is the same as order='C'.
+[clinic start generated code]*/
+
static PyObject *
memoryview_tobytes_impl(PyMemoryViewObject *self, const char *order)
/*[clinic end generated code: output=1288b62560a32a23 input=0efa3ddaeda573a8]*/
{
    Py_buffer *src = VIEW_ADDR(self);
    char ord = 'C';  /* order=None or omitted behaves like 'C' */
    PyObject *bytes;

    CHECK_RELEASED(self);

    if (order) {
        if (strcmp(order, "F") == 0) {
            ord = 'F';
        }
        else if (strcmp(order, "A") == 0) {
            ord = 'A';
        }
        else if (strcmp(order, "C") != 0) {
            PyErr_SetString(PyExc_ValueError,
                "order must be 'C', 'F' or 'A'");
            return NULL;
        }
    }

    bytes = PyBytes_FromStringAndSize(NULL, src->len);
    if (bytes == NULL)
        return NULL;

    /* Fill the bytes object in-place with a contiguous copy of the view. */
    if (PyBuffer_ToContiguous(PyBytes_AS_STRING(bytes), src, src->len, ord) < 0) {
        Py_DECREF(bytes);
        return NULL;
    }

    return bytes;
}
+
+/*[clinic input]
+memoryview.hex
+
+ sep: object = NULL
+ An optional single character or byte to separate hex bytes.
+ bytes_per_sep: int = 1
+ How many bytes between separators. Positive values count from the
+ right, negative values count from the left.
+
+Return the data in the buffer as a str of hexadecimal numbers.
+
+Example:
+>>> value = memoryview(b'\xb9\x01\xef')
+>>> value.hex()
+'b901ef'
+>>> value.hex(':')
+'b9:01:ef'
+>>> value.hex(':', 2)
+'b9:01ef'
+>>> value.hex(':', -2)
+'b901:ef'
+[clinic start generated code]*/
+
static PyObject *
memoryview_hex_impl(PyMemoryViewObject *self, PyObject *sep,
                    int bytes_per_sep)
/*[clinic end generated code: output=430ca760f94f3ca7 input=539f6a3a5fb56946]*/
{
    Py_buffer *src = VIEW_ADDR(self);
    PyObject *bytes;
    PyObject *ret;

    CHECK_RELEASED(self);

    /* Fast path: C-contiguous data can be hexlified in place. */
    if (MV_C_CONTIGUOUS(self->flags)) {
        return _Py_strhex_with_sep(src->buf, src->len, sep, bytes_per_sep);
    }

    /* Otherwise take a C-ordered contiguous copy first. */
    bytes = PyBytes_FromStringAndSize(NULL, src->len);
    if (bytes == NULL)
        return NULL;

    if (PyBuffer_ToContiguous(PyBytes_AS_STRING(bytes), src, src->len, 'C') < 0) {
        Py_DECREF(bytes);
        return NULL;
    }

    ret = _Py_strhex_with_sep(
            PyBytes_AS_STRING(bytes), PyBytes_GET_SIZE(bytes),
            sep, bytes_per_sep);
    Py_DECREF(bytes);

    return ret;
}
+
+static PyObject *
+memory_repr(PyMemoryViewObject *self)
+{
+ if (self->flags & _Py_MEMORYVIEW_RELEASED)
+ return PyUnicode_FromFormat("<released memory at %p>", self);
+ else
+ return PyUnicode_FromFormat("<memory at %p>", self);
+}
+
+
+/**************************************************************************/
+/* Indexing and slicing */
+/**************************************************************************/
+
+static char *
+lookup_dimension(const Py_buffer *view, char *ptr, int dim, Py_ssize_t index)
+{
+ Py_ssize_t nitems; /* items in the given dimension */
+
+ assert(view->shape);
+ assert(view->strides);
+
+ nitems = view->shape[dim];
+ if (index < 0) {
+ index += nitems;
+ }
+ if (index < 0 || index >= nitems) {
+ PyErr_Format(PyExc_IndexError,
+ "index out of bounds on dimension %d", dim + 1);
+ return NULL;
+ }
+
+ ptr += view->strides[dim] * index;
+
+ ptr = ADJUST_PTR(ptr, view->suboffsets, dim);
+
+ return ptr;
+}
+
+/* Get the pointer to the item at index. */
+static char *
+ptr_from_index(const Py_buffer *view, Py_ssize_t index)
+{
+ char *ptr = (char *)view->buf;
+ return lookup_dimension(view, ptr, 0, index);
+}
+
+/* Get the pointer to the item at tuple. */
+static char *
+ptr_from_tuple(const Py_buffer *view, PyObject *tup)
+{
+ char *ptr = (char *)view->buf;
+ Py_ssize_t dim, nindices = PyTuple_GET_SIZE(tup);
+
+ if (nindices > view->ndim) {
+ PyErr_Format(PyExc_TypeError,
+ "cannot index %zd-dimension view with %zd-element tuple",
+ view->ndim, nindices);
+ return NULL;
+ }
+
+ for (dim = 0; dim < nindices; dim++) {
+ Py_ssize_t index;
+ index = PyNumber_AsSsize_t(PyTuple_GET_ITEM(tup, dim),
+ PyExc_IndexError);
+ if (index == -1 && PyErr_Occurred())
+ return NULL;
+ ptr = lookup_dimension(view, ptr, (int)dim, index);
+ if (ptr == NULL)
+ return NULL;
+ }
+ return ptr;
+}
+
/* Return the item at index. In a one-dimensional view, this is an object
   with the type specified by view->format. Otherwise, the item is a sub-view.
   The function is used in memory_subscript() and memory_as_sequence. */
static PyObject *
memory_item(PyMemoryViewObject *self, Py_ssize_t index)
{
    Py_buffer *view = &(self->view);
    const char *fmt;

    CHECK_RELEASED(self);

    /* adjust_fmt() yields the format string consumed by unpack_single();
       it returns NULL with an exception set for unsupported formats. */
    fmt = adjust_fmt(view);
    if (fmt == NULL)
        return NULL;

    if (view->ndim == 0) {
        PyErr_SetString(PyExc_TypeError, "invalid indexing of 0-dim memory");
        return NULL;
    }
    if (view->ndim == 1) {
        char *ptr = ptr_from_index(view, index);
        if (ptr == NULL)
            return NULL;
        return unpack_single(self, ptr, fmt);
    }

    /* ndim > 1: indexing would produce a sub-view, which is unsupported. */
    PyErr_SetString(PyExc_NotImplementedError,
                    "multi-dimensional sub-views are not implemented");
    return NULL;
}
+
/* Return the item at position *key* (a tuple of indices). */
static PyObject *
memory_item_multi(PyMemoryViewObject *self, PyObject *tup)
{
    Py_buffer *view = &(self->view);
    const char *fmt;
    Py_ssize_t nindices = PyTuple_GET_SIZE(tup);
    char *ptr;

    CHECK_RELEASED(self);

    fmt = adjust_fmt(view);
    if (fmt == NULL)
        return NULL;

    /* Fewer indices than dimensions would yield a sub-view: unsupported.
       The case of too many indices is reported by ptr_from_tuple(). */
    if (nindices < view->ndim) {
        PyErr_SetString(PyExc_NotImplementedError,
                        "sub-views are not implemented");
        return NULL;
    }
    ptr = ptr_from_tuple(view, tup);
    if (ptr == NULL)
        return NULL;
    return unpack_single(self, ptr, fmt);
}
+
+static inline int
+init_slice(Py_buffer *base, PyObject *key, int dim)
+{
+ Py_ssize_t start, stop, step, slicelength;
+
+ if (PySlice_Unpack(key, &start, &stop, &step) < 0) {
+ return -1;
+ }
+ slicelength = PySlice_AdjustIndices(base->shape[dim], &start, &stop, step);
+
+
+ if (base->suboffsets == NULL || dim == 0) {
+ adjust_buf:
+ base->buf = (char *)base->buf + base->strides[dim] * start;
+ }
+ else {
+ Py_ssize_t n = dim-1;
+ while (n >= 0 && base->suboffsets[n] < 0)
+ n--;
+ if (n < 0)
+ goto adjust_buf; /* all suboffsets are negative */
+ base->suboffsets[n] = base->suboffsets[n] + base->strides[dim] * start;
+ }
+ base->shape[dim] = slicelength;
+ base->strides[dim] = base->strides[dim] * step;
+
+ return 0;
+}
+
+static int
+is_multislice(PyObject *key)
+{
+ Py_ssize_t size, i;
+
+ if (!PyTuple_Check(key))
+ return 0;
+ size = PyTuple_GET_SIZE(key);
+ if (size == 0)
+ return 0;
+
+ for (i = 0; i < size; i++) {
+ PyObject *x = PyTuple_GET_ITEM(key, i);
+ if (!PySlice_Check(x))
+ return 0;
+ }
+ return 1;
+}
+
+static Py_ssize_t
+is_multiindex(PyObject *key)
+{
+ Py_ssize_t size, i;
+
+ if (!PyTuple_Check(key))
+ return 0;
+ size = PyTuple_GET_SIZE(key);
+ for (i = 0; i < size; i++) {
+ PyObject *x = PyTuple_GET_ITEM(key, i);
+ if (!_PyIndex_Check(x)) {
+ return 0;
+ }
+ }
+ return 1;
+}
+
/* mv[obj] returns an object holding the data for one element if obj
   fully indexes the memoryview or another memoryview object if it
   does not.

   0-d memoryview objects can be referenced using mv[...] or mv[()]
   but not with anything else. */
static PyObject *
memory_subscript(PyMemoryViewObject *self, PyObject *key)
{
    Py_buffer *view;
    view = &(self->view);

    CHECK_RELEASED(self);

    if (view->ndim == 0) {
        /* mv[()] unpacks the single element; mv[...] returns the view. */
        if (PyTuple_Check(key) && PyTuple_GET_SIZE(key) == 0) {
            const char *fmt = adjust_fmt(view);
            if (fmt == NULL)
                return NULL;
            return unpack_single(self, view->buf, fmt);
        }
        else if (key == Py_Ellipsis) {
            return Py_NewRef(self);
        }
        else {
            PyErr_SetString(PyExc_TypeError,
                            "invalid indexing of 0-dim memory");
            return NULL;
        }
    }

    if (_PyIndex_Check(key)) {
        Py_ssize_t index;
        index = PyNumber_AsSsize_t(key, PyExc_IndexError);
        if (index == -1 && PyErr_Occurred())
            return NULL;
        return memory_item(self, index);
    }
    else if (PySlice_Check(key)) {
        CHECK_RESTRICTED(self);
        PyMemoryViewObject *sliced;

        /* Register a new view on the managed buffer, then narrow its
           first dimension in place according to the slice. */
        sliced = (PyMemoryViewObject *)mbuf_add_view(self->mbuf, view);
        if (sliced == NULL)
            return NULL;

        if (init_slice(&sliced->view, key, 0) < 0) {
            Py_DECREF(sliced);
            return NULL;
        }
        init_len(&sliced->view);
        init_flags(sliced);

        return (PyObject *)sliced;
    }
    else if (is_multiindex(key)) {
        return memory_item_multi(self, key);
    }
    else if (is_multislice(key)) {
        PyErr_SetString(PyExc_NotImplementedError,
                        "multi-dimensional slicing is not implemented");
        return NULL;
    }

    PyErr_SetString(PyExc_TypeError, "memoryview: invalid slice key");
    return NULL;
}
+
/* Implement mv[key] = value. Supports 0-dim assignment via mv[...] or
   mv[()], single-index assignment, multi-index (full) assignment, and
   one-dimensional slice assignment from any buffer exporter.
   Returns 0 on success, -1 with an exception set on failure. */
static int
memory_ass_sub(PyMemoryViewObject *self, PyObject *key, PyObject *value)
{
    Py_buffer *view = &(self->view);
    Py_buffer src;
    const char *fmt;
    char *ptr;

    CHECK_RELEASED_INT(self);

    fmt = adjust_fmt(view);
    if (fmt == NULL)
        return -1;

    if (view->readonly) {
        PyErr_SetString(PyExc_TypeError, "cannot modify read-only memory");
        return -1;
    }
    if (value == NULL) {
        /* del mv[key]: the exporter owns the memory, deletion is senseless. */
        PyErr_SetString(PyExc_TypeError, "cannot delete memory");
        return -1;
    }
    if (view->ndim == 0) {
        if (key == Py_Ellipsis ||
            (PyTuple_Check(key) && PyTuple_GET_SIZE(key)==0)) {
            ptr = (char *)view->buf;
            return pack_single(self, ptr, value, fmt);
        }
        else {
            PyErr_SetString(PyExc_TypeError,
                            "invalid indexing of 0-dim memory");
            return -1;
        }
    }

    if (_PyIndex_Check(key)) {
        Py_ssize_t index;
        if (1 < view->ndim) {
            /* A single index into a multi-dim view selects a sub-view. */
            PyErr_SetString(PyExc_NotImplementedError,
                            "sub-views are not implemented");
            return -1;
        }
        index = PyNumber_AsSsize_t(key, PyExc_IndexError);
        if (index == -1 && PyErr_Occurred())
            return -1;
        ptr = ptr_from_index(view, index);
        if (ptr == NULL)
            return -1;
        return pack_single(self, ptr, value, fmt);
    }
    /* one-dimensional: fast path */
    if (PySlice_Check(key) && view->ndim == 1) {
        Py_buffer dest; /* sliced view */
        Py_ssize_t arrays[3]; /* stack storage for dest's shape/strides/suboffsets */
        int ret = -1;

        /* rvalue must be an exporter */
        if (PyObject_GetBuffer(value, &src, PyBUF_FULL_RO) < 0)
            return ret;

        /* Work on a throwaway copy of *view whose shape/strides/suboffsets
           point into arrays[], so init_slice() may mutate them freely. */
        dest = *view;
        dest.shape = &arrays[0]; dest.shape[0] = view->shape[0];
        dest.strides = &arrays[1]; dest.strides[0] = view->strides[0];
        if (view->suboffsets) {
            dest.suboffsets = &arrays[2]; dest.suboffsets[0] = view->suboffsets[0];
        }

        if (init_slice(&dest, key, 0) < 0)
            goto end_block;
        dest.len = dest.shape[0] * dest.itemsize;

        ret = copy_single(self, &dest, &src);

    end_block:
        PyBuffer_Release(&src);
        return ret;
    }
    if (is_multiindex(key)) {
        char *ptr;
        if (PyTuple_GET_SIZE(key) < view->ndim) {
            PyErr_SetString(PyExc_NotImplementedError,
                            "sub-views are not implemented");
            return -1;
        }
        ptr = ptr_from_tuple(view, key);
        if (ptr == NULL)
            return -1;
        return pack_single(self, ptr, value, fmt);
    }
    if (PySlice_Check(key) || is_multislice(key)) {
        /* Call memory_subscript() to produce a sliced lvalue, then copy
           rvalue into lvalue. This is already implemented in _testbuffer.c. */
        PyErr_SetString(PyExc_NotImplementedError,
                        "memoryview slice assignments are currently restricted "
                        "to ndim = 1");
        return -1;
    }

    PyErr_SetString(PyExc_TypeError, "memoryview: invalid slice key");
    return -1;
}
+
+static Py_ssize_t
+memory_length(PyMemoryViewObject *self)
+{
+ CHECK_RELEASED_INT(self);
+ if (self->view.ndim == 0) {
+ PyErr_SetString(PyExc_TypeError, "0-dim memory has no length");
+ return -1;
+ }
+ return self->view.shape[0];
+}
+
+/* As mapping */
+static PyMappingMethods memory_as_mapping = {
+ (lenfunc)memory_length, /* mp_length */
+ (binaryfunc)memory_subscript, /* mp_subscript */
+ (objobjargproc)memory_ass_sub, /* mp_ass_subscript */
+};
+
+/* As sequence */
+static PySequenceMethods memory_as_sequence = {
+ (lenfunc)memory_length, /* sq_length */
+ 0, /* sq_concat */
+ 0, /* sq_repeat */
+ (ssizeargfunc)memory_item, /* sq_item */
+};
+
+
+/**************************************************************************/
+/* Comparisons */
+/**************************************************************************/
+
+#define MV_COMPARE_EX -1 /* exception */
+#define MV_COMPARE_NOT_IMPL -2 /* not implemented */
+
+/* Translate a StructError to "not equal". Preserve other exceptions. */
+static int
+fix_struct_error_int(void)
+{
+ assert(PyErr_Occurred());
+ /* XXX Cannot get at StructError directly? */
+ if (PyErr_ExceptionMatches(PyExc_ImportError) ||
+ PyErr_ExceptionMatches(PyExc_MemoryError)) {
+ return MV_COMPARE_EX;
+ }
+ /* StructError: invalid or unknown format -> not equal */
+ PyErr_Clear();
+ return 0;
+}
+
/* Unpack and compare single items of p and q using the struct module.
   Returns 1 (equal), 0 (not equal) or MV_COMPARE_EX on error. */
static int
struct_unpack_cmp(const char *p, const char *q,
                  struct unpacker *unpack_p, struct unpacker *unpack_q)
{
    PyObject *v, *w;
    int ret;

    /* At this point any exception from the struct module should not be
       StructError, since both formats have been accepted already. */
    v = struct_unpack_single(p, unpack_p);
    if (v == NULL)
        return MV_COMPARE_EX;

    w = struct_unpack_single(q, unpack_q);
    if (w == NULL) {
        Py_DECREF(v);
        return MV_COMPARE_EX;
    }

    /* MV_COMPARE_EX == -1: exceptions are preserved */
    ret = PyObject_RichCompareBool(v, w, Py_EQ);
    Py_DECREF(v);
    Py_DECREF(w);

    return ret;
}
+
/* Unpack and compare single items of p and q. If both p and q have the same
   single element native format, the comparison uses a fast path (gcc creates
   a jump table and converts memcpy into simple assignments on x86/x64).

   Otherwise, the comparison is delegated to the struct module, which is
   30-60x slower. */

/* memcpy (not a direct dereference) is used because p/q may be
   misaligned; sets the local `equal` in the enclosing scope. */
#define CMP_SINGLE(p, q, type) \
    do { \
        type x; \
        type y; \
        memcpy((char *)&x, p, sizeof x); \
        memcpy((char *)&y, q, sizeof y); \
        equal = (x == y); \
    } while (0)
+
/* Compare one item of p against one item of q, both with native format
   character `fmt` ('_' delegates to the struct-module unpackers).
   Returns 1 (equal), 0 (not equal) or MV_COMPARE_EX on error. */
static inline int
unpack_cmp(const char *p, const char *q, char fmt,
           struct unpacker *unpack_p, struct unpacker *unpack_q)
{
    int equal;

    switch (fmt) {

    /* signed integers and fast path for 'B' */
    case 'B': return *((const unsigned char *)p) == *((const unsigned char *)q);
    case 'b': return *((const signed char *)p) == *((const signed char *)q);
    case 'h': CMP_SINGLE(p, q, short); return equal;
    case 'i': CMP_SINGLE(p, q, int); return equal;
    case 'l': CMP_SINGLE(p, q, long); return equal;

    /* boolean */
    case '?': CMP_SINGLE(p, q, _Bool); return equal;

    /* unsigned integers */
    case 'H': CMP_SINGLE(p, q, unsigned short); return equal;
    case 'I': CMP_SINGLE(p, q, unsigned int); return equal;
    case 'L': CMP_SINGLE(p, q, unsigned long); return equal;

    /* native 64-bit */
    case 'q': CMP_SINGLE(p, q, long long); return equal;
    case 'Q': CMP_SINGLE(p, q, unsigned long long); return equal;

    /* ssize_t and size_t */
    case 'n': CMP_SINGLE(p, q, Py_ssize_t); return equal;
    case 'N': CMP_SINGLE(p, q, size_t); return equal;

    /* floats */
    /* XXX DBL_EPSILON? */
    case 'f': CMP_SINGLE(p, q, float); return equal;
    case 'd': CMP_SINGLE(p, q, double); return equal;
    case 'e': {
        /* half-floats have no native C type: widen both to double */
#if PY_LITTLE_ENDIAN
        int endian = 1;
#else
        int endian = 0;
#endif
        /* Note: PyFloat_Unpack2 should never fail */
        double u = PyFloat_Unpack2(p, endian);
        double v = PyFloat_Unpack2(q, endian);
        return (u == v);
    }

    /* bytes object */
    case 'c': return *p == *q;

    /* pointer */
    case 'P': CMP_SINGLE(p, q, void *); return equal;

    /* use the struct module */
    case '_':
        assert(unpack_p);
        assert(unpack_q);
        return struct_unpack_cmp(p, q, unpack_p, unpack_q);
    }

    /* NOT REACHED */
    PyErr_SetString(PyExc_RuntimeError,
                    "memoryview: internal error in richcompare");
    return MV_COMPARE_EX;
}
+
/* Base case for recursive array comparisons. Assumption: ndim == 1.
   Returns 1 (equal), 0 (not equal) or MV_COMPARE_EX on error. */
static int
cmp_base(const char *p, const char *q, const Py_ssize_t *shape,
         const Py_ssize_t *pstrides, const Py_ssize_t *psuboffsets,
         const Py_ssize_t *qstrides, const Py_ssize_t *qsuboffsets,
         char fmt, struct unpacker *unpack_p, struct unpacker *unpack_q)
{
    Py_ssize_t i;
    int equal;

    /* Walk both arrays in lockstep, each by its own stride. */
    for (i = 0; i < shape[0]; p+=pstrides[0], q+=qstrides[0], i++) {
        const char *xp = ADJUST_PTR(p, psuboffsets, 0);
        const char *xq = ADJUST_PTR(q, qsuboffsets, 0);
        equal = unpack_cmp(xp, xq, fmt, unpack_p, unpack_q);
        /* 0 (not equal) and MV_COMPARE_EX both short-circuit. */
        if (equal <= 0)
            return equal;
    }

    return 1;
}
+
/* Recursively compare two multi-dimensional arrays that have the same
   logical structure. Assumption: ndim >= 1.
   Returns 1 (equal), 0 (not equal) or MV_COMPARE_EX on error. */
static int
cmp_rec(const char *p, const char *q,
        Py_ssize_t ndim, const Py_ssize_t *shape,
        const Py_ssize_t *pstrides, const Py_ssize_t *psuboffsets,
        const Py_ssize_t *qstrides, const Py_ssize_t *qsuboffsets,
        char fmt, struct unpacker *unpack_p, struct unpacker *unpack_q)
{
    Py_ssize_t i;
    int equal;

    assert(ndim >= 1);
    assert(shape != NULL);
    assert(pstrides != NULL);
    assert(qstrides != NULL);

    if (ndim == 1) {
        return cmp_base(p, q, shape,
                        pstrides, psuboffsets,
                        qstrides, qsuboffsets,
                        fmt, unpack_p, unpack_q);
    }

    /* Recurse into each sub-array, shifting shape/strides/suboffsets
       by one dimension. */
    for (i = 0; i < shape[0]; p+=pstrides[0], q+=qstrides[0], i++) {
        const char *xp = ADJUST_PTR(p, psuboffsets, 0);
        const char *xq = ADJUST_PTR(q, qsuboffsets, 0);
        equal = cmp_rec(xp, xq, ndim-1, shape+1,
                        pstrides+1, psuboffsets ? psuboffsets+1 : NULL,
                        qstrides+1, qsuboffsets ? qsuboffsets+1 : NULL,
                        fmt, unpack_p, unpack_q);
        if (equal <= 0)
            return equal;
    }

    return 1;
}
+
/* Rich comparison for memoryviews: only == and != are supported.
   The right operand may be any buffer exporter. Returns True/False,
   Py_NotImplemented for unsupported ops/operands, or NULL on error. */
static PyObject *
memory_richcompare(PyObject *v, PyObject *w, int op)
{
    PyObject *res;
    Py_buffer wbuf, *vv;
    Py_buffer *ww = NULL;
    struct unpacker *unpack_v = NULL;
    struct unpacker *unpack_w = NULL;
    char vfmt, wfmt;
    int equal = MV_COMPARE_NOT_IMPL;

    if (op != Py_EQ && op != Py_NE)
        goto result; /* Py_NotImplemented */

    assert(PyMemoryView_Check(v));
    /* A released view compares equal only to itself. */
    if (BASE_INACCESSIBLE(v)) {
        equal = (v == w);
        goto result;
    }
    vv = VIEW_ADDR(v);

    if (PyMemoryView_Check(w)) {
        if (BASE_INACCESSIBLE(w)) {
            equal = (v == w);
            goto result;
        }
        ww = VIEW_ADDR(w);
    }
    else {
        if (PyObject_GetBuffer(w, &wbuf, PyBUF_FULL_RO) < 0) {
            PyErr_Clear();
            goto result; /* Py_NotImplemented */
        }
        ww = &wbuf;
    }

    if (!equiv_shape(vv, ww)) {
        PyErr_Clear();
        equal = 0;
        goto result;
    }

    /* Use fast unpacking for identical primitive C type formats. */
    if (get_native_fmtchar(&vfmt, vv->format) < 0)
        vfmt = '_';
    if (get_native_fmtchar(&wfmt, ww->format) < 0)
        wfmt = '_';
    if (vfmt == '_' || wfmt == '_' || vfmt != wfmt) {
        /* Use struct module unpacking. NOTE: Even for equal format strings,
           memcmp() cannot be used for item comparison since it would give
           incorrect results in the case of NaNs or uninitialized padding
           bytes. */
        vfmt = '_';
        unpack_v = struct_get_unpacker(vv->format, vv->itemsize);
        if (unpack_v == NULL) {
            equal = fix_struct_error_int();
            goto result;
        }
        unpack_w = struct_get_unpacker(ww->format, ww->itemsize);
        if (unpack_w == NULL) {
            equal = fix_struct_error_int();
            goto result;
        }
    }

    /* Dispatch on dimensionality: scalar, flat, or recursive compare. */
    if (vv->ndim == 0) {
        equal = unpack_cmp(vv->buf, ww->buf,
                           vfmt, unpack_v, unpack_w);
    }
    else if (vv->ndim == 1) {
        equal = cmp_base(vv->buf, ww->buf, vv->shape,
                         vv->strides, vv->suboffsets,
                         ww->strides, ww->suboffsets,
                         vfmt, unpack_v, unpack_w);
    }
    else {
        equal = cmp_rec(vv->buf, ww->buf, vv->ndim, vv->shape,
                        vv->strides, vv->suboffsets,
                        ww->strides, ww->suboffsets,
                        vfmt, unpack_v, unpack_w);
    }

result:
    if (equal < 0) {
        if (equal == MV_COMPARE_NOT_IMPL)
            res = Py_NotImplemented;
        else /* exception */
            res = NULL;
    }
    else if ((equal && op == Py_EQ) || (!equal && op == Py_NE))
        res = Py_True;
    else
        res = Py_False;

    /* Release the temporary buffer only if we acquired it ourselves. */
    if (ww == &wbuf)
        PyBuffer_Release(ww);

    unpacker_free(unpack_v);
    unpacker_free(unpack_w);

    return Py_XNewRef(res);
}
+
+/**************************************************************************/
+/* Hash */
+/**************************************************************************/
+
/* hash(mv): cached after the first call. Only read-only views over byte
   formats ('B', 'b', 'c') whose exporter is itself hashable can be
   hashed; the bytes are hashed in C order so the result matches the
   hash of the equal bytes object. */
static Py_hash_t
memory_hash(PyMemoryViewObject *self)
{
    if (self->hash == -1) {
        Py_buffer *view = &self->view;
        char *mem = view->buf;
        Py_ssize_t ret;
        char fmt;

        CHECK_RELEASED_INT(self);

        if (!view->readonly) {
            PyErr_SetString(PyExc_ValueError,
                            "cannot hash writable memoryview object");
            return -1;
        }
        ret = get_native_fmtchar(&fmt, view->format);
        if (ret < 0 || !IS_BYTE_FORMAT(fmt)) {
            PyErr_SetString(PyExc_ValueError,
                            "memoryview: hashing is restricted to formats 'B', 'b' or 'c'");
            return -1;
        }
        /* Refuse to hash if the exporter itself is unhashable (its
           memory is then presumably mutable). */
        if (view->obj != NULL && PyObject_Hash(view->obj) == -1) {
            /* Keep the original error message */
            return -1;
        }

        /* Non-contiguous views are copied into a temporary C-contiguous
           buffer so the hash matches the equivalent bytes object. */
        if (!MV_C_CONTIGUOUS(self->flags)) {
            mem = PyMem_Malloc(view->len);
            if (mem == NULL) {
                PyErr_NoMemory();
                return -1;
            }
            if (buffer_to_contiguous(mem, view, 'C') < 0) {
                PyMem_Free(mem);
                return -1;
            }
        }

        /* Can't fail */
        self->hash = _Py_HashBytes(mem, view->len);

        if (mem != view->buf)
            PyMem_Free(mem);
    }

    return self->hash;
}
+
+
+/**************************************************************************/
+/* getters */
+/**************************************************************************/
+
+static PyObject *
+_IntTupleFromSsizet(int len, Py_ssize_t *vals)
+{
+ int i;
+ PyObject *o;
+ PyObject *intTuple;
+
+ if (vals == NULL)
+ return PyTuple_New(0);
+
+ intTuple = PyTuple_New(len);
+ if (!intTuple)
+ return NULL;
+ for (i=0; i<len; i++) {
+ o = PyLong_FromSsize_t(vals[i]);
+ if (!o) {
+ Py_DECREF(intTuple);
+ return NULL;
+ }
+ PyTuple_SET_ITEM(intTuple, i, o);
+ }
+ return intTuple;
+}
+
+static PyObject *
+memory_obj_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
+{
+ Py_buffer *view = &self->view;
+
+ CHECK_RELEASED(self);
+ if (view->obj == NULL) {
+ Py_RETURN_NONE;
+ }
+ return Py_NewRef(view->obj);
+}
+
/* mv.nbytes: the buffer's total size in bytes (view.len). */
static PyObject *
memory_nbytes_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
{
    CHECK_RELEASED(self);
    return PyLong_FromSsize_t(self->view.len);
}
+
/* mv.format: the struct-module format string of one element. */
static PyObject *
memory_format_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
{
    CHECK_RELEASED(self);
    return PyUnicode_FromString(self->view.format);
}
+
/* mv.itemsize: the size in bytes of one element. */
static PyObject *
memory_itemsize_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
{
    CHECK_RELEASED(self);
    return PyLong_FromSsize_t(self->view.itemsize);
}
+
/* mv.shape: tuple of per-dimension lengths. */
static PyObject *
memory_shape_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
{
    CHECK_RELEASED(self);
    return _IntTupleFromSsizet(self->view.ndim, self->view.shape);
}
+
/* mv.strides: tuple of per-dimension strides in bytes. */
static PyObject *
memory_strides_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
{
    CHECK_RELEASED(self);
    return _IntTupleFromSsizet(self->view.ndim, self->view.strides);
}
+
/* mv.suboffsets: tuple of PIL-style suboffsets (empty when none). */
static PyObject *
memory_suboffsets_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
{
    CHECK_RELEASED(self);
    return _IntTupleFromSsizet(self->view.ndim, self->view.suboffsets);
}
+
/* mv.readonly: whether the underlying memory is read-only. */
static PyObject *
memory_readonly_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
{
    CHECK_RELEASED(self);
    return PyBool_FromLong(self->view.readonly);
}
+
/* mv.ndim: number of dimensions. */
static PyObject *
memory_ndim_get(PyMemoryViewObject *self, void *Py_UNUSED(ignored))
{
    CHECK_RELEASED(self);
    return PyLong_FromLong(self->view.ndim);
}
+
/* mv.c_contiguous: True iff the memory is C-contiguous. */
static PyObject *
memory_c_contiguous(PyMemoryViewObject *self, PyObject *dummy)
{
    CHECK_RELEASED(self);
    return PyBool_FromLong(MV_C_CONTIGUOUS(self->flags));
}
+
/* mv.f_contiguous: True iff the memory is Fortran-contiguous. */
static PyObject *
memory_f_contiguous(PyMemoryViewObject *self, PyObject *dummy)
{
    CHECK_RELEASED(self);
    return PyBool_FromLong(MV_F_CONTIGUOUS(self->flags));
}
+
/* mv.contiguous: True iff the memory is C- or Fortran-contiguous. */
static PyObject *
memory_contiguous(PyMemoryViewObject *self, PyObject *dummy)
{
    CHECK_RELEASED(self);
    return PyBool_FromLong(MV_ANY_CONTIGUOUS(self->flags));
}
+
/* Docstrings for the getset members registered in memory_getsetlist. */
PyDoc_STRVAR(memory_obj_doc,
             "The underlying object of the memoryview.");
PyDoc_STRVAR(memory_nbytes_doc,
             "The amount of space in bytes that the array would use in\n"
             " a contiguous representation.");
PyDoc_STRVAR(memory_readonly_doc,
             "A bool indicating whether the memory is read only.");
PyDoc_STRVAR(memory_itemsize_doc,
             "The size in bytes of each element of the memoryview.");
PyDoc_STRVAR(memory_format_doc,
             "A string containing the format (in struct module style)\n"
             " for each element in the view.");
PyDoc_STRVAR(memory_ndim_doc,
             "An integer indicating how many dimensions of a multi-dimensional\n"
             " array the memory represents.");
PyDoc_STRVAR(memory_shape_doc,
             "A tuple of ndim integers giving the shape of the memory\n"
             " as an N-dimensional array.");
PyDoc_STRVAR(memory_strides_doc,
             "A tuple of ndim integers giving the size in bytes to access\n"
             " each element for each dimension of the array.");
PyDoc_STRVAR(memory_suboffsets_doc,
             "A tuple of integers used internally for PIL-style arrays.");
PyDoc_STRVAR(memory_c_contiguous_doc,
             "A bool indicating whether the memory is C contiguous.");
PyDoc_STRVAR(memory_f_contiguous_doc,
             "A bool indicating whether the memory is Fortran contiguous.");
PyDoc_STRVAR(memory_contiguous_doc,
             "A bool indicating whether the memory is contiguous.");
+
+
/* memoryview attribute table: getters only, all attributes read-only. */
static PyGetSetDef memory_getsetlist[] = {
    {"obj",             (getter)memory_obj_get,        NULL, memory_obj_doc},
    {"nbytes",          (getter)memory_nbytes_get,     NULL, memory_nbytes_doc},
    {"readonly",        (getter)memory_readonly_get,   NULL, memory_readonly_doc},
    {"itemsize",        (getter)memory_itemsize_get,   NULL, memory_itemsize_doc},
    {"format",          (getter)memory_format_get,     NULL, memory_format_doc},
    {"ndim",            (getter)memory_ndim_get,       NULL, memory_ndim_doc},
    {"shape",           (getter)memory_shape_get,      NULL, memory_shape_doc},
    {"strides",         (getter)memory_strides_get,    NULL, memory_strides_doc},
    {"suboffsets",      (getter)memory_suboffsets_get, NULL, memory_suboffsets_doc},
    {"c_contiguous",    (getter)memory_c_contiguous,   NULL, memory_c_contiguous_doc},
    {"f_contiguous",    (getter)memory_f_contiguous,   NULL, memory_f_contiguous_doc},
    {"contiguous",      (getter)memory_contiguous,     NULL, memory_contiguous_doc},
    {NULL, NULL, NULL, NULL},
};
+
+
/* memoryview method table; the *_METHODDEF entries are generated by
   Argument Clinic. __enter__/__exit__ implement the context-manager
   protocol (releasing the buffer on exit). */
static PyMethodDef memory_methods[] = {
    MEMORYVIEW_RELEASE_METHODDEF
    MEMORYVIEW_TOBYTES_METHODDEF
    MEMORYVIEW_HEX_METHODDEF
    MEMORYVIEW_TOLIST_METHODDEF
    MEMORYVIEW_CAST_METHODDEF
    MEMORYVIEW_TOREADONLY_METHODDEF
    MEMORYVIEW__FROM_FLAGS_METHODDEF
    {"__enter__", memory_enter, METH_NOARGS, NULL},
    {"__exit__", memory_exit, METH_VARARGS, NULL},
    {NULL, NULL}
};
+
+/**************************************************************************/
+/* Memoryview Iterator */
+/**************************************************************************/
+
/* Forward declaration; defined below. */
PyTypeObject _PyMemoryIter_Type;

/* Iterator over a 1-D memoryview (see memory_iter()). */
typedef struct {
    PyObject_HEAD
    Py_ssize_t it_index;            // next index to yield
    PyMemoryViewObject *it_seq;     // Set to NULL when iterator is exhausted
    Py_ssize_t it_length;           // length of the view when iteration started
    const char *it_fmt;             // format passed to unpack_single()
} memoryiterobject;
+
static void
memoryiter_dealloc(memoryiterobject *it)
{
    /* Untrack from the GC before teardown so the collector never sees
       a half-destroyed object. */
    _PyObject_GC_UNTRACK(it);
    Py_XDECREF(it->it_seq);
    PyObject_GC_Del(it);
}
+
/* GC traversal: the only owned reference is the underlying view. */
static int
memoryiter_traverse(memoryiterobject *it, visitproc visit, void *arg)
{
    Py_VISIT(it->it_seq);
    return 0;
}
+
/* tp_iternext: yield the next unpacked element, or NULL when exhausted
   (dropping the reference to the view) or on error. */
static PyObject *
memoryiter_next(memoryiterobject *it)
{
    PyMemoryViewObject *seq;
    seq = it->it_seq;
    if (seq == NULL) {
        /* Already exhausted. */
        return NULL;
    }

    if (it->it_index < it->it_length) {
        /* Raise if the view was released mid-iteration. */
        CHECK_RELEASED(seq);
        Py_buffer *view = &(seq->view);
        char *ptr = (char *)seq->view.buf;

        /* 1-D only (enforced in memory_iter): advance by the stride and
           apply the suboffset, if any. */
        ptr += view->strides[0] * it->it_index++;
        ptr = ADJUST_PTR(ptr, view->suboffsets, 0);
        if (ptr == NULL) {
            return NULL;
        }
        return unpack_single(seq, ptr, it->it_fmt);
    }

    /* Exhausted: release the view reference so it can be collected. */
    it->it_seq = NULL;
    Py_DECREF(seq);
    return NULL;
}
+
/* tp_iter: create an iterator over a one-dimensional memoryview.
   0-dim and multi-dimensional views are rejected. */
static PyObject *
memory_iter(PyObject *seq)
{
    if (!PyMemoryView_Check(seq)) {
        PyErr_BadInternalCall();
        return NULL;
    }
    PyMemoryViewObject *obj = (PyMemoryViewObject *)seq;
    int ndims = obj->view.ndim;
    if (ndims == 0) {
        PyErr_SetString(PyExc_TypeError, "invalid indexing of 0-dim memory");
        return NULL;
    }
    if (ndims != 1) {
        PyErr_SetString(PyExc_NotImplementedError,
                        "multi-dimensional sub-views are not implemented");
        return NULL;
    }

    const char *fmt = adjust_fmt(&obj->view);
    if (fmt == NULL) {
        return NULL;
    }

    memoryiterobject *it;
    it = PyObject_GC_New(memoryiterobject, &_PyMemoryIter_Type);
    if (it == NULL) {
        return NULL;
    }
    it->it_fmt = fmt;
    /* memory_length() cannot fail here: ndim == 1 was verified above. */
    it->it_length = memory_length(obj);
    it->it_index = 0;
    it->it_seq = (PyMemoryViewObject*)Py_NewRef(obj);
    _PyObject_GC_TRACK(it);
    return (PyObject *)it;
}
+
/* Type object for the memoryview iterator. Not exposed as a callable
   type: instances are created only by memory_iter(). */
PyTypeObject _PyMemoryIter_Type = {
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
    .tp_name = "memory_iterator",
    .tp_basicsize = sizeof(memoryiterobject),
    // methods
    .tp_dealloc = (destructor)memoryiter_dealloc,
    .tp_getattro = PyObject_GenericGetAttr,
    .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,
    .tp_traverse = (traverseproc)memoryiter_traverse,
    .tp_iter = PyObject_SelfIter,
    .tp_iternext = (iternextfunc)memoryiter_next,
};
+
/* The memoryview type. Variable-size: the format/shape/strides/suboffsets
   arrays are allocated inline in ob_array (tp_itemsize = sizeof(Py_ssize_t)). */
PyTypeObject PyMemoryView_Type = {
    PyVarObject_HEAD_INIT(&PyType_Type, 0)
    "memoryview",                             /* tp_name */
    offsetof(PyMemoryViewObject, ob_array),   /* tp_basicsize */
    sizeof(Py_ssize_t),                       /* tp_itemsize */
    (destructor)memory_dealloc,               /* tp_dealloc */
    0,                                        /* tp_vectorcall_offset */
    0,                                        /* tp_getattr */
    0,                                        /* tp_setattr */
    0,                                        /* tp_as_async */
    (reprfunc)memory_repr,                    /* tp_repr */
    0,                                        /* tp_as_number */
    &memory_as_sequence,                      /* tp_as_sequence */
    &memory_as_mapping,                       /* tp_as_mapping */
    (hashfunc)memory_hash,                    /* tp_hash */
    0,                                        /* tp_call */
    0,                                        /* tp_str */
    PyObject_GenericGetAttr,                  /* tp_getattro */
    0,                                        /* tp_setattro */
    &memory_as_buffer,                        /* tp_as_buffer */
    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC |
        Py_TPFLAGS_SEQUENCE,                  /* tp_flags */
    memoryview__doc__,                        /* tp_doc */
    (traverseproc)memory_traverse,            /* tp_traverse */
    (inquiry)memory_clear,                    /* tp_clear */
    memory_richcompare,                       /* tp_richcompare */
    offsetof(PyMemoryViewObject, weakreflist),/* tp_weaklistoffset */
    memory_iter,                              /* tp_iter */
    0,                                        /* tp_iternext */
    memory_methods,                           /* tp_methods */
    0,                                        /* tp_members */
    memory_getsetlist,                        /* tp_getset */
    0,                                        /* tp_base */
    0,                                        /* tp_dict */
    0,                                        /* tp_descr_get */
    0,                                        /* tp_descr_set */
    0,                                        /* tp_dictoffset */
    0,                                        /* tp_init */
    0,                                        /* tp_alloc */
    memoryview,                               /* tp_new */
};