1"""
2Tests for array coercion, mainly through testing `np.array` results directly.
3Note that other such tests exist e.g. in `test_api.py` and many corner-cases
4are tested (sometimes indirectly) elsewhere.
5"""
6
7import pytest
8from pytest import param
9
10from itertools import product
11
12import numpy as np
13from numpy.core._rational_tests import rational
14from numpy.core._multiarray_umath import _discover_array_parameters
15
16from numpy.testing import (
17    assert_array_equal, assert_warns, IS_PYPY)
18
19
def arraylikes():
    """
    Generator for functions converting an array into various array-likes.

    Each yielded value is a ``pytest.param`` wrapping a callable that takes
    an ndarray and returns an "array-like" view/wrapper of it.  Note that
    some of the yielded array-likes are not capable of handling all dtypes
    (e.g. ``memoryview`` cannot wrap object arrays).
    """
    # base array (identity, the array itself):
    def ndarray(a):
        return a

    yield param(ndarray, id="ndarray")

    # subclass:
    class MyArr(np.ndarray):
        pass

    def subclass(a):
        return a.view(MyArr)

    yield param(subclass, id="subclass")

    class _SequenceLike():
        # We are giving a warning that array-likes were also expected to be
        # sequence-like in `np.array([array_like])`.  This can be removed
        # when the deprecation has expired (started NumPy 1.20).
        def __len__(self):
            raise TypeError

        def __getitem__(self):
            raise TypeError

    # __array__ protocol:
    class ArrayDunder(_SequenceLike):
        def __init__(self, a):
            self.a = a

        def __array__(self, dtype=None):
            return self.a

    yield param(ArrayDunder, id="__array__")

    # memory-view (buffer protocol):
    yield param(memoryview, id="memoryview")

    # Array-interface
    class ArrayInterface(_SequenceLike):
        def __init__(self, a):
            self.a = a  # need to hold on to keep interface valid
            self.__array_interface__ = a.__array_interface__

    yield param(ArrayInterface, id="__array_interface__")

    # Array-Struct
    class ArrayStruct(_SequenceLike):
        def __init__(self, a):
            self.a = a  # need to hold on to keep struct valid
            self.__array_struct__ = a.__array_struct__

    yield param(ArrayStruct, id="__array_struct__")
79
80
def scalar_instances(times=True, extended_precision=True, user_dtype=True):
    """Yield a hard-coded selection of scalar instances as pytest params.

    The keyword arguments allow skipping the time dtypes, the extended
    precision floats/complexes, and the user-defined rational dtype.
    """
    # Floats (a square root gives a value with a "full" mantissa):
    for float_type in [np.float16, np.float32, np.float64]:
        yield param(np.sqrt(float_type(5)), id=float_type.__name__)
    if extended_precision:
        yield param(np.sqrt(np.longdouble(5)), id="longdouble")

    # Complex:
    for complex_type in [np.complex64, np.complex128]:
        yield param(np.sqrt(complex_type(2+3j)), id=complex_type.__name__)
    if extended_precision:
        yield param(np.sqrt(np.longcomplex(2+3j)), id="clongdouble")

    # Bool:
    # XFAIL: Bool should be added, but has some bad properties when it
    # comes to strings, see also gh-9875
    # yield param(np.bool_(0), id="bool")

    # Integers (signed first, then unsigned):
    for int_type in [np.int8, np.int16, np.int32, np.int64,
                     np.uint8, np.uint16, np.uint32, np.uint64]:
        yield param(int_type(2), id=int_type.__name__)

    # Rational (user-defined dtype):
    if user_dtype:
        yield param(rational(1, 2), id="rational")

    # Cannot create a structured void scalar directly, so pull one out
    # of a structured array instead:
    structured = np.array([(1, 3)], "i,i")[0]
    assert isinstance(structured, np.void)
    assert structured.dtype == np.dtype("i,i")
    yield param(structured, id="structured")

    if times:
        # Datetimes and timedelta:
        yield param(np.timedelta64(2), id="timedelta64[generic]")
        yield param(np.timedelta64(23, "s"), id="timedelta64[s]")
        yield param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)")

        yield param(np.datetime64("NaT"), id="datetime64[generic](NaT)")
        yield param(np.datetime64("2020-06-07 12:43", "ms"),
                    id="datetime64[ms]")

    # Strings and unstructured void:
    yield param(np.bytes_(b"1234"), id="bytes")
    yield param(np.unicode_("2345"), id="unicode")
    yield param(np.void(b"4321"), id="unstructured_void")
135
136
def is_parametric_dtype(dtype):
    """Returns True if the dtype is a parametric legacy dtype (itemsize
    is 0, or a datetime/timedelta without a unit).
    """
    if dtype.itemsize == 0:
        # Flexible dtypes (e.g. "S0", "U0", "V0") without a length:
        return True
    if issubclass(dtype.type, (np.datetime64, np.timedelta64)):
        if dtype.name.endswith("64"):
            # Generic time units: the name is plain "datetime64" or
            # "timedelta64" rather than e.g. "datetime64[s]".
            return True
    return False
148
149
class TestStringDiscovery:
    @pytest.mark.parametrize("obj",
            [object(), 1.2, 10**43, None, "string"],
            ids=["object", "1.2", "10**43", "None", "string"])
    def test_basic_stringlength(self, obj):
        # The discovered string dtype is exactly as long as `str(obj)`:
        expected_dtype = np.dtype("S%d" % len(str(obj)))

        assert np.array(obj, dtype="S").dtype == expected_dtype
        assert np.array([obj], dtype="S").dtype == expected_dtype

        # A nested array is also discovered correctly
        obj_arr = np.array(obj, dtype="O")
        assert np.array(obj_arr, dtype="S").dtype == expected_dtype
        # Check that .astype() behaves identical
        assert obj_arr.astype("S").dtype == expected_dtype

    @pytest.mark.parametrize("obj",
            [object(), 1.2, 10**43, None, "string"],
            ids=["object", "1.2", "10**43", "None", "string"])
    def test_nested_arrays_stringlength(self, obj):
        # Two nested 0-D object arrays discover the same string length:
        expected_dtype = np.dtype("S%d" % len(str(obj)))
        obj_arr = np.array(obj, dtype="O")
        assert np.array([obj_arr, obj_arr], dtype="S").dtype == expected_dtype

    @pytest.mark.parametrize("arraylike", arraylikes())
    def test_unpack_first_level(self, arraylike):
        # We unpack exactly one level of array likes
        inner = np.array([None])
        inner[0] = np.array(1.2)
        # the length of the included item, not of the float dtype
        expected_dtype = np.dtype("S%d" % len(str(inner[0])))

        wrapped = arraylike(inner)
        # casting to string usually calls str(obj)
        res = np.array([wrapped], dtype="S")
        assert res.shape == (1, 1)
        assert res.dtype == expected_dtype
191
class TestScalarDiscovery:
    def test_void_special_case(self):
        # Void dtypes with structures discover tuples as elements
        arr = np.array((1, 2, 3), dtype="i,i,i")
        assert arr.shape == ()
        arr = np.array([(1, 2, 3)], dtype="i,i,i")
        assert arr.shape == (1,)

    def test_char_special_case(self):
        # The "c" dtype unpacks a string into single characters, adding
        # one dimension:
        arr = np.array("string", dtype="c")
        assert arr.shape == (6,)
        assert arr.dtype.char == "c"
        arr = np.array(["string"], dtype="c")
        assert arr.shape == (1, 6)
        assert arr.dtype.char == "c"

    def test_char_special_case_deep(self):
        # Check that the character special case errors correctly if the
        # array is too deep:
        nested = ["string"]  # 2 dimensions (due to string being sequence)
        for i in range(np.MAXDIMS - 2):
            nested = [nested]

        arr = np.array(nested, dtype='c')
        assert arr.shape == (1,) * (np.MAXDIMS - 1) + (6,)
        with pytest.raises(ValueError):
            np.array([nested], dtype="c")

    def test_unknown_object(self):
        # An arbitrary object coerces to a 0-D object array:
        arr = np.array(object())
        assert arr.shape == ()
        assert arr.dtype == np.dtype("O")

    @pytest.mark.parametrize("scalar", scalar_instances())
    def test_scalar(self, scalar):
        # A scalar (bare or nested) preserves its dtype during coercion:
        arr = np.array(scalar)
        assert arr.shape == ()
        assert arr.dtype == scalar.dtype

        arr = np.array([[scalar, scalar]])
        assert arr.shape == (1, 2)
        assert arr.dtype == scalar.dtype

    # Additionally to string this test also runs into a corner case
    # with datetime promotion (the difference is the promotion order).
    def test_scalar_promotion(self):
        for sc1, sc2 in product(scalar_instances(), scalar_instances()):
            sc1, sc2 = sc1.values[0], sc2.values[0]
            # test all combinations:
            try:
                arr = np.array([sc1, sc2])
            except (TypeError, ValueError):
                # The promotion between two times can fail
                # XFAIL (ValueError): Some object casts are currently undefined
                continue
            assert arr.shape == (2,)
            try:
                dt1, dt2 = sc1.dtype, sc2.dtype
                expected_dtype = np.promote_types(dt1, dt2)
                assert arr.dtype == expected_dtype
            except TypeError:
                # Will currently always go to object dtype
                assert arr.dtype == np.dtype("O")

    @pytest.mark.parametrize("scalar", scalar_instances())
    def test_scalar_coercion(self, scalar):
        # This tests various scalar coercion paths, mainly for the numerical
        # types.  It includes some paths not directly related to `np.array`
        if isinstance(scalar, np.inexact):
            # Ensure we have a full-precision number if available
            scalar = type(scalar)((scalar * 2)**0.5)

        if type(scalar) is rational:
            # Rational generally fails due to a missing cast. In the future
            # object casts should automatically be defined based on `setitem`.
            pytest.xfail("Rational to object cast is undefined currently.")

        # Use casting from object:
        arr = np.array(scalar, dtype=object).astype(scalar.dtype)

        # Test various ways to create an array containing this scalar:
        arr1 = np.array(scalar).reshape(1)
        arr2 = np.array([scalar])
        arr3 = np.empty(1, dtype=scalar.dtype)
        arr3[0] = scalar
        arr4 = np.empty(1, dtype=scalar.dtype)
        arr4[:] = [scalar]
        # All of these methods should yield the same results
        assert_array_equal(arr, arr1)
        assert_array_equal(arr, arr2)
        assert_array_equal(arr, arr3)
        assert_array_equal(arr, arr4)

    @pytest.mark.xfail(IS_PYPY, reason="`int(np.complex128(3))` fails on PyPy")
    @pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
    @pytest.mark.parametrize("cast_to", scalar_instances())
    def test_scalar_coercion_same_as_cast_and_assignment(self, cast_to):
        """
        Test that in most cases:
           * `np.array(scalar, dtype=dtype)`
           * `np.empty((), dtype=dtype)[()] = scalar`
           * `np.array(scalar).astype(dtype)`
        should behave the same.  The only exceptions are parametric dtypes
        (mainly datetime/timedelta without unit) and void without fields.
        """
        dtype = cast_to.dtype  # use to parametrize only the target dtype

        for scalar in scalar_instances(times=False):
            scalar = scalar.values[0]

            if dtype.type == np.void:
                if scalar.dtype.fields is not None and dtype.fields is None:
                    # Here, coercion to "V6" works, but the cast fails.
                    # Since the types are identical, SETITEM takes care of
                    # this, but has different rules than the cast.
                    with pytest.raises(TypeError):
                        np.array(scalar).astype(dtype)
                    np.array(scalar, dtype=dtype)
                    np.array([scalar], dtype=dtype)
                    continue

            # The main test, we first try to use casting and if it succeeds
            # continue below testing that things are the same, otherwise
            # test that the alternative paths at least also fail.
            try:
                cast = np.array(scalar).astype(dtype)
            except (TypeError, ValueError, RuntimeError):
                # coercion should also raise (error type may change)
                with pytest.raises(Exception):
                    np.array(scalar, dtype=dtype)

                if (isinstance(scalar, rational) and
                        np.issubdtype(dtype, np.signedinteger)):
                    # skip the remaining checks for this scalar only
                    # (a `return` here would skip all following scalars)
                    continue

                with pytest.raises(Exception):
                    np.array([scalar], dtype=dtype)
                # assignment should also raise
                res = np.zeros((), dtype=dtype)
                with pytest.raises(Exception):
                    res[()] = scalar

                # move on to the next scalar instance:
                continue

            # Non error path:
            arr = np.array(scalar, dtype=dtype)
            assert_array_equal(arr, cast)
            # assignment behaves the same
            ass = np.zeros((), dtype=dtype)
            ass[()] = scalar
            assert_array_equal(ass, cast)

    @pytest.mark.parametrize("dtype_char", np.typecodes["All"])
    def test_default_dtype_instance(self, dtype_char):
        if dtype_char in "SU":
            dtype = np.dtype(dtype_char + "1")
        elif dtype_char == "V":
            # Legacy behaviour was to use V8. The reason was float64 being the
            # default dtype and that having 8 bytes.
            dtype = np.dtype("V8")
        else:
            dtype = np.dtype(dtype_char)

        discovered_dtype, _ = _discover_array_parameters([], type(dtype))

        assert discovered_dtype == dtype
        assert discovered_dtype.itemsize == dtype.itemsize

    @pytest.mark.parametrize("dtype", np.typecodes["Integer"])
    def test_scalar_to_int_coerce_does_not_cast(self, dtype):
        """
        Signed integers are currently different in that they do not cast other
        NumPy scalars, but instead use scalar.__int__(). The hardcoded
        exception to this rule is `np.array(scalar, dtype=integer)`.
        """
        dtype = np.dtype(dtype)
        invalid_int = np.ulonglong(-1)

        float_nan = np.float64(np.nan)

        for scalar in [float_nan, invalid_int]:
            # This is a special case using casting logic and thus not failing:
            coerced = np.array(scalar, dtype=dtype)
            cast = np.array(scalar).astype(dtype)
            assert_array_equal(coerced, cast)

            # However these fail:
            with pytest.raises((ValueError, OverflowError)):
                np.array([scalar], dtype=dtype)
            with pytest.raises((ValueError, OverflowError)):
                cast[()] = scalar
383
384
class TestTimeScalars:
    @pytest.mark.parametrize("dtype", [np.int64, np.float32])
    @pytest.mark.parametrize("scalar",
            [param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)"),
             param(np.timedelta64(123, "s"), id="timedelta64[s]"),
             param(np.datetime64("NaT", "generic"), id="datetime64[generic](NaT)"),
             param(np.datetime64(1, "D"), id="datetime64[D]")],)
    def test_coercion_basic(self, dtype, scalar):
        # Coercion with an explicit dtype and an explicit cast must agree.
        # Note the `[scalar]` is there because np.array(scalar) uses stricter
        # `scalar.__int__()` rules for backward compatibility right now.
        arr = np.array(scalar, dtype=dtype)
        cast = np.array(scalar).astype(dtype)
        assert_array_equal(arr, cast)

        ass = np.ones((), dtype=dtype)
        if issubclass(dtype, np.integer):
            with pytest.raises(TypeError):
                # raises, as would np.array([scalar], dtype=dtype), this is
                # conversion from times, but behaviour of integers.
                ass[()] = scalar
        else:
            ass[()] = scalar
            assert_array_equal(ass, cast)

    @pytest.mark.parametrize("dtype", [np.int64, np.float32])
    @pytest.mark.parametrize("scalar",
            [param(np.timedelta64(123, "ns"), id="timedelta64[ns]"),
             param(np.timedelta64(12, "generic"), id="timedelta64[generic]")])
    def test_coercion_timedelta_convert_to_number(self, dtype, scalar):
        # Only "ns" and "generic" timedeltas can be converted to numbers
        # so these are slightly special.
        arr = np.array(scalar, dtype=dtype)
        cast = np.array(scalar).astype(dtype)
        ass = np.ones((), dtype=dtype)
        # assignment works for these units (see comment above), so all
        # three paths must agree:
        ass[()] = scalar

        assert_array_equal(arr, cast)
        # BUG FIX: the original asserted `(cast, cast)`, which is trivially
        # true; the assignment result must be compared against the cast:
        assert_array_equal(ass, cast)

    @pytest.mark.parametrize("dtype", ["S6", "U6"])
    @pytest.mark.parametrize(["val", "unit"],
            [param(123, "s", id="[s]"), param(123, "D", id="[D]")])
    def test_coercion_assignment_datetime(self, val, unit, dtype):
        # String from datetime64 assignment is currently special cased to
        # never use casting.  This is because casting will error in this
        # case, and traditionally in most cases the behaviour is maintained
        # like this.  (`np.array(scalar, dtype="U6")` would have failed before)
        # TODO: This discrepancy _should_ be resolved, either by relaxing the
        #       cast, or by deprecating the first part.
        scalar = np.datetime64(val, unit)
        dtype = np.dtype(dtype)
        cut_string = dtype.type(str(scalar)[:6])

        arr = np.array(scalar, dtype=dtype)
        assert arr[()] == cut_string
        ass = np.ones((), dtype=dtype)
        ass[()] = scalar
        assert ass[()] == cut_string

        with pytest.raises(RuntimeError):
            # However, unlike the above assignment using `str(scalar)[:6]`
            # due to being handled by the string DType and not by casting
            # the explicit cast fails:
            np.array(scalar).astype(dtype)

    @pytest.mark.parametrize(["val", "unit"],
            [param(123, "s", id="[s]"), param(123, "D", id="[D]")])
    def test_coercion_assignment_timedelta(self, val, unit):
        scalar = np.timedelta64(val, unit)

        # Unlike datetime64, timedelta allows the unsafe cast, so coercion,
        # casting, and assignment all succeed and agree:
        np.array(scalar, dtype="S6")
        cast = np.array(scalar).astype("S6")
        ass = np.ones((), dtype="S6")
        ass[()] = scalar
        expected = scalar.astype("S")[:6]
        assert cast[()] == expected
        assert ass[()] == expected
464
class TestNested:
    """Tests for dimension discovery of nested sequences during coercion."""
    def test_nested_simple(self):
        # Nest a list to exactly `np.MAXDIMS` dimensions; coercion works:
        initial = [1.2]
        nested = initial
        for i in range(np.MAXDIMS - 1):
            nested = [nested]

        arr = np.array(nested, dtype="float64")
        assert arr.shape == (1,) * np.MAXDIMS
        # One additional nesting level exceeds MAXDIMS and must raise:
        with pytest.raises(ValueError):
            np.array([nested], dtype="float64")

        # We discover object automatically at this time:
        with assert_warns(np.VisibleDeprecationWarning):
            arr = np.array([nested])
        assert arr.dtype == np.dtype("O")
        assert arr.shape == (1,) * np.MAXDIMS
        # The element stored at maximum depth is the original list itself:
        assert arr.item() is initial

    def test_pathological_self_containing(self):
        # Test that this also works for two nested sequences
        l = []
        l.append(l)  # a list containing itself is "infinitely" nested
        arr = np.array([l, l, l], dtype=object)
        # Discovery stops at np.MAXDIMS dimensions:
        assert arr.shape == (3,) + (1,) * (np.MAXDIMS - 1)

        # Also check a ragged case:
        arr = np.array([l, [None], l], dtype=object)
        assert arr.shape == (3, 1)

    @pytest.mark.parametrize("arraylike", arraylikes())
    def test_nested_arraylikes(self, arraylike):
        # We try storing an array like into an array, but the array-like
        # will have too many dimensions.  This means the shape discovery
        # decides that the array-like must be treated as an object (a special
        # case of ragged discovery).  The result will be an array with one
        # dimension less than the maximum dimensions, and the array being
        # assigned to it (which does work for object or if `float(arraylike)`
        # works).
        initial = arraylike(np.ones((1, 1)))

        nested = initial
        for i in range(np.MAXDIMS - 1):
            nested = [nested]

        with pytest.warns(DeprecationWarning):
            # It will refuse to assign the array into
            np.array(nested, dtype="float64")

        # If this is object, we end up assigning a (1, 1) array into (1,)
        # (due to running out of dimensions), this is currently supported but
        # a special case which is not ideal.
        arr = np.array(nested, dtype=object)
        assert arr.shape == (1,) * np.MAXDIMS
        assert arr.item() == np.array(initial).item()

    @pytest.mark.parametrize("arraylike", arraylikes())
    def test_uneven_depth_ragged(self, arraylike):
        # A 2x2 array-like mixed with sequences of differing depth:
        arr = np.arange(4).reshape((2, 2))
        arr = arraylike(arr)

        # Array is ragged in the second dimension already:
        out = np.array([arr, [arr]], dtype=object)
        assert out.shape == (2,)
        assert out[0] is arr
        assert type(out[1]) is list

        # Array is ragged in the third dimension:
        with pytest.raises(ValueError):
            # This is a broadcast error during assignment, because
            # the array shape would be (2, 2, 2) but `arr[0, 0] = arr` fails.
            np.array([arr, [arr, arr]], dtype=object)

    def test_empty_sequence(self):
        # An empty inner sequence ends dimension discovery (ragged result):
        arr = np.array([[], [1], [[1]]], dtype=object)
        assert arr.shape == (3,)

        # The empty sequence stops further dimension discovery, so the
        # result shape will be (0,) which leads to an error during:
        with pytest.raises(ValueError):
            np.array([[], np.empty((0, 1))], dtype=object)

    def test_array_of_different_depths(self):
        # When multiple arrays (or array-likes) are included in a
        # sequences and have different depth, we currently discover
        # as many dimensions as they share. (see also gh-17224)
        arr = np.zeros((3, 2))
        mismatch_first_dim = np.zeros((1, 2))
        mismatch_second_dim = np.zeros((3, 3))

        # Mismatch in the second axis: the shared (2, 3) shape is found:
        dtype, shape = _discover_array_parameters(
            [arr, mismatch_second_dim], dtype=np.dtype("O"))
        assert shape == (2, 3)

        # Mismatch already in the first axis: only one dimension is shared:
        dtype, shape = _discover_array_parameters(
            [arr, mismatch_first_dim], dtype=np.dtype("O"))
        assert shape == (2,)
        # The second case is currently supported because the arrays
        # can be stored as objects:
        res = np.asarray([arr, mismatch_first_dim], dtype=np.dtype("O"))
        assert res[0] is arr
        assert res[1] is mismatch_first_dim
566        assert res[1] is mismatch_first_dim
567
568
class TestBadSequences:
    # These are tests for bad objects passed into `np.array`, in general
    # these have undefined behaviour.  In the old code they partially worked
    # when now they will fail.  We could (and maybe should) create a copy
    # of all sequences to be safe against bad-actors.

    def test_growing_list(self):
        # List to coerce, `mylist` will append to it during coercion
        obj = []
        class mylist(list):
            def __len__(self):
                # Evil: grows the outer sequence while it is being coerced.
                obj.append([1, 2])
                return super().__len__()

        obj.append(mylist([1, 2]))

        # The size change during coercion is detected and raises:
        with pytest.raises(RuntimeError):
            np.array(obj)

    # Note: We do not test a shrinking list.  These do very evil things
    #       and the only way to fix them would be to copy all sequences.
    #       (which may be a real option in the future).

    def test_mutated_list(self):
        # List to coerce, `mylist` will mutate the first element
        obj = []
        class mylist(list):
            def __len__(self):
                obj[0] = [2, 3]  # replace with a different list.
                return super().__len__()

        obj.append([2, 3])
        obj.append(mylist([1, 2]))
        # The mutation during coercion is detected and raises:
        with pytest.raises(RuntimeError):
            np.array(obj)

    def test_replace_0d_array(self):
        # List to coerce, `baditem` will mutate the cached 0-D array
        obj = []
        class baditem:
            def __len__(self):
                obj[0][0] = 2  # mutate the nested list in place.
                raise ValueError("not actually a sequence!")

            def __getitem__(self):
                pass

        # Runs into a corner case in the new code, the `array(2)` is cached
        # so replacing it invalidates the cache.
        obj.append([np.array(2), baditem()])
        with pytest.raises(RuntimeError):
            np.array(obj)
621
622
class TestArrayLikes:
    """Tests for coercion of objects exposing the array-like protocols."""
    @pytest.mark.parametrize("arraylike", arraylikes())
    def test_0d_object_special_case(self, arraylike):
        # 0-D array-likes convert when passed directly, but are stored
        # as-is when nested inside an object-dtype coercion:
        arr = np.array(0.)
        obj = arraylike(arr)
        # A single array-like is always converted:
        res = np.array(obj, dtype=object)
        assert_array_equal(arr, res)

        # But a single 0-D nested array-like never:
        res = np.array([obj], dtype=object)
        assert res[0] is obj

    def test_0d_generic_special_case(self):
        class ArraySubclass(np.ndarray):
            def __float__(self):
                raise TypeError("e.g. quantities raise on this")

        arr = np.array(0.)
        obj = arr.view(ArraySubclass)
        res = np.array(obj)
        # The subclass is simply cast:
        assert_array_equal(arr, res)

        # If the 0-D array-like is included, __float__ is currently
        # guaranteed to be used.  We may want to change that, quantities
        # and masked arrays half make use of this.
        with pytest.raises(TypeError):
            np.array([obj])

        # The same holds for memoryview:
        obj = memoryview(arr)
        res = np.array(obj)
        assert_array_equal(arr, res)
        with pytest.raises(ValueError):
            # The error type does not matter much here.
            np.array([obj])

    def test_arraylike_classes(self):
        # The classes of array-likes should generally be acceptable to be
        # stored inside a numpy (object) array.  This tests all of the
        # special attributes (since all are checked during coercion).
        arr = np.array(np.int64)
        assert arr[()] is np.int64
        arr = np.array([np.int64])
        assert arr[0] is np.int64

        # This also works for properties/unbound methods:
        class ArrayLike:
            @property
            def __array_interface__(self):
                pass

            @property
            def __array_struct__(self):
                pass

            def __array__(self):
                pass

        arr = np.array(ArrayLike)
        assert arr[()] is ArrayLike
        arr = np.array([ArrayLike])
        assert arr[0] is ArrayLike

    @pytest.mark.skipif(
            np.dtype(np.intp).itemsize < 8, reason="Needs 64bit platform")
    def test_too_large_array_error_paths(self):
        """Test the error paths, including for memory leaks"""
        arr = np.array(0, dtype="uint8")
        # Guarantees that a contiguous copy won't work:
        arr = np.broadcast_to(arr, 2**62)

        for i in range(5):
            # repeat, to ensure caching cannot have an effect:
            with pytest.raises(MemoryError):
                np.array(arr)
            with pytest.raises(MemoryError):
                np.array([arr])

    @pytest.mark.parametrize("attribute",
        ["__array_interface__", "__array__", "__array_struct__"])
    @pytest.mark.parametrize("error", [RecursionError, MemoryError])
    def test_bad_array_like_attributes(self, attribute, error):
        # RecursionError and MemoryError are considered fatal. All errors
        # (except AttributeError) should probably be raised in the future,
        # but shapely made use of it, so it will require a deprecation.

        class BadInterface:
            def __getattr__(self, attr):
                # Raise the fatal error only for the attribute under test;
                # other lookups fall through (object has no __getattr__,
                # so this line itself ends in an AttributeError).
                if attr == attribute:
                    raise error
                super().__getattr__(attr)

        with pytest.raises(error):
            np.array(BadInterface())

    @pytest.mark.parametrize("error", [RecursionError, MemoryError])
    def test_bad_array_like_bad_length(self, error):
        # RecursionError and MemoryError are considered "critical" in
        # sequences. We could expand this more generally though. (NumPy 1.20)
        class BadSequence:
            def __len__(self):
                raise error
            def __getitem__(self):
                # must have getitem to be a Sequence
                return 1

        with pytest.raises(error):
            np.array(BadSequence())
733
734