# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

# coding: utf-8
# pylint: disable=too-many-lines, protected-access
# pylint: disable=import-error, no-name-in-module, undefined-variable

"""NDArray API of MXNet."""


# `slice` the builtin is shadowed below by indexing helpers; keep an alias.
# Python 2 exposes it in __builtin__, Python 3 in builtins.
try:
    from __builtin__ import slice as py_slice
except ImportError:
    from builtins import slice as py_slice

from array import array as native_array
import ctypes
import warnings
import operator
from functools import reduce  # pylint: disable=redefined-builtin
import numpy as np
from ..base import _LIB, numeric_types, integer_types
from ..base import c_str, c_array, c_array_buf, c_handle_array, mx_real_t
from ..base import mx_uint, NDArrayHandle, check_call, DLPackHandle, mx_int, mx_int64
from ..base import ctypes2buffer
from ..runtime import Features
from ..context import Context, current_context
from ..util import is_np_array
from . import _internal
from . import op
from ._internal import NDArrayBase

__all__ = ["NDArray", "concatenate", "_DTYPE_NP_TO_MX", "_DTYPE_MX_TO_NP", "_GRAD_REQ_MAP",
           "ones", "add", "arange", "linspace", "eye", "divide", "equal", "full", "greater",
           "greater_equal", "imdecode", "lesser", "lesser_equal", "logical_and", "logical_or",
           "logical_xor", "maximum", "minimum", "moveaxis", "modulo", "multiply", "not_equal",
           "onehot_encode", "power", "subtract", "true_divide", "waitall", "_new_empty_handle",
           "histogram", "split_v2", "to_dlpack_for_read", "to_dlpack_for_write", "from_dlpack",
           "from_numpy", "zeros", "indexing_key_expand_implicit_axes", "get_indexing_dispatch_code",
           "get_oshape_of_gather_nd_op"]

# Storage-type codes mirrored from the C++ backend.
_STORAGE_TYPE_UNDEFINED = -1
_STORAGE_TYPE_DEFAULT = 0
_STORAGE_TYPE_ROW_SPARSE = 1
_STORAGE_TYPE_CSR = 2
_SIGNED_INT32_UPPER_LIMIT = (2**31 - 1)

# pylint: disable= no-member
# Mapping between numpy dtypes and the backend's integer dtype codes.
_DTYPE_NP_TO_MX = {
    None: -1,
    np.float32: 0,
    np.float64: 1,
    np.float16: 2,
    np.uint8: 3,
    np.int32: 4,
    np.int8: 5,
    np.int64: 6,
    np.bool_: 7,
    np.dtype([('bfloat16', np.uint16)]): 12,
}

_DTYPE_MX_TO_NP = {
    -1: None,
    0: np.float32,
    1: np.float64,
    2: np.float16,
    3: np.uint8,
    4: np.int32,
    5: np.int8,
    6: np.int64,
    7: np.bool_,
    12: np.dtype([('bfloat16', np.uint16)]),
}

_STORAGE_TYPE_STR_TO_ID = {
    'undefined': _STORAGE_TYPE_UNDEFINED,
    'default': _STORAGE_TYPE_DEFAULT,
    'row_sparse': _STORAGE_TYPE_ROW_SPARSE,
    'csr': _STORAGE_TYPE_CSR,
}

_STORAGE_TYPE_ID_TO_STR = {
    _STORAGE_TYPE_UNDEFINED: 'undefined',
    _STORAGE_TYPE_DEFAULT: 'default',
    _STORAGE_TYPE_ROW_SPARSE: 'row_sparse',
    _STORAGE_TYPE_CSR: 'csr',
}

# NOTE: 'add' maps to 3 (not 2) to match the backend's GradReq enum.
_GRAD_REQ_MAP = {
    'null': 0,
    'write': 1,
    'add': 3
}
# pylint: enable= no-member

# Return code for dispatching indexing function call
_NDARRAY_UNSUPPORTED_INDEXING = -1
_NDARRAY_BASIC_INDEXING = 0
_NDARRAY_ADVANCED_INDEXING = 1
_NDARRAY_EMPTY_TUPLE_INDEXING = 2

# Return code for 0-d boolean array handler
_NDARRAY_NO_ZERO_DIM_BOOL_ARRAY = -1
_NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE = 0
_NDARRAY_ZERO_DIM_BOOL_ARRAY_TRUE = 1

# Caching whether MXNet was built with INT64 support or not
_INT64_TENSOR_SIZE_ENABLED = None

def _int64_enabled():
    """Return whether the backend was built with INT64_TENSOR_SIZE support.

    The feature probe is performed once and cached in the module-level
    ``_INT64_TENSOR_SIZE_ENABLED`` flag.
    """
    global _INT64_TENSOR_SIZE_ENABLED
    if _INT64_TENSOR_SIZE_ENABLED is None:
        _INT64_TENSOR_SIZE_ENABLED = Features().is_enabled('INT64_TENSOR_SIZE')
    return _INT64_TENSOR_SIZE_ENABLED

def _new_empty_handle():
    """Returns a new empty handle.

    Empty handle can be used to hold a result.

    Returns
    -------
    handle
        A new empty `NDArray` handle.
    """
    hdl = NDArrayHandle()
    check_call(_LIB.MXNDArrayCreateNone(ctypes.byref(hdl)))
    return hdl


def _new_alloc_handle(shape, ctx, delay_alloc, dtype=mx_real_t):
    """Return a new handle with specified shape and context.

    Empty handle is only used to hold results.

    Parameters
    ----------
    shape : tuple of int
        Shape of the array to allocate.
    ctx : Context
        Device context.
    delay_alloc : bool
        Whether to delay the memory allocation until first use.
    dtype : numpy dtype, optional
        Data type of the array.

    Returns
    -------
    handle
        A new empty `NDArray` handle.
    """
    hdl = NDArrayHandle()
    if _int64_enabled():
        check_call(_LIB.MXNDArrayCreateEx64(
            c_array_buf(mx_int64, native_array('q', shape)),
            ctypes.c_int(len(shape)),
            ctypes.c_int(ctx.device_typeid),
            ctypes.c_int(ctx.device_id),
            ctypes.c_int(int(delay_alloc)),
            ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
            ctypes.byref(hdl)))
    else:
        # When shape is larger than uint32 then there is an overflow error at python end itself.
        # It needs to be caught here since the call doesn't even reach backend.
        size = 1
        for idx in shape:
            size = size * idx
        if size > _SIGNED_INT32_UPPER_LIMIT:
            raise Exception("[_new_alloc_handle] Size of tensor you are trying to allocate is " +
                            "larger than 2^31 elements. Please build with flag " +
                            "USE_INT64_TENSOR_SIZE=1")
        # bfloat16 is a structured dtype; _DTYPE_NP_TO_MX keys it by the dtype
        # object itself rather than by the scalar type.
        if np.dtype(dtype) == np.dtype([('bfloat16', np.uint16)]):
            dtype_type = np.dtype(dtype)
        else:
            dtype_type = np.dtype(dtype).type
        check_call(_LIB.MXNDArrayCreateEx(
            c_array_buf(mx_uint, native_array('I', shape)),
            mx_uint(len(shape)),
            ctypes.c_int(ctx.device_typeid),
            ctypes.c_int(ctx.device_id),
            ctypes.c_int(int(delay_alloc)),
            ctypes.c_int(int(_DTYPE_NP_TO_MX[dtype_type])),
            ctypes.byref(hdl)))
    return hdl


def _new_from_shared_mem(shared_pid, shared_id, shape, dtype):
    """Create an NDArray handle attached to an existing shared-memory region."""
    hdl = NDArrayHandle()
    check_call(_LIB.MXNDArrayCreateFromSharedMemEx(
        ctypes.c_int(shared_pid),
        ctypes.c_int(shared_id),
        c_array(mx_int, shape),
        mx_int(len(shape)),
        ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
        ctypes.byref(hdl)))
    return hdl


def waitall():
    """Wait for all async operations to finish in MXNet.

    This function is used for benchmarking only.

    .. note::

       If your mxnet code throws an exception, then waitall can cause performance impact.
    """
    check_call(_LIB.MXNDArrayWaitAll())


def _storage_type(handle):
    """Return the backend storage-type code for the given NDArray handle."""
    storage_type = ctypes.c_int(0)
    check_call(_LIB.MXNDArrayGetStorageType(handle, ctypes.byref(storage_type)))
    return storage_type.value


class NDArray(NDArrayBase):
    """An array object representing a multidimensional, homogeneous array of
fixed-size items.

    """
    __slots__ = []
    # make numpy functions return NDArray instead of numpy object array
    __array_priority__ = 1000.0
    # Extension type code for TVM function.
229 # See C++ side of definition(kTVMNDArrayTypeCode) at include/mxmet/tensor_blob.h 230 _tvm_tcode = 19 231 # pylint: disable= no-member, undefined-variable 232 233 def as_np_ndarray(self): 234 """Convert mxnet.ndarray.NDArray to mxnet.numpy.ndarray.""" 235 storage_type = self.stype 236 if storage_type != 'default': 237 raise ValueError('cannot convert ndarray of stype {} to numpy ndarray' 238 .format(str(type(storage_type)))) 239 from ..numpy import ndarray 240 hdl = NDArrayHandle() 241 check_call(_LIB.MXShallowCopyNDArray(self.handle, ctypes.byref(hdl))) 242 return ndarray(handle=hdl, writable=self.writable) 243 244 def as_nd_ndarray(self): 245 """A convenience function for creating a classic ndarray from the current 246 ndarray with zero copy. For this class, it just returns itself since it is 247 already a classic ndarray.""" 248 return self 249 250 @property 251 def _tvm_handle(self): 252 return self.handle.value 253 254 def __repr__(self): 255 """Returns a string representation of the array.""" 256 shape_info = 'x'.join(['%d' % x for x in self.shape]) 257 return '\n%s\n<%s %s @%s>' % (str(self.asnumpy()), 258 self.__class__.__name__, 259 shape_info, self.ctx) 260 261 def __reduce__(self): 262 return NDArray, (None,), self.__getstate__() 263 264 def _to_shared_mem(self): 265 shared_pid = ctypes.c_int() 266 shared_id = ctypes.c_int() 267 check_call(_LIB.MXNDArrayGetSharedMemHandle( 268 self.handle, ctypes.byref(shared_pid), ctypes.byref(shared_id))) 269 return shared_pid.value, shared_id.value, self.shape, self.dtype 270 271 def __abs__(self): 272 """x.__abs__() <=> abs(x) <=> x.abs() <=> mx.nd.abs(x, y)""" 273 return self.abs() 274 275 def __add__(self, other): 276 """x.__add__(y) <=> x+y <=> mx.nd.add(x, y) """ 277 return add(self, other) 278 279 def __iadd__(self, other): 280 """x.__iadd__(y) <=> x+=y """ 281 if not self.writable: 282 raise ValueError('trying to add to a readonly NDArray') 283 if isinstance(other, NDArray): 284 return op.broadcast_add(self, 
other, out=self) 285 elif isinstance(other, numeric_types): 286 return _internal._plus_scalar(self, float(other), out=self) 287 else: 288 raise TypeError('type %s not supported' % str(type(other))) 289 290 def __radd__(self, other): 291 return self.__add__(other) 292 293 def __sub__(self, other): 294 """x.__sub__(y) <=> x-y <=> mx.nd.subtract(x, y) """ 295 return subtract(self, other) 296 297 def __isub__(self, other): 298 """x.__isub__(y) <=> x-=y """ 299 if not self.writable: 300 raise ValueError('trying to subtract from a readonly NDArray') 301 if isinstance(other, NDArray): 302 return op.broadcast_sub(self, other, out=self) 303 elif isinstance(other, numeric_types): 304 return _internal._minus_scalar(self, float(other), out=self) 305 else: 306 raise TypeError('type %s not supported' % str(type(other))) 307 308 def __rsub__(self, other): 309 """x.__rsub__(y) <=> y-x <=> mx.nd.subtract(y, x) """ 310 return subtract(other, self) 311 312 def __mul__(self, other): 313 """x.__mul__(y) <=> x*y <=> mx.nd.multiply(x, y) """ 314 return multiply(self, other) 315 316 def __neg__(self): 317 """x.__neg__(y) <=> -x """ 318 return _internal._mul_scalar(self, -1.0) 319 320 def __imul__(self, other): 321 """x.__imul__(y) <=> x*=y """ 322 if not self.writable: 323 raise ValueError('trying to multiply to a readonly NDArray') 324 if isinstance(other, NDArray): 325 return op.broadcast_mul(self, other, out=self) 326 elif isinstance(other, numeric_types): 327 return _internal._mul_scalar(self, float(other), out=self) 328 else: 329 raise TypeError('type %s not supported' % str(type(other))) 330 331 def __rmul__(self, other): 332 return self.__mul__(other) 333 334 def __div__(self, other): 335 """x.__div__(y) <=> x/y <=> mx.nd.divide(x, y) """ 336 return divide(self, other) 337 338 def __rdiv__(self, other): 339 """x.__rdiv__(y) <=> y/x <=> mx.nd.divide(y, x) """ 340 return divide(other, self) 341 342 def __idiv__(self, other): 343 """x.__rdiv__(y) <=> x/=y """ 344 if not self.writable: 
345 raise ValueError('trying to divide from a readonly NDArray') 346 if isinstance(other, NDArray): 347 return op.broadcast_div(self, other, out=self) 348 elif isinstance(other, numeric_types): 349 return _internal._div_scalar(self, float(other), out=self) 350 else: 351 raise TypeError('type %s not supported' % str(type(other))) 352 353 def __truediv__(self, other): 354 return divide(self, other) 355 356 def __rtruediv__(self, other): 357 return divide(other, self) 358 359 def __itruediv__(self, other): 360 return self.__idiv__(other) 361 362 def __mod__(self, other): 363 """x.__mod__(y) <=> x%y <=> mx.nd.modulo(x, y) """ 364 return modulo(self, other) 365 366 def __rmod__(self, other): 367 """x.__rmod__(y) <=> y%x <=> mx.nd.modulo(y, x) """ 368 return modulo(other, self) 369 370 def __imod__(self, other): 371 """x.__rmod__(y) <=> x%=y """ 372 if not self.writable: 373 raise ValueError('trying to take modulo from a readonly NDArray') 374 if isinstance(other, NDArray): 375 return op.broadcast_mod(self, other, out=self) 376 elif isinstance(other, numeric_types): 377 return _internal._mod_scalar(self, float(other), out=self) 378 else: 379 raise TypeError('type %s not supported' % str(type(other))) 380 381 def __pow__(self, other): 382 """x.__pow__(y) <=> x**y <=> mx.nd.power(x,y) """ 383 return power(self, other) 384 385 def __rpow__(self, other): 386 """x.__pow__(y) <=> y**x <=> mx.nd.power(y,x) """ 387 return power(other, self) 388 389 def __eq__(self, other): 390 """x.__eq__(y) <=> x==y <=> mx.nd.equal(x, y) """ 391 return equal(self, other) 392 393 def __hash__(self): 394 """Default hash function.""" 395 return id(self)//16 396 397 def __ne__(self, other): 398 """x.__ne__(y) <=> x!=y <=> mx.nd.not_equal(x, y) """ 399 return not_equal(self, other) 400 401 def __gt__(self, other): 402 """x.__gt__(y) <=> x>y <=> mx.nd.greater(x, y) """ 403 return greater(self, other) 404 405 def __ge__(self, other): 406 """x.__ge__(y) <=> x>=y <=> mx.nd.greater_equal(x, y) """ 407 
return greater_equal(self, other) 408 409 def __lt__(self, other): 410 """x.__lt__(y) <=> x<y <=> mx.nd.lesser(x, y) """ 411 return lesser(self, other) 412 413 def __le__(self, other): 414 """x.__le__(y) <=> x<=y <=> mx.nd.less_equal(x, y) """ 415 return lesser_equal(self, other) 416 417 def __bool__(self): 418 num_elements = reduce(operator.mul, self.shape, 1) 419 if num_elements == 0: 420 return False 421 elif num_elements == 1: 422 return bool(self.asscalar()) 423 else: 424 raise ValueError("The truth value of an NDArray with multiple elements " \ 425 "is ambiguous.") 426 427 __nonzero__ = __bool__ 428 429 def __len__(self): 430 """Number of element along the first axis.""" 431 return self.shape[0] 432 433 def __getstate__(self): 434 handle = self.handle 435 this = {'handle' : None} 436 if handle is not None: 437 length = ctypes.c_size_t() 438 cptr = ctypes.POINTER(ctypes.c_char)() 439 check_call(_LIB.MXNDArraySaveRawBytes(self.handle, 440 ctypes.byref(length), 441 ctypes.byref(cptr))) 442 this['handle'] = ctypes2buffer(cptr, length.value) 443 return this 444 445 def __setstate__(self, state): 446 # pylint: disable=assigning-non-slot 447 handle = state['handle'] 448 if handle is not None: 449 buf = handle 450 handle = NDArrayHandle() 451 ptr = (ctypes.c_char * len(buf)).from_buffer(buf) 452 length = ctypes.c_size_t(len(buf)) 453 check_call(_LIB.MXNDArrayLoadFromRawBytes(ptr, length, ctypes.byref(handle))) 454 self.handle = handle 455 else: 456 self.handle = None 457 458 def __setitem__(self, key, value): 459 """x.__setitem__(i, y) <=> x[i]=y 460 461 Sets ``self[key]`` to ``value``. 462 463 This functions supports advanced indexing as defined in `the NumPy 464 advanced indexing documentation 465 <https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing>`_, 466 with the restriction that boolean array indexing is not supported. 
467 468 Parameters 469 ---------- 470 key : int, mxnet.ndarray.slice, list, np.ndarray, NDArray, or tuple of all previous types 471 The indexing key. 472 value : scalar or array-like object that can be broadcast to the shape of self[key] 473 The value to set. 474 475 Examples 476 -------- 477 >>> x = mx.nd.zeros((2, 3)) 478 >>> x[:] = 1 479 >>> x.asnumpy() 480 array([[ 1., 1., 1.], 481 [ 1., 1., 1.]], dtype=float32) 482 >>> x[:, 1:2] = 2 483 >>> x.asnumpy() 484 array([[ 1., 2., 1.], 485 [ 1., 2., 1.]], dtype=float32) 486 >>> x[1:2, 1:] = 3 487 >>> x.asnumpy() 488 array([[ 1., 2., 1.], 489 [ 1., 3., 3.]], dtype=float32) 490 >>> x[1:, 0:2] = mx.nd.zeros((1, 2)) 491 >>> x.asnumpy() 492 array([[ 1., 2., 1.], 493 [ 0., 0., 3.]], dtype=float32) 494 >>> x[1, 2] = 4 495 >>> x.asnumpy() 496 array([[ 1., 2., 1.], 497 [ 0., 0., 4.]], dtype=float32) 498 >>> x[[0], [1, 2]] = 5 499 >>> x.asnumpy() 500 array([[ 1., 5., 5.], 501 [ 0., 0., 4.]], dtype=float32) 502 >>> x[::-1, 0:2:2] = [6] 503 >>> x.asnumpy() 504 array([[ 6., 5., 5.], 505 [ 6., 0., 4.]], dtype=float32) 506 """ 507 if self.ndim == 0: 508 if not isinstance(key, (tuple, py_slice)): 509 raise IndexError('scalar tensor can only accept `()` and `:` as index') 510 if isinstance(key, tuple) and len(key) != 0: 511 raise IndexError('scalar tensor can only accept `()` and `:` as index') 512 if isinstance(value, numeric_types): 513 self._full(value) 514 elif isinstance(value, NDArray) and value.size == 1: 515 if value.shape != self.shape: 516 value = value.reshape(self.shape) 517 value.copyto(self) 518 elif isinstance(value, (np.ndarray, np.generic)) and value.size == 1: 519 if isinstance(value, np.generic) or value.shape != self.shape: 520 value = value.reshape(self.shape) 521 self._sync_copyfrom(value) 522 else: 523 raise ValueError('setting an array element with a sequence.') 524 525 elif self.size == 0: 526 return 527 528 else: 529 key, _ = indexing_key_expand_implicit_axes(key, self.shape) 530 slc_key = tuple(idx for idx 
in key if idx is not None) 531 532 if len(slc_key) < self.ndim: 533 raise RuntimeError( 534 'too few indices after normalization: expected `ndim` ({}) ' 535 'but got {}. This is a bug, please report it!' 536 ''.format(self.ndim, len(slc_key)) 537 ) 538 if len(slc_key) > self.ndim: 539 raise IndexError( 540 'too many indices ({}) for array with {} dimensions' 541 ''.format(len(slc_key), self.ndim) 542 ) 543 544 indexing_dispatch_code = get_indexing_dispatch_code(slc_key) 545 if indexing_dispatch_code == _NDARRAY_BASIC_INDEXING: 546 self._set_nd_basic_indexing(key, value) 547 elif indexing_dispatch_code == _NDARRAY_ADVANCED_INDEXING: 548 self._set_nd_advanced_indexing(key, value) 549 else: 550 raise ValueError( 551 'Indexing NDArray with index {} of type {} is not supported' 552 ''.format(key, type(key)) 553 ) 554 555 def __getitem__(self, key): # pylint: disable=too-many-return-statements 556 """x.__getitem__(i) <=> x[i] 557 558 Returns a sliced view of this array if the elements fetched are contiguous in memory; 559 otherwise, returns a newly created NDArray. 560 This functions supports advanced indexing defined in the following reference with 561 some restrictions. 562 563 For basic indexing, i.e., if ``key`` consists only of integers, 564 ``slice``, ``Ellipsis`` (``...``) and ``None``, a mutable view is 565 returned that shares memory with this array if the accessed portion is 566 contiguous in memory. 567 Otherwise, a newly created ``NDArray`` is returned. 568 569 This functions supports advanced indexing as defined in `the NumPy 570 advanced indexing documentation 571 <https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing>`_, 572 with the restriction that boolean array indexing is not supported. 573 574 Parameters 575 ---------- 576 key : int, mxnet.ndarray.slice, list, np.ndarray, NDArray, or tuple of all previous types 577 Indexing key. 
578 579 Examples 580 -------- 581 The default is to give explicit indices for all axes: 582 583 >>> x = mx.nd.arange(0, 6).reshape((2, 3)) 584 >>> x.asnumpy() 585 array([[ 0., 1., 2.], 586 [ 3., 4., 5.]], dtype=float32) 587 >>> x[0, :].asnumpy() 588 array([0., 1., 2.], dtype=float32) 589 >>> x[0, :2].asnumpy() 590 array([0., 1.], dtype=float32) 591 >>> x[:, :-1].asnumpy() 592 array([[0., 1.], 593 [3., 4.]], dtype=float32) 594 595 If fewer indices are given, they are automatically supplemented by an 596 appropriate number of ``slice(None)`` ("``:``") to the right. For 597 instance, a single integer indexes along the first axis: 598 599 >>> x = mx.nd.arange(0, 6).reshape((2, 3)) 600 >>> x[0].asnumpy() 601 array([0., 1., 2.], dtype=float32) 602 >>> x[1:].asnumpy() 603 array([[3., 4., 5.]], dtype=float32) 604 605 To omit a range of axes that should be kept as-is, an `Ellipsis` 606 ("``...``") can be used: 607 608 >>> x = mx.nd.arange(0, 16).reshape((2, 2, 2, 2)) 609 >>> x[0, ..., 1].asnumpy() 610 array([[1., 3.], 611 [5., 7.]], dtype=float32) 612 >>> x[0, :, :, 1].asnumpy() # equivalent 613 array([[1., 3.], 614 [5., 7.]], dtype=float32) 615 616 New axes of length 1 can be created by inserting ``None`` 617 (`numpy.newaxis`) in the index: 618 619 >>> x = mx.nd.arange(0, 6).reshape((2, 3)) 620 >>> x[None, :, :].asnumpy() 621 array([[[0., 1., 2.], 622 [3., 4., 5.]]], dtype=float32) 623 >>> x[None, :, :].shape 624 (1, 2, 3) 625 626 If the indexed portion of the array is contiguous in memory, no data 627 is copied. 
Instead, a shared-memory view of the original array is 628 returned, and changes to that view affect the original array: 629 630 >>> x = mx.nd.arange(0, 8).reshape((2, 2, 2)) 631 >>> y = x[0] # contiguous 632 >>> y.asnumpy() 633 array([[0., 1.], 634 [2., 3.]], dtype=float32) 635 >>> y[:] = -1 636 >>> x.asnumpy() 637 array([[[-1., -1.], 638 [-1., -1.]], 639 <BLANKLINE> 640 [[ 4., 5.], 641 [ 6., 7.]]], dtype=float32) 642 >>> x = mx.nd.arange(0, 8).reshape((2, 2, 2)) 643 >>> y = x[1, :1, :] # contiguous 644 >>> y.asnumpy() 645 array([[4., 5.]], dtype=float32) 646 >>> y[:] = -1 647 >>> x.asnumpy() 648 array([[[ 0., 1.], 649 [ 2., 3.]], 650 <BLANKLINE> 651 [[-1., -1.], 652 [ 6., 7.]]], dtype=float32) 653 >>> x = mx.nd.arange(0, 8).reshape((2, 2, 2)) 654 >>> y = x[:, :, 1] # not contiguous 655 >>> y.asnumpy() 656 array([[1., 3.], 657 [5., 7.]], dtype=float32) 658 >>> y[:] = -1 659 >>> x.asnumpy() 660 array([[[0., 1.], 661 [2., 3.]], 662 <BLANKLINE> 663 [[4., 5.], 664 [6., 7.]]], dtype=float32) 665 666 If the indexing key contains `list`, `numpy.ndarray` or `NDArray` 667 objects, advanced indexing is triggered, which always returns a 668 copy: 669 670 >>> x = mx.nd.arange(0, 8).reshape((2, 2, 2)) 671 >>> x[[0, 1]].asnumpy() 672 array([[[0., 1.], 673 [2., 3.]], 674 <BLANKLINE> 675 [[4., 5.], 676 [6., 7.]]], dtype=float32) 677 >>> x[[0, 1], :].asnumpy() # equivalent 678 array([[[0., 1.], 679 [2., 3.]], 680 <BLANKLINE> 681 [[4., 5.], 682 [6., 7.]]], dtype=float32) 683 >>> y = np.array([0, 1], dtype='int32') 684 >>> x[1:, y].asnumpy() 685 array([[[4., 5.], 686 [6., 7.]]], dtype=float32) 687 >>> y = mx.nd.array([0, 1], dtype='int32') 688 >>> x[1:, y].asnumpy() 689 array([[[4., 5.], 690 [6., 7.]]], dtype=float32) 691 """ 692 ndim = self.ndim 693 shape = self.shape 694 695 if ndim == 0 and (key == () or key == slice(None, None, None)): 696 return self 697 698 # Handle simple cases for higher speed 699 if isinstance(key, tuple) and len(key) == 0: 700 return self 701 if 
isinstance(key, tuple) and len(key) == ndim\ 702 and all(isinstance(idx, integer_types) for idx in key): 703 out = self 704 for idx in key: 705 out = out[idx] 706 return out 707 if isinstance(key, integer_types): 708 if key > shape[0] - 1: 709 raise IndexError( 710 'index {} is out of bounds for axis 0 with size {}'.format( 711 key, shape[0])) 712 return self._at(key) 713 elif isinstance(key, py_slice): 714 if (key.step is None or key.step == 1): 715 if key.start is not None or key.stop is not None: 716 return self._slice(key.start, key.stop) 717 else: 718 return self 719 elif key.step == 0: 720 raise ValueError("slice step cannot be zero") 721 722 key, _ = indexing_key_expand_implicit_axes(key, self.shape) 723 if len(key) == 0: 724 raise ValueError('indexing key cannot be an empty tuple') 725 726 indexing_dispatch_code = get_indexing_dispatch_code(key) 727 if indexing_dispatch_code == _NDARRAY_BASIC_INDEXING: 728 return self._get_nd_basic_indexing(key) 729 elif indexing_dispatch_code == _NDARRAY_ADVANCED_INDEXING: 730 return self._get_nd_advanced_indexing(key) 731 else: 732 raise RuntimeError 733 734 def _prepare_value_nd(self, value, bcast_shape, squeeze_axes=None): 735 """Return a broadcast `NDArray` with same context and dtype as ``self``. 736 For setting item, The returned `ndarray` is squeezed according to squeeze_axes since the 737 value_nd is assigned to not yet expanded space in original array. 738 `value`: numeric types or array like. 739 `bcast_shape`: a shape tuple. 740 `squeeze_axes`: a sequence of axes to squeeze in the value array. 
741 """ 742 if isinstance(value, numeric_types): 743 value_nd = full(bcast_shape, value, ctx=self.ctx, dtype=self.dtype) 744 elif type(value) == self.__class__: # pylint: disable=unidiomatic-typecheck 745 value_nd = value.as_in_context(self.ctx) 746 if value_nd.dtype != self.dtype: 747 value_nd = value_nd.astype(self.dtype) 748 else: 749 try: 750 value_nd = array(value, ctx=self.ctx, dtype=self.dtype) 751 except: 752 raise TypeError('{} does not support assignment with non-array-like ' 753 'object {} of type {}'.format(self.__class__, value, type(value))) 754 755 # For setitem, if there is None in indices, we need to squeeze the assigned value_nd 756 # since None is also ignored in slicing the original array. 757 if squeeze_axes and value_nd.ndim > len(bcast_shape): 758 squeeze_axes = tuple([ax for ax in squeeze_axes if ax < len(value_nd.shape)]) 759 value_nd = value_nd.squeeze(axis=tuple(squeeze_axes)) 760 761 # handle the cases like the following 762 # a = nd.zeros((3, 3)), b = nd.ones((1, 1, 1, 1, 3)), a[0] = b 763 # b cannot broadcast directly to a[0].shape unless its leading 1-size axes are trimmed 764 if value_nd.ndim > len(bcast_shape): 765 squeeze_axes = [] 766 for i in range(value_nd.ndim - len(bcast_shape)): 767 if value_nd.shape[i] == 1: 768 squeeze_axes.append(i) 769 else: 770 break 771 if squeeze_axes: 772 value_nd = value_nd.squeeze(squeeze_axes) 773 774 if value_nd.shape != bcast_shape: 775 if value_nd.size == 0: 776 value_nd = value_nd.reshape(bcast_shape) 777 else: 778 value_nd = value_nd.broadcast_to(bcast_shape) 779 return value_nd 780 781 # pylint: disable=invalid-name 782 @staticmethod 783 def _basic_indexing_key_to_begin_end_step(idcs, shape, keep_none=True): 784 """Map a tuple of ``slice`` and ``None`` (ignored) to begin, end, step tuples.""" 785 idcs = [idx for idx in idcs if idx is not None] 786 idcs = [idx if isinstance(idx, py_slice) else _int_to_slice(idx) 787 for idx in idcs] 788 789 if keep_none: 790 sss_list = [(slc.start, slc.stop, 
slc.step) for slc, n in zip(idcs, shape)] 791 else: 792 sss_list = [slc.indices(n) for slc, n in zip(idcs, shape)] 793 return tuple(zip(*sss_list)) 794 # pylint: enable=invalid-name 795 796 # pylint: disable=invalid-name 797 @staticmethod 798 def _basic_indexing_key_int_to_slice(idcs): 799 """Return the converted indexing tuple and the integer axes.""" 800 int_axes = [] 801 conv_idcs = [] 802 for ax, idx in enumerate(idcs): 803 if isinstance(idx, integer_types): 804 conv_idcs.append(_int_to_slice(idx)) 805 int_axes.append(ax) 806 else: 807 conv_idcs.append(idx) 808 809 return tuple(conv_idcs), tuple(int_axes) 810 # pylint: enable=invalid-name 811 812 @staticmethod 813 def _new_axes_after_basic_indexing(axes, key): 814 """Return indices of ``axes`` after slicing with ``key``. 815 816 This function is used to calculate the positions where new axes should 817 end up after indexing, taking into account the removal of axes by 818 integer indexing. 819 820 The ``key`` sequence should be the exapanded key including slices, integer types 821 and ``None``. 822 """ 823 steps = [0] + [0 if isinstance(idx, integer_types) else 1 for idx in key] 824 cum_steps = np.cumsum(steps) 825 axes_after = tuple(cum_steps[axes]) 826 return axes_after 827 828 @staticmethod 829 def _new_axes_after_advanced_indexing(key, adv_axs, bcast_adv_ndim, adv_are_adjacent): # pylint: disable=invalid-name 830 """ 831 Return indices of ``axes`` after slicing with ``key_nd``. 832 833 This function is used to calculate the positions where new axes should 834 end up after indexing, taking into account the removal of axes by 835 integer indexing. 836 837 The ``key`` sequence should be the exapanded key including slices, array like objects, 838 integer types and ``None``. 839 ``adv_axes`` is the sequence of indices of advanced axes. 840 ``bcast_adv_ndim`` is the number of dimensions of advanced indexing subspace. 841 ``adv_are_adjacent`` is a boolean value. 
Value being True means all advanced indicies are adjacent. 842 843 Note: integer indices are also considered advanced indices here. 844 """ 845 new_axes = [ax for ax in range(len(key)) if key[ax] is None] 846 adv_axs_set = set(adv_axs) 847 if not adv_are_adjacent: 848 steps = [bcast_adv_ndim] + [0 if ax in adv_axs_set else 1 for ax in range(len(key))] 849 else: 850 steps = [0] + [0 if ax in adv_axs_set else 1 for ax in range(len(key))] 851 cum_steps = np.cumsum(steps) 852 axes_after = tuple(cum_steps[new_axes]) 853 return axes_after 854 855 # pylint: disable=invalid-name 856 @staticmethod 857 def _basic_indexing_slice_is_contiguous(slc_key, shape): 858 """Whether indexing with the given key results in a contiguous array. 859 860 The rule is: From right to left, if in an axis, a slice produces a 861 proper subset, the later slice must have <=1 elements. 862 863 The ``slc_key`` sequence must have the same length as ``shape`` and 864 only contain `slice` objects. 865 """ 866 assert len(slc_key) == len(shape) 867 is_subset = False 868 total_sliced_elements = np.prod([_get_slice_len(slc, n) 869 for slc, n in zip(slc_key, shape)]) 870 if total_sliced_elements in (0, 1): 871 return True 872 for idx, n in zip(reversed(slc_key), reversed(shape)): 873 _, _, step = idx.indices(n) 874 num_elements = _get_slice_len(idx, n) 875 if num_elements == 0: 876 return True 877 elif num_elements > 1 and (step > 1 or step < 0): 878 # We do not support the case of reverse slicing of multiple elements and 879 # forward slicing of #elements > 1 and step > 1 880 return False 881 elif is_subset: 882 if num_elements > 1: 883 return False 884 else: 885 if num_elements < n: 886 is_subset = True 887 return True 888 # pylint: enable=invalid-name 889 890 @staticmethod 891 def _basic_indexing_sliced_shape(slc_key, shape): 892 """Return the shape after slicing with the given key.""" 893 assert len(slc_key) == len(shape) 894 sliced_shape = [] 895 for slc, n in zip(slc_key, shape): 896 num_elements = 
            _get_slice_len(slc, n)
            sliced_shape.append(num_elements)
        return tuple(sliced_shape)

    # pylint: disable=invalid-name
    @staticmethod
    def _basic_indexing_contiguous_flat_begin_end(slc_key, shape):
        """Return the flat indices of begin and end for contiguous slicing.

        ``slc_key`` must hold one ``slice`` per axis of ``shape`` (enforced
        by the assert below); the result is a half-open range
        ``[flat_begin, flat_end)`` into the row-major flattening of an array
        with the given ``shape``.
        """
        assert len(slc_key) == len(shape)
        # Accumulate flat offsets axis by axis (row-major multiply-add).
        flat_begin, flat_end = 0, 0
        for slc, n in zip(slc_key, shape):
            flat_begin *= n
            flat_end *= n
            begin, _, _ = slc.indices(n)
            num_elements = _get_slice_len(slc, n)
            if num_elements == 0:
                # An empty slice on any axis makes the whole selection empty.
                return 0, 0
            else:
                flat_begin += begin
                flat_end += begin + num_elements - 1
        # `flat_end` points at the last selected element; return it exclusive.
        return flat_begin, flat_end + 1
    # pylint: enable=invalid-name

    @staticmethod
    def _drop_int_axes(indexed_shape, int_axes):
        """drop the axis of indexed_shape corresponding to int axes"""
        bcast_shape = []
        for i, size in enumerate(indexed_shape):
            if i not in int_axes:
                bcast_shape.append(size)
        if not bcast_shape:
            # Every axis was indexed by an integer; use a 1-element shape.
            bcast_shape = [1]
        return tuple(bcast_shape)

    def _set_nd_basic_indexing(self, key, value):
        """This function indexes ``self`` with a tuple of ``slice`` objects only."""
        # `key` has already been normalized upstream; anything other than
        # slice/int/None here indicates an internal error, not user error.
        for idx in key:
            if idx is not None and not isinstance(idx, (py_slice, integer_types)):
                raise RuntimeError(
                    '`key` may only contain `slice` or integer objects in the '
                    'basic implementation, got object of type {}. '
                    'This is a bug, please report it!'
                    ''.format(type(idx)))
        key_nd = tuple(idx for idx in key if idx is not None)
        int_axes = [
            ax for ax in range(len(key_nd)) if isinstance(key_nd[ax], integer_types)
        ]

        # Check bounds for integer axes
        for ax in int_axes:  # pylint: disable=invalid-name
            if not -self.shape[ax] <= key_nd[ax] < self.shape[ax]:
                raise IndexError(
                    'index {} is out of bounds for axis {} with size {}'
                    ''.format(key_nd[ax], ax, self.shape[ax]))

        begin, end, step = self._basic_indexing_key_to_begin_end_step(
            key, self.shape, keep_none=False
        )
        indexed_shape = tuple(
            _get_dim_size(b, e, s) for b, e, s in zip(begin, end, step)
        )
        # If the key selects the whole array (with forward steps only), the
        # value can overwrite `self` directly instead of going through
        # slice_assign / slice_assign_scalar.
        can_assign_directly = (
            (indexed_shape == self.shape) and all(s > 0 for s in step)
        )
        # Recompute the key with `None` entries kept, as used by the
        # slice_assign paths below.
        begin, end, step = self._basic_indexing_key_to_begin_end_step(
            key, self.shape, keep_none=True
        )
        none_axes = [ax for ax in range(len(key)) if key[ax] is None]
        new_axes = self._new_axes_after_basic_indexing(none_axes, key)

        if can_assign_directly:
            # Easy case, overwrite whole array.
            if type(value) == self.__class__:  # pylint: disable=unidiomatic-typecheck
                if value.handle is not self.handle:
                    # Need to do this before `broadcast_to`.
                    bcast_shape = self._drop_int_axes(indexed_shape, int_axes)
                    value_nd = self._prepare_value_nd(value, bcast_shape=bcast_shape, squeeze_axes=new_axes)
                    value_nd = value_nd.reshape(indexed_shape)
                    value_nd.copyto(self)

            elif isinstance(value, numeric_types):
                # Scalar fill; booleans are stored as 0/1.
                if isinstance(value, bool):
                    self._full(int(value))
                else:
                    self._full(value)

            elif isinstance(value, (np.ndarray, np.generic)):
                # Align the numpy value's shape with `self` before broadcasting.
                tmp_shape = _shape_for_bcast(
                    value.shape, target_ndim=self.ndim, new_axes=int_axes
                )
                value = value.reshape(tmp_shape)
                if isinstance(value, np.generic) or value.shape != self.shape:
                    value = np.broadcast_to(value, self.shape)
                self._sync_copyfrom(value)

            else:
                # Other array-like
                # drop the axis of indexed_shape corresponding to int axes
                bcast_shape = self._drop_int_axes(indexed_shape, int_axes)
                value_nd = self._prepare_value_nd(value, bcast_shape=bcast_shape, squeeze_axes=new_axes)
                value_nd = value_nd.reshape(indexed_shape)
                value_nd.copyto(self)

        elif isinstance(value, numeric_types):
            self.slice_assign_scalar(float(value), begin, end, step)

        else:
            # drop the axis of indexed_shape corresponding to int axes
            bcast_shape = self._drop_int_axes(indexed_shape, int_axes)
            value_nd = self._prepare_value_nd(value, bcast_shape=bcast_shape, squeeze_axes=new_axes)
            value_nd = value_nd.reshape(indexed_shape)
            self.slice_assign(value_nd, begin, end, step)

    def _get_nd_basic_indexing(self, key):
        """This function indexes ``self`` with a tuple of `slice` objects only."""
        key_nd = tuple(idx for idx in key if idx is not None)
        if len(key_nd) < self.ndim:
            raise RuntimeError(
                'too few indices after normalization: expected `ndim` ({}) '
                'but got {}. This is a bug, please report it!'
                ''.format(self.ndim, len(key_nd))
            )
        if len(key_nd) > self.ndim:
            raise IndexError(
                'too many indices ({}) for array with {} dimensions'
                ''.format(len(key_nd), self.ndim)
            )
        # Replace integer entries by length-1 slices; remember which axes
        # they were so they can be dropped from the final shape.
        slc_key, int_axes = self._basic_indexing_key_int_to_slice(key_nd)
        none_axes = [ax for ax in range(len(key)) if key[ax] is None]
        if none_axes:
            new_axes = self._new_axes_after_basic_indexing(none_axes, key)
        else:
            new_axes = []

        # Check bounds for integer axes
        for ax in int_axes:  # pylint: disable=invalid-name
            if not -self.shape[ax] <= key_nd[ax] < self.shape[ax]:
                raise IndexError(
                    'index {} is out of bounds for axis {} with size {}'
                    ''.format(key_nd[ax], ax, self.shape[ax]))

        # Convert to begin, end and step, and return immediately if the slice
        # is empty
        begin, end, step = self._basic_indexing_key_to_begin_end_step(
            slc_key, self.shape, keep_none=False
        )

        if self._basic_indexing_slice_is_contiguous(slc_key, self.shape):
            # Create a shared-memory view by using low-level flat slicing
            flat_begin, flat_end = self._basic_indexing_contiguous_flat_begin_end(
                slc_key, self.shape
            )
            handle = NDArrayHandle()
            flat_self = self.reshape(-1)
            # The 64-bit slice entry point is used when the library was built
            # with int64 (large tensor) support.
            if _int64_enabled():
                check_call(
                    _LIB.MXNDArraySlice64(
                        flat_self.handle,
                        ctypes.c_int64(flat_begin),
                        ctypes.c_int64(flat_end),
                        ctypes.byref(handle),
                    )
                )
            else:
                check_call(
                    _LIB.MXNDArraySlice(
                        flat_self.handle,
                        ctypes.c_uint32(flat_begin),
                        ctypes.c_uint32(flat_end),
                        ctypes.byref(handle),
                    )
                )
            sliced_shape = self._basic_indexing_sliced_shape(slc_key, self.shape)
            sliced = NDArray(handle=handle, writable=self.writable).reshape(sliced_shape)
        else:
            # Non-contiguous selection: fall back to the `slice` operator,
            # which produces a copy rather than a view.
            begin, end, step = self._basic_indexing_key_to_begin_end_step(
                slc_key, self.shape, keep_none=True
            )
            sliced = op.slice(self, begin, end, step)

        # Reshape to final shape due to integer and `None` entries in `key`.
        final_shape = [sliced.shape[i] for i in range(sliced.ndim)
                       if i not in int_axes]
        for ax in new_axes:  # pylint: disable=invalid-name
            final_shape.insert(ax, 1)

        if len(final_shape) == 0:
            # Override for single element indexing
            final_shape = [1]
        return sliced.reshape(final_shape)

    @staticmethod
    def _advanced_index_to_array(idx, ax_len, ctx):
        """Convert ``idx`` to `NDArray` for advanced indexing.

        The ``ax_len`` is used to convert `slice` objects to integer arrays.
        """
        # Index dtype follows the library build: int64 when large-tensor
        # support is enabled, int32 otherwise.
        if _int64_enabled():
            idx_dtype = 'int64'
        else:
            idx_dtype = 'int32'
        if isinstance(idx, NDArray):
            if idx.dtype != idx_dtype:
                idx = idx.astype(idx_dtype)
            return idx.as_in_context(ctx)
        elif isinstance(idx, (np.ndarray, list, tuple)):
            return array(idx, ctx, idx_dtype)
        elif isinstance(idx, integer_types):
            return array([idx], ctx, idx_dtype)
        elif isinstance(idx, py_slice):
            start, stop, step = idx.indices(ax_len)
            return arange(start, stop, step, ctx=ctx, dtype=idx_dtype)
        elif isinstance(idx, range):
            return arange(idx.start, idx.stop, idx.step, ctx=ctx, dtype=idx_dtype)
        else:
            raise RuntimeError('illegal index type {}'.format(type(idx)))

    # pylint: disable=invalid-name
    @staticmethod
    def _broadcast_advanced_indices(arrays, block_axes):
        """Broadcast arrays according to position in the sequence.

        Here, "according to position" means that an array of dimension 1
        (which is the case for all except ``block_axes``) will have shape
        ``(1, ..., 1, N, 1, ..., 1)``, where ``N`` is the length, and the
        position of ``N`` in the shape is the same as the position of the
        array in the ``arrays`` sequence, plus extra dimensions of the
        advanced block if it is left of the array.

        The arrays at ``block_axes`` are the advanced indices. They are assumed to
        be ready for mutual broadcasting to produce the advanced indexing block.
        It is further assumed that the numbers in ``block_axes`` are consecutive.

        The return value is a tuple containing the arrays with broadcast shapes.
        """
        block_shape = _broadcast_shapes([arrays[ax] for ax in block_axes])
        ndim_blk = len(block_shape)
        # Extra dims the advanced block contributes beyond one-per-index.
        ndim_blk_delta = ndim_blk - len(block_axes)
        ndim_lead = block_axes[0]
        ndim_trail = len(arrays) - (block_axes[-1] + 1)

        # Overall broadcast target: leading 1-d arrays, the block, trailing ones.
        bcast_shape = (
            tuple(arrays[ax].shape[0] for ax in range(ndim_lead)) +
            block_shape +
            tuple(arrays[ax].shape[0] for ax in range(block_axes[-1] + 1, len(arrays)))
        )

        bcast_arrays = [None] * len(arrays)
        for ax in block_axes:
            arr = arrays[ax].broadcast_to(block_shape)
            shp = (1,) * ndim_lead + block_shape + (1,) * ndim_trail
            bcast_arrays[ax] = arr.reshape(shp).broadcast_to(bcast_shape)

        for ax in set(range(len(arrays))) - set(block_axes):
            shp = [1] * len(bcast_shape)
            if ax < ndim_lead:
                shp[ax] = arrays[ax].shape[0]
            else:
                # Shift by the block's extra dims for arrays right of the block.
                shp[ax + ndim_blk_delta] = arrays[ax].shape[0]
            bcast_arrays[ax] = arrays[ax].reshape(shp).broadcast_to(bcast_shape)

        return tuple(bcast_arrays)
    # pylint: enable=invalid-name

    @staticmethod
    def _drop_slice_none_at_end(key):
        """Remove ``slice(None)`` at the end of a key.

        This is used for efficiency in advanced indexing, to avoid generating
        ``arange(n)`` arrays for these axes. The `gather_nd` and `scatter_nd`
        handle implicit full trailing axes automatically.
1167 """ 1168 key = list(key) 1169 while isinstance(key[-1], py_slice) and key[-1] == slice(None): 1170 key.pop() 1171 return tuple(key) 1172 1173 def _get_index_nd(self, key): 1174 """ 1175 Return an index array for use in `scatter_nd` and `gather_nd`, 1176 and a list of positions of new_axes in ouptut shape. 1177 """ 1178 key_nd = tuple(idx for idx in key if idx is not None) 1179 if len(key_nd) < self.ndim: 1180 raise RuntimeError( 1181 'too few indices after normalization: expected `ndim` ({}) ' 1182 'but got {}. This is a bug, please report it!' 1183 ''.format(self.ndim, len(key_nd)) 1184 ) 1185 if len(key_nd) > self.ndim: 1186 raise IndexError( 1187 'too many indices ({}) for array with {} dimensions' 1188 ''.format(len(key_nd), self.ndim) 1189 ) 1190 ndim = len(key_nd) 1191 1192 # --- Preparation --- # 1193 1194 # - Make lists for bookkeeping of advanced indices & axes 1195 # - Drop trailing `slice(None)` entries in `key` for efficiency 1196 # - Determine whether the advanced indices are adjacent in `key` 1197 # - Depending on that, make index permutations to move around indices 1198 1199 adv_axs = [ax for ax, idx in enumerate(key) if _is_advanced_index(idx)] 1200 adv_axs_nd = [ax for ax, idx in enumerate(key_nd) if _is_advanced_index(idx)] 1201 adv_idcs_are_adjacent = bool(np.all(np.diff(adv_axs) == 1)) 1202 nonadv_axs_nd = [ax for ax in range(ndim) if ax not in adv_axs_nd] 1203 adv_idcs_nd = [key_nd[ax] for ax in adv_axs_nd] 1204 idcs_short = self._drop_slice_none_at_end(key_nd) 1205 dropped_axs = list(range(len(idcs_short), ndim)) 1206 1207 if adv_idcs_are_adjacent: 1208 # The easy case: the advanced block can stay at its position, and no 1209 # permutation needs to be done (identity permutation) 1210 axs_nd_permut = axs_nd_permut_inv = tuple(range(ndim)) 1211 idcs_permut_short = idcs_short 1212 block_axs_nd = adv_axs_nd 1213 else: 1214 # The more complicated case: during broadcasting, we need to use the 1215 # indices in the *permuted* order, where the 
advanced block is 1216 # at the beginning, while the final index for `gather_nd` is stacked 1217 # in the *original* order, so that the association of index with 1218 # array axis remains the same. 1219 1220 # This order is used for broadcasting: advanced block at the beginning 1221 idcs_permut_short = ( 1222 adv_idcs_nd + 1223 [key_nd[ax] for ax in range(ndim) 1224 if ax not in adv_axs_nd and ax not in dropped_axs] 1225 ) 1226 block_axs_nd = list(range(len(adv_axs_nd))) 1227 axs_nd_permut = adv_axs_nd + nonadv_axs_nd 1228 axs_nd_permut_inv = list(np.argsort(axs_nd_permut)) 1229 1230 # --- Conversion, broadcasting and index stacking --- # 1231 1232 # - Convert all indices in `key` to arrays: integers to 1-element arrays, 1233 # `slice` objects to arrays with explicit indices 1234 # - Reshape arrays for broadcasting according to their position in the 1235 # *permuted* key 1236 # - Broadcast and stack the indices in the *original* order 1237 1238 shape_nd_permut = tuple(self.shape[ax] for ax in axs_nd_permut) 1239 converted_idcs_short = [ 1240 self._advanced_index_to_array(idx, ax_len, self.ctx) 1241 for idx, ax_len in zip(idcs_permut_short, shape_nd_permut) 1242 ] 1243 bcast_idcs_permut_short = self._broadcast_advanced_indices( 1244 converted_idcs_short, block_axes=block_axs_nd 1245 ) 1246 1247 # Get the ndim of advanced indexing subspace 1248 converted_advanced_idcs = [ 1249 self._advanced_index_to_array(idx, ax_len, self.ctx) 1250 for idx, ax_len in zip(adv_idcs_nd, [self.shape[ax] for ax in adv_axs_nd]) 1251 ] 1252 bcast_advanced_shape = _broadcast_shapes(converted_advanced_idcs) 1253 1254 # Undo the permutation to restore the original order 1255 bcast_idcs_short = [ 1256 bcast_idcs_permut_short[ax] 1257 for ax in axs_nd_permut_inv 1258 if axs_nd_permut[ax] not in dropped_axs 1259 ] 1260 1261 # Calculate where the newaxes are inserted after advanced indexing 1262 new_axes_positions = self._new_axes_after_advanced_indexing(key, adv_axs,\ 1263 
len(bcast_advanced_shape), adv_idcs_are_adjacent) 1264 1265 # if any array is numpy.ndarray, stack in numpy ndarray class. 1266 for idcs in bcast_idcs_short: 1267 if type(idcs) != NDArray: # pylint: disable=unidiomatic-typecheck 1268 return bcast_idcs_short, new_axes_positions 1269 1270 return op.stack(*bcast_idcs_short), new_axes_positions 1271 1272 def _set_nd_advanced_indexing(self, key, value): 1273 """This function is called by __setitem__ when key is an advanced index.""" 1274 indices, new_axes = self._get_index_nd(key) 1275 vshape = get_oshape_of_gather_nd_op(self.shape, indices.shape) 1276 value_nd = self._prepare_value_nd(value, bcast_shape=vshape, squeeze_axes=new_axes) 1277 self._scatter_set_nd(value_nd, indices) 1278 1279 def _get_nd_advanced_indexing(self, key): 1280 """Get item when key is a tuple of any objects of the following types: 1281 NDArray, np.ndarray, list, tuple, slice, and integer.""" 1282 slc_key, new_axes = self._get_index_nd(key) 1283 sliced = op.gather_nd(self, slc_key) 1284 1285 # Reshape due to `None` entries in `key`. 1286 if new_axes: 1287 final_shape = [sliced.shape[i] for i in range(sliced.ndim)] 1288 for ax in new_axes: # pylint: disable=invalid-name 1289 final_shape.insert(ax, 1) 1290 return sliced.reshape(final_shape) 1291 else: 1292 return sliced 1293 1294 def _sync_copyfrom(self, source_array): 1295 """Performs a synchronized copy from the `source_array` to the current array. 1296 This is called through ``x[:] = source_array``, where the `source_array` 1297 is a `numpy.ndarray` or array-like object. 1298 This function blocks until all the pending read/write operations with respect 1299 to the current `NDArray` are finished and carry out the copy operation to the 1300 current NDArray. 1301 1302 Parameters 1303 ---------- 1304 source_array : array_like 1305 The data source we would like to copy from. 
1306 1307 Example 1308 ------- 1309 >>> a = mx.nd.array([1, 2]) 1310 >>> a.asnumpy() 1311 array([ 1., 2.], dtype=float32) 1312 >>> a[:] = np.array([3, 4]) 1313 >> a.asnumpy() 1314 array([ 3., 4.], dtype=float32) 1315 """ 1316 if not isinstance(source_array, np.ndarray): 1317 try: 1318 source_array = np.array(source_array, dtype=self.dtype) 1319 except: 1320 raise TypeError('array must consist of array-like data,' + 1321 'type %s is not supported' % str(type(array))) 1322 source_array = np.asarray(source_array, dtype=self.dtype, order='C') 1323 if source_array.shape != self.shape: 1324 raise ValueError('Shape inconsistent: expected %s vs got %s'%( 1325 str(source_array.shape), str(self.shape))) 1326 check_call(_LIB.MXNDArraySyncCopyFromCPU( 1327 self.handle, 1328 source_array.ctypes.data_as(ctypes.c_void_p), 1329 ctypes.c_size_t(source_array.size))) 1330 1331 def _slice(self, start, stop): 1332 """Returns a sliced NDArray that shares memory with the current one. 1333 This is called through ``x[start:stop]``. 1334 1335 Parameters 1336 ---------- 1337 start : int 1338 Starting inclusive index of slice in the first dim. 1339 stop : int 1340 Finishing exclusive index of slice in the first dim. 1341 1342 Returns 1343 ------- 1344 `NDArray` sharing the memory with the current one sliced from 1345 start to stop in the first dim. 1346 1347 Examples: 1348 >>> a = mx.nd.array([[1,2], [3, 4], [5, 6], [7, 8]]) 1349 >>> a[1:2].asnumpy() 1350 array([[ 3., 4.]], dtype=float32) 1351 >>> a[1:1].asnumpy() 1352 array([], shape=(0, 2), dtype=float32) 1353 """ 1354 handle = NDArrayHandle() 1355 start, stop, _ = _get_index_range(start, stop, self.shape[0]) 1356 1357 check_call(_LIB.MXNDArraySlice( 1358 self.handle, mx_uint(start), mx_uint(stop), ctypes.byref(handle))) 1359 return self.__class__(handle=handle, writable=self.writable) 1360 1361 def _at(self, idx): 1362 """Returns a view of the array sliced at `idx` in the first dim. 1363 This is called through ``x[idx]``. 

        Parameters
        ----------
        idx : int
            index for slicing the `NDArray` in the first dim.

        Returns
        -------
        NDArray
            `NDArray` sharing the memory with the current one sliced at `idx` in the first dim.

        Examples
        --------
        >>> a = mx.nd.array([[1,2], [3, 4]])
        >>> a[1].asnumpy()
        array([ 3.,  4.], dtype=float32)
        >>> b = mx.nd.array([1, 2, 3, 4])
        >>> b[0].asnumpy()
        array([ 1.], dtype=float32)
        """
        handle = NDArrayHandle()
        if idx < 0:
            # Normalize a negative index; on failure report the original
            # (pre-normalization) value in the error message.
            length = self.shape[0]
            idx += length
            if idx < 0:
                raise IndexError('index %d is out of bounds for axis 0 with size %d'
                                 % (idx-length, length))
        if _int64_enabled():
            check_call(_LIB.MXNDArrayAt64(
                self.handle, ctypes.c_int64(idx), ctypes.byref(handle)))
        else:
            check_call(_LIB.MXNDArrayAt(
                self.handle, ctypes.c_uint32(idx), ctypes.byref(handle)))
        return self.__class__(handle=handle, writable=self.writable)

    def reshape(self, *shape, **kwargs):
        """Returns a **view** of this array with a new shape without altering any data.

        Parameters
        ----------
        shape : tuple of int, or n ints
            The new shape should not change the array size, namely
            ``np.prod(new_shape)`` should be equal to ``np.prod(self.shape)``.
            Some dimensions of the shape can take special values from the set {0, -1, -2, -3, -4}.
            The significance of each is explained below:

            - ``0``  copy this dimension from the input to the output shape.

              Example::

              - input shape = (2,3,4), shape = (4,0,2), output shape = (4,3,2)
              - input shape = (2,3,4), shape = (2,0,0), output shape = (2,3,4)

            - ``-1`` infers the dimension of the output shape by using the remainder of the
              input dimensions keeping the size of the new array same as that of the input array.
              At most one dimension of shape can be -1.

              Example::

              - input shape = (2,3,4), shape = (6,1,-1), output shape = (6,1,4)
              - input shape = (2,3,4), shape = (3,-1,8), output shape = (3,1,8)
              - input shape = (2,3,4), shape=(-1,), output shape = (24,)

            - ``-2`` copy all/remainder of the input dimensions to the output shape.

              Example::

              - input shape = (2,3,4), shape = (-2,), output shape = (2,3,4)
              - input shape = (2,3,4), shape = (2,-2), output shape = (2,3,4)
              - input shape = (2,3,4), shape = (-2,1,1), output shape = (2,3,4,1,1)

            - ``-3`` use the product of two consecutive dimensions of the input shape as the
              output dimension.

              Example::

              - input shape = (2,3,4), shape = (-3,4), output shape = (6,4)
              - input shape = (2,3,4,5), shape = (-3,-3), output shape = (6,20)
              - input shape = (2,3,4), shape = (0,-3), output shape = (2,12)
              - input shape = (2,3,4), shape = (-3,-2), output shape = (6,4)

            - ``-4`` split one dimension of the input into two dimensions passed subsequent to
              -4 in shape (can contain -1).

              Example::

              - input shape = (2,3,4), shape = (-4,1,2,-2), output shape =(1,2,3,4)
              - input shape = (2,3,4), shape = (2,-4,-1,3,-2), output shape = (2,1,3,4)

            - If the argument `reverse` is set to 1, then the special values are inferred from right
              to left.

              Example::

              - without reverse=1, for input shape = (10,5,4), shape = (-1,0), output shape would be \
                (40,5).
              - with reverse=1, output shape will be (50,4).

        reverse : bool, default False
            If true then the special values are inferred from right to left. Only supported as
            keyword argument.


        Returns
        -------
        NDArray
            An array with desired shape that shares data with this array.

        Examples
        --------
        >>> x = mx.nd.arange(0,6).reshape(2,3)
        >>> x.asnumpy()
        array([[ 0.,  1.,  2.],
               [ 3.,  4.,  5.]], dtype=float32)
        >>> y = x.reshape(3,2)
        >>> y.asnumpy()
        array([[ 0.,  1.],
               [ 2.,  3.],
               [ 4.,  5.]], dtype=float32)
        >>> y = x.reshape(3,-1)
        >>> y.asnumpy()
        array([[ 0.,  1.],
               [ 2.,  3.],
               [ 4.,  5.]], dtype=float32)
        >>> y = x.reshape(3,2)
        >>> y.asnumpy()
        array([[ 0.,  1.],
               [ 2.,  3.],
               [ 4.,  5.]], dtype=float32)
        >>> y = x.reshape(-3)
        >>> y.asnumpy()
        array([ 0.  1.  2.  3.  4.  5.], dtype=float32)
        >>> y[:] = -1
        >>> x.asnumpy()
        array([[-1., -1., -1.],
               [-1., -1., -1.]], dtype=float32)
        """
        # Accept reshape((a, b)), reshape(a, b) or reshape(shape=(a, b)).
        if len(shape) == 1 and isinstance(shape[0], (list, tuple)):
            shape = shape[0]
        elif not shape:
            shape = kwargs.get('shape')
            assert shape, "Shape must be provided."
        if not all(k in ['shape', 'reverse'] for k in kwargs):
            raise TypeError(
                "Got unknown keywords in reshape: {}. " \
                "Accepted keyword arguments are 'shape' and 'reverse'.".format(
                    ', '.join([k for k in kwargs if k not in ['shape', 'reverse']])))
        reverse = kwargs.get('reverse', False)
        handle = NDArrayHandle()

        # Actual reshape
        check_call(_LIB.MXNDArrayReshape64(self.handle,
                                           len(shape),
                                           c_array(ctypes.c_int64, shape),
                                           reverse,
                                           ctypes.byref(handle)))
        res = self.__class__(handle=handle, writable=self.writable)

        # Array size should not change
        if np.prod(res.shape) != np.prod(self.shape):
            raise ValueError('Cannot reshape array of size {} into shape {}'.format(np.prod(self.shape), shape))
        return res

    def reshape_like(self, *args, **kwargs):
        """Convenience fluent method for :py:func:`reshape_like`.

        The arguments are the same as for :py:func:`reshape_like`, with
        this array as data.
1532 """ 1533 return op.reshape_like(self, *args, **kwargs) 1534 1535 def zeros_like(self, *args, **kwargs): 1536 """Convenience fluent method for :py:func:`zeros_like`. 1537 1538 The arguments are the same as for :py:func:`zeros_like`, with 1539 this array as data. 1540 """ 1541 return op.zeros_like(self, *args, **kwargs) 1542 1543 def ones_like(self, *args, **kwargs): 1544 """Convenience fluent method for :py:func:`ones_like`. 1545 1546 The arguments are the same as for :py:func:`ones_like`, with 1547 this array as data. 1548 """ 1549 return op.ones_like(self, *args, **kwargs) 1550 1551 def broadcast_axes(self, *args, **kwargs): 1552 """Convenience fluent method for :py:func:`broadcast_axes`. 1553 1554 The arguments are the same as for :py:func:`broadcast_axes`, with 1555 this array as data. 1556 """ 1557 return op.broadcast_axes(self, *args, **kwargs) 1558 1559 def repeat(self, *args, **kwargs): 1560 """Convenience fluent method for :py:func:`repeat`. 1561 1562 The arguments are the same as for :py:func:`repeat`, with 1563 this array as data. 1564 """ 1565 return op.repeat(self, *args, **kwargs) 1566 1567 def pad(self, *args, **kwargs): 1568 """Convenience fluent method for :py:func:`pad`. 1569 1570 The arguments are the same as for :py:func:`pad`, with 1571 this array as data. 1572 """ 1573 return op.pad(self, *args, **kwargs) 1574 1575 def swapaxes(self, *args, **kwargs): 1576 """Convenience fluent method for :py:func:`swapaxes`. 1577 1578 The arguments are the same as for :py:func:`swapaxes`, with 1579 this array as data. 1580 """ 1581 return op.swapaxes(self, *args, **kwargs) 1582 1583 def split(self, *args, **kwargs): 1584 """Convenience fluent method for :py:func:`split`. 1585 1586 The arguments are the same as for :py:func:`split`, with 1587 this array as data. 1588 """ 1589 return op.split(self, *args, **kwargs) 1590 1591 def split_v2(self, *args, **kwargs): 1592 """Convenience fluent method for :py:func:`split_v2`. 
1593 1594 The arguments are the same as for :py:func:`split_v2`, with 1595 this array as data. 1596 """ 1597 return split_v2(self, *args, **kwargs) 1598 1599 def slice(self, *args, **kwargs): 1600 """Convenience fluent method for :py:func:`slice`. 1601 1602 The arguments are the same as for :py:func:`slice`, with 1603 this array as data. 1604 """ 1605 return op.slice(self, *args, **kwargs) 1606 1607 def slice_axis(self, *args, **kwargs): 1608 """Convenience fluent method for :py:func:`slice_axis`. 1609 1610 The arguments are the same as for :py:func:`slice_axis`, with 1611 this array as data. 1612 """ 1613 return op.slice_axis(self, *args, **kwargs) 1614 1615 def slice_like(self, *args, **kwargs): 1616 """Convenience fluent method for :py:func:`slice_like`. 1617 1618 The arguments are the same as for :py:func:`slice_like`, with 1619 this array as data. 1620 """ 1621 return op.slice_like(self, *args, **kwargs) 1622 1623 def take(self, *args, **kwargs): 1624 """Convenience fluent method for :py:func:`take`. 1625 1626 The arguments are the same as for :py:func:`take`, with 1627 this array as data. 1628 """ 1629 return op.take(self, *args, **kwargs) 1630 1631 def one_hot(self, *args, **kwargs): 1632 """Convenience fluent method for :py:func:`one_hot`. 1633 1634 The arguments are the same as for :py:func:`one_hot`, with 1635 this array as data. 1636 """ 1637 return op.one_hot(self, *args, **kwargs) 1638 1639 def pick(self, *args, **kwargs): 1640 """Convenience fluent method for :py:func:`pick`. 1641 1642 The arguments are the same as for :py:func:`pick`, with 1643 this array as data. 1644 """ 1645 return op.pick(self, *args, **kwargs) 1646 1647 def sort(self, *args, **kwargs): 1648 """Convenience fluent method for :py:func:`sort`. 1649 1650 The arguments are the same as for :py:func:`sort`, with 1651 this array as data. 1652 """ 1653 return op.sort(self, *args, **kwargs) 1654 1655 def topk(self, *args, **kwargs): 1656 """Convenience fluent method for :py:func:`topk`. 
1657 1658 The arguments are the same as for :py:func:`topk`, with 1659 this array as data. 1660 """ 1661 return op.topk(self, *args, **kwargs) 1662 1663 def argsort(self, *args, **kwargs): 1664 """Convenience fluent method for :py:func:`argsort`. 1665 1666 The arguments are the same as for :py:func:`argsort`, with 1667 this array as data. 1668 """ 1669 return op.argsort(self, *args, **kwargs) 1670 1671 def argmax(self, *args, **kwargs): 1672 """Convenience fluent method for :py:func:`argmax`. 1673 1674 The arguments are the same as for :py:func:`argmax`, with 1675 this array as data. 1676 """ 1677 return op.argmax(self, *args, **kwargs) 1678 1679 def argmax_channel(self, *args, **kwargs): 1680 """Convenience fluent method for :py:func:`argmax_channel`. 1681 1682 The arguments are the same as for :py:func:`argmax_channel`, with 1683 this array as data. 1684 """ 1685 return op.argmax_channel(self, *args, **kwargs) 1686 1687 def argmin(self, *args, **kwargs): 1688 """Convenience fluent method for :py:func:`argmin`. 1689 1690 The arguments are the same as for :py:func:`argmin`, with 1691 this array as data. 1692 """ 1693 return op.argmin(self, *args, **kwargs) 1694 1695 def clip(self, *args, **kwargs): 1696 """Convenience fluent method for :py:func:`clip`. 1697 1698 The arguments are the same as for :py:func:`clip`, with 1699 this array as data. 1700 """ 1701 return op.clip(self, *args, **kwargs) 1702 1703 def abs(self, *args, **kwargs): 1704 """Convenience fluent method for :py:func:`abs`. 1705 1706 The arguments are the same as for :py:func:`abs`, with 1707 this array as data. 1708 """ 1709 return op.abs(self, *args, **kwargs) 1710 1711 def sign(self, *args, **kwargs): 1712 """Convenience fluent method for :py:func:`sign`. 1713 1714 The arguments are the same as for :py:func:`sign`, with 1715 this array as data. 1716 """ 1717 return op.sign(self, *args, **kwargs) 1718 1719 def flatten(self, inplace=False): 1720 """Flatten this array without altering any data. 

        Parameters
        ----------
        inplace : bool, default False
            If True, this method returns a **view** of this array
            that shares data with this array. Otherwise, a copy is returned.

        Returns
        -------
        NDArray
            An array with flattened shape `(d1, d2*...*dk)` that shares data with
            this array with shape `(d1, d2, ..., dk)`.

        Examples
        --------
        >>> x = mx.nd.arange(30).reshape(5,2,3)
        >>> y = x.flatten(inplace=True)
        >>> z = x.flatten()
        >>> y.shape
        (5, 6)
        >>> y[0].asnumpy()
        array([0., 1., 2., 3., 4., 5.], dtype=float32)
        >>> y[:] = -1
        >>> x[0].asnumpy()
        array([[-1., -1., -1.],
               [-1., -1., -1.]], dtype=float32)
        >>> z[0].asnumpy()
        array([0., 1., 2., 3., 4., 5.], dtype=float32)
        """
        # View path: reshape((0, -1)) keeps dim 0 and collapses the rest;
        # copy path: delegate to the flatten operator.
        return op.flatten(self) if not inplace else self.reshape((0, -1))

    def shape_array(self, *args, **kwargs):
        """Convenience fluent method for :py:func:`shape_array`.

        The arguments are the same as for :py:func:`shape_array`, with
        this array as data.
        """
        return op.shape_array(self, *args, **kwargs)

    def size_array(self, *args, **kwargs):
        """Convenience fluent method for :py:func:`size_array`.

        The arguments are the same as for :py:func:`size_array`, with
        this array as data.
        """
        return op.size_array(self, *args, **kwargs)

    def expand_dims(self, axis, inplace=False):
        """Adds an additional dimension to the current array without altering any data.

        Parameters
        ----------
        axis : int
            Position where new axis is to be inserted.
            Suppose that the input NDArray's dimension is ndim,
            the range of the inserted axis is [-ndim, ndim].
        inplace : bool, default False
            If True, this method returns a **view** of this array
            that shares data with this array. Otherwise, a copy is returned.

        Returns
        -------
        NDArray
            An array with expanded shape `(d1, d2, ..., 1, di, ..., dk)`
            that shares data with this array with shape `(d1, d2, ..., dk)`,
            given input axis `i`.

        Examples
        --------
        >>> x = mx.nd.arange(6).reshape(2,3)
        >>> y = x.expand_dims(1, inplace=True)
        >>> z = x.expand_dims(1)
        >>> y.shape
        (2, 1, 3)
        >>> y[0].asnumpy()
        array([[0., 1., 2.]], dtype=float32)
        >>> y[:] = -1
        >>> x.asnumpy()
        array([[-1., -1., -1.],
               [-1., -1., -1.]], dtype=float32)
        >>> z[0].asnumpy()
        array([[0., 1., 2.]], dtype=float32)
        """
        if not inplace:
            return op.expand_dims(self, axis=axis)
        else:
            # View path: insert the unit axis by reshaping this array.
            new_shape = list(self.shape)
            assert -len(new_shape)-1 <= axis <= len(new_shape), \
                "axis {} is out of range for {}d array".format(axis, len(new_shape))
            if axis < 0:
                axis += len(new_shape) + 1
            new_shape.insert(axis, 1)
            return self.reshape(new_shape)

    def tile(self, *args, **kwargs):
        """Convenience fluent method for :py:func:`tile`.

        The arguments are the same as for :py:func:`tile`, with
        this array as data.
        """
        return op.tile(self, *args, **kwargs)

    def transpose(self, *args, **kwargs):
        """Convenience fluent method for :py:func:`transpose`.

        The arguments are the same as for :py:func:`transpose`, with
        this array as data.
        """
        return op.transpose(self, *args, **kwargs)

    def flip(self, *args, **kwargs):
        """Convenience fluent method for :py:func:`flip`.

        The arguments are the same as for :py:func:`flip`, with
        this array as data.
        """
        return op.flip(self, *args, **kwargs)

    def depth_to_space(self, *args, **kwargs):
        """Convenience fluent method for :py:func:`depth_to_space`.

        The arguments are the same as for :py:func:`depth_to_space`, with
        this array as data.
1844 """ 1845 return op.depth_to_space(self, *args, **kwargs) 1846 1847 def space_to_depth(self, *args, **kwargs): 1848 """Convenience fluent method for :py:func:`space_to_depth`. 1849 1850 The arguments are the same as for :py:func:`space_to_depth`, with 1851 this array as data. 1852 """ 1853 return op.space_to_depth(self, *args, **kwargs) 1854 1855 def diag(self, k=0, **kwargs): 1856 """Convenience fluent method for :py:func:`diag`. 1857 1858 The arguments are the same as for :py:func:`diag`, with 1859 this array as data. 1860 """ 1861 return op.diag(self, k, **kwargs) 1862 1863 def sum(self, *args, **kwargs): 1864 """Convenience fluent method for :py:func:`sum`. 1865 1866 The arguments are the same as for :py:func:`sum`, with 1867 this array as data. 1868 """ 1869 return op.sum(self, *args, **kwargs) 1870 1871 def nansum(self, *args, **kwargs): 1872 """Convenience fluent method for :py:func:`nansum`. 1873 1874 The arguments are the same as for :py:func:`nansum`, with 1875 this array as data. 1876 """ 1877 return op.nansum(self, *args, **kwargs) 1878 1879 def prod(self, *args, **kwargs): 1880 """Convenience fluent method for :py:func:`prod`. 1881 1882 The arguments are the same as for :py:func:`prod`, with 1883 this array as data. 1884 """ 1885 return op.prod(self, *args, **kwargs) 1886 1887 def nanprod(self, *args, **kwargs): 1888 """Convenience fluent method for :py:func:`nanprod`. 1889 1890 The arguments are the same as for :py:func:`nanprod`, with 1891 this array as data. 1892 """ 1893 return op.nanprod(self, *args, **kwargs) 1894 1895 def mean(self, *args, **kwargs): 1896 """Convenience fluent method for :py:func:`mean`. 1897 1898 The arguments are the same as for :py:func:`mean`, with 1899 this array as data. 1900 """ 1901 return op.mean(self, *args, **kwargs) 1902 1903 def max(self, *args, **kwargs): 1904 """Convenience fluent method for :py:func:`max`. 1905 1906 The arguments are the same as for :py:func:`max`, with 1907 this array as data. 
1908 """ 1909 return op.max(self, *args, **kwargs) 1910 1911 def min(self, *args, **kwargs): 1912 """Convenience fluent method for :py:func:`min`. 1913 1914 The arguments are the same as for :py:func:`min`, with 1915 this array as data. 1916 """ 1917 return op.min(self, *args, **kwargs) 1918 1919 def norm(self, *args, **kwargs): 1920 """Convenience fluent method for :py:func:`norm`. 1921 1922 The arguments are the same as for :py:func:`norm`, with 1923 this array as data. 1924 """ 1925 return op.norm(self, *args, **kwargs) 1926 1927 def round(self, *args, **kwargs): 1928 """Convenience fluent method for :py:func:`round`. 1929 1930 The arguments are the same as for :py:func:`round`, with 1931 this array as data. 1932 """ 1933 return op.round(self, *args, **kwargs) 1934 1935 def rint(self, *args, **kwargs): 1936 """Convenience fluent method for :py:func:`rint`. 1937 1938 The arguments are the same as for :py:func:`rint`, with 1939 this array as data. 1940 """ 1941 return op.rint(self, *args, **kwargs) 1942 1943 def fix(self, *args, **kwargs): 1944 """Convenience fluent method for :py:func:`fix`. 1945 1946 The arguments are the same as for :py:func:`fix`, with 1947 this array as data. 1948 """ 1949 return op.fix(self, *args, **kwargs) 1950 1951 def floor(self, *args, **kwargs): 1952 """Convenience fluent method for :py:func:`floor`. 1953 1954 The arguments are the same as for :py:func:`floor`, with 1955 this array as data. 1956 """ 1957 return op.floor(self, *args, **kwargs) 1958 1959 def ceil(self, *args, **kwargs): 1960 """Convenience fluent method for :py:func:`ceil`. 1961 1962 The arguments are the same as for :py:func:`ceil`, with 1963 this array as data. 1964 """ 1965 return op.ceil(self, *args, **kwargs) 1966 1967 def trunc(self, *args, **kwargs): 1968 """Convenience fluent method for :py:func:`trunc`. 1969 1970 The arguments are the same as for :py:func:`trunc`, with 1971 this array as data. 
1972 """ 1973 return op.trunc(self, *args, **kwargs) 1974 1975 def sin(self, *args, **kwargs): 1976 """Convenience fluent method for :py:func:`sin`. 1977 1978 The arguments are the same as for :py:func:`sin`, with 1979 this array as data. 1980 """ 1981 return op.sin(self, *args, **kwargs) 1982 1983 def cos(self, *args, **kwargs): 1984 """Convenience fluent method for :py:func:`cos`. 1985 1986 The arguments are the same as for :py:func:`cos`, with 1987 this array as data. 1988 """ 1989 return op.cos(self, *args, **kwargs) 1990 1991 def tan(self, *args, **kwargs): 1992 """Convenience fluent method for :py:func:`tan`. 1993 1994 The arguments are the same as for :py:func:`tan`, with 1995 this array as data. 1996 """ 1997 return op.tan(self, *args, **kwargs) 1998 1999 def arcsin(self, *args, **kwargs): 2000 """Convenience fluent method for :py:func:`arcsin`. 2001 2002 The arguments are the same as for :py:func:`arcsin`, with 2003 this array as data. 2004 """ 2005 return op.arcsin(self, *args, **kwargs) 2006 2007 def arccos(self, *args, **kwargs): 2008 """Convenience fluent method for :py:func:`arccos`. 2009 2010 The arguments are the same as for :py:func:`arccos`, with 2011 this array as data. 2012 """ 2013 return op.arccos(self, *args, **kwargs) 2014 2015 def arctan(self, *args, **kwargs): 2016 """Convenience fluent method for :py:func:`arctan`. 2017 2018 The arguments are the same as for :py:func:`arctan`, with 2019 this array as data. 2020 """ 2021 return op.arctan(self, *args, **kwargs) 2022 2023 def degrees(self, *args, **kwargs): 2024 """Convenience fluent method for :py:func:`degrees`. 2025 2026 The arguments are the same as for :py:func:`degrees`, with 2027 this array as data. 2028 """ 2029 return op.degrees(self, *args, **kwargs) 2030 2031 def radians(self, *args, **kwargs): 2032 """Convenience fluent method for :py:func:`radians`. 2033 2034 The arguments are the same as for :py:func:`radians`, with 2035 this array as data. 
2036 """ 2037 return op.radians(self, *args, **kwargs) 2038 2039 def sinh(self, *args, **kwargs): 2040 """Convenience fluent method for :py:func:`sinh`. 2041 2042 The arguments are the same as for :py:func:`sinh`, with 2043 this array as data. 2044 """ 2045 return op.sinh(self, *args, **kwargs) 2046 2047 def cosh(self, *args, **kwargs): 2048 """Convenience fluent method for :py:func:`cosh`. 2049 2050 The arguments are the same as for :py:func:`cosh`, with 2051 this array as data. 2052 """ 2053 return op.cosh(self, *args, **kwargs) 2054 2055 def tanh(self, *args, **kwargs): 2056 """Convenience fluent method for :py:func:`tanh`. 2057 2058 The arguments are the same as for :py:func:`tanh`, with 2059 this array as data. 2060 """ 2061 return op.tanh(self, *args, **kwargs) 2062 2063 def arcsinh(self, *args, **kwargs): 2064 """Convenience fluent method for :py:func:`arcsinh`. 2065 2066 The arguments are the same as for :py:func:`arcsinh`, with 2067 this array as data. 2068 """ 2069 return op.arcsinh(self, *args, **kwargs) 2070 2071 def arccosh(self, *args, **kwargs): 2072 """Convenience fluent method for :py:func:`arccosh`. 2073 2074 The arguments are the same as for :py:func:`arccosh`, with 2075 this array as data. 2076 """ 2077 return op.arccosh(self, *args, **kwargs) 2078 2079 def arctanh(self, *args, **kwargs): 2080 """Convenience fluent method for :py:func:`arctanh`. 2081 2082 The arguments are the same as for :py:func:`arctanh`, with 2083 this array as data. 2084 """ 2085 return op.arctanh(self, *args, **kwargs) 2086 2087 def exp(self, *args, **kwargs): 2088 """Convenience fluent method for :py:func:`exp`. 2089 2090 The arguments are the same as for :py:func:`exp`, with 2091 this array as data. 2092 """ 2093 return op.exp(self, *args, **kwargs) 2094 2095 def expm1(self, *args, **kwargs): 2096 """Convenience fluent method for :py:func:`expm1`. 2097 2098 The arguments are the same as for :py:func:`expm1`, with 2099 this array as data. 
2100 """ 2101 return op.expm1(self, *args, **kwargs) 2102 2103 def log(self, *args, **kwargs): 2104 """Convenience fluent method for :py:func:`log`. 2105 2106 The arguments are the same as for :py:func:`log`, with 2107 this array as data. 2108 """ 2109 return op.log(self, *args, **kwargs) 2110 2111 def log10(self, *args, **kwargs): 2112 """Convenience fluent method for :py:func:`log10`. 2113 2114 The arguments are the same as for :py:func:`log10`, with 2115 this array as data. 2116 """ 2117 return op.log10(self, *args, **kwargs) 2118 2119 def log2(self, *args, **kwargs): 2120 """Convenience fluent method for :py:func:`log2`. 2121 2122 The arguments are the same as for :py:func:`log2`, with 2123 this array as data. 2124 """ 2125 return op.log2(self, *args, **kwargs) 2126 2127 def log1p(self, *args, **kwargs): 2128 """Convenience fluent method for :py:func:`log1p`. 2129 2130 The arguments are the same as for :py:func:`log1p`, with 2131 this array as data. 2132 """ 2133 return op.log1p(self, *args, **kwargs) 2134 2135 def sqrt(self, *args, **kwargs): 2136 """Convenience fluent method for :py:func:`sqrt`. 2137 2138 The arguments are the same as for :py:func:`sqrt`, with 2139 this array as data. 2140 """ 2141 return op.sqrt(self, *args, **kwargs) 2142 2143 def rsqrt(self, *args, **kwargs): 2144 """Convenience fluent method for :py:func:`rsqrt`. 2145 2146 The arguments are the same as for :py:func:`rsqrt`, with 2147 this array as data. 2148 """ 2149 return op.rsqrt(self, *args, **kwargs) 2150 2151 def cbrt(self, *args, **kwargs): 2152 """Convenience fluent method for :py:func:`cbrt`. 2153 2154 The arguments are the same as for :py:func:`cbrt`, with 2155 this array as data. 2156 """ 2157 return op.cbrt(self, *args, **kwargs) 2158 2159 def rcbrt(self, *args, **kwargs): 2160 """Convenience fluent method for :py:func:`rcbrt`. 2161 2162 The arguments are the same as for :py:func:`rcbrt`, with 2163 this array as data. 
2164 """ 2165 return op.rcbrt(self, *args, **kwargs) 2166 2167 def square(self, *args, **kwargs): 2168 """Convenience fluent method for :py:func:`square`. 2169 2170 The arguments are the same as for :py:func:`square`, with 2171 this array as data. 2172 """ 2173 return op.square(self, *args, **kwargs) 2174 2175 def reciprocal(self, *args, **kwargs): 2176 """Convenience fluent method for :py:func:`reciprocal`. 2177 2178 The arguments are the same as for :py:func:`reciprocal`, with 2179 this array as data. 2180 """ 2181 return op.reciprocal(self, *args, **kwargs) 2182 2183 def relu(self, *args, **kwargs): 2184 """Convenience fluent method for :py:func:`relu`. 2185 2186 The arguments are the same as for :py:func:`relu`, with 2187 this array as data. 2188 """ 2189 return op.relu(self, *args, **kwargs) 2190 2191 def sigmoid(self, *args, **kwargs): 2192 """Convenience fluent method for :py:func:`sigmoid`. 2193 2194 The arguments are the same as for :py:func:`sigmoid`, with 2195 this array as data. 2196 """ 2197 return op.sigmoid(self, *args, **kwargs) 2198 2199 def softmax(self, *args, **kwargs): 2200 """Convenience fluent method for :py:func:`softmax`. 2201 2202 The arguments are the same as for :py:func:`softmax`, with 2203 this array as data. 2204 """ 2205 return op.softmax(self, *args, **kwargs) 2206 2207 def log_softmax(self, *args, **kwargs): 2208 """Convenience fluent method for :py:func:`log_softmax`. 2209 2210 The arguments are the same as for :py:func:`log_softmax`, with 2211 this array as data. 2212 """ 2213 return op.log_softmax(self, *args, **kwargs) 2214 2215 def softmin(self, *args, **kwargs): 2216 """Convenience fluent method for :py:func:`softmin`. 2217 2218 The arguments are the same as for :py:func:`softmin`, with 2219 this array as data. 2220 """ 2221 return op.softmin(self, *args, **kwargs) 2222 2223 def squeeze(self, axis=None, inplace=False): 2224 """Remove dimensions with size 1 from this array without altering any data. 

        Parameters
        ----------
        axis : int, tuple of int, or None
            Selects a subset of the single-dimensional entries in the shape.
            If an axis is selected with shape entry greater than one, an error is raised.
        inplace : bool, default False
            If True, this method returns a **view** of this array
            that shares data with this array. Otherwise, a copy is returned.
        """
        if not inplace:
            return op.squeeze(self, axis=axis)
        else:
            # In-place path: compute the squeezed shape by hand and return a
            # reshape view that shares data with this array.
            new_shape = list(self.shape)
            axes = axis  # rename variable for readability
            if isinstance(axes, int):
                axes = [axes]
            if axes:
                # Explicit axes: validate (no duplicates, in range, size 1),
                # then delete from highest index down so earlier deletions do
                # not shift the positions of later ones.
                assert len(axes) == len(set(axes)), \
                    "axis {} contains duplicate which is not allowed.".format(axes)
                resolved_axes = [i if i >= 0 else i+len(self.shape) for i in axes]
                for arg_axis, actual_axis in zip(axes, resolved_axes):
                    assert -len(new_shape) <= arg_axis < len(new_shape), \
                        "axis {} is out of range for {}d array".format(arg_axis, len(new_shape))
                    axis_size = new_shape[actual_axis]
                    assert axis_size == 1, \
                        "Squeeze target axis {} must be size 1, got {}.".format(arg_axis, axis_size)
                for i in sorted(resolved_axes, reverse=True):
                    del new_shape[i]
            else:
                # No axes given: drop every size-1 dimension.
                for i in reversed(range(len(new_shape))):
                    if new_shape[i] == 1:
                        del new_shape[i]
            if not new_shape:
                # Squeezing away every dimension would give an empty shape;
                # keep at least a 1-element 1-d array.
                new_shape.append(1)

            return self.reshape(new_shape)

    # pylint: disable= undefined-variable
    def broadcast_to(self, shape):
        """Broadcasts the input array to a new shape.

        Broadcasting is only allowed on axes with size 1. The new shape cannot change
        the number of dimensions.
        For example, you could broadcast from shape (2, 1) to (2, 3), but not from
        shape (2, 3) to (2, 3, 3).

        Parameters
        ----------
        shape : tuple of int
            The shape of the desired array.

        Returns
        -------
        NDArray
            A NDArray with the desired shape that is not sharing data with this
            array, even if the new shape is the same as ``self.shape``.

        Examples
        --------
        >>> x = mx.nd.arange(0,3).reshape((1,3,1))
        >>> x.asnumpy()
        array([[[ 0.],
                [ 1.],
                [ 2.]]], dtype=float32)
        >>> y = x.broadcast_to((2,3,3))
        >>> y.asnumpy()
        array([[[ 0.,  0.,  0.],
                [ 1.,  1.,  1.],
                [ 2.,  2.,  2.]],
        <BLANKLINE>
               [[ 0.,  0.,  0.],
                [ 1.,  1.,  1.],
                [ 2.,  2.,  2.]]], dtype=float32)
        """
        cur_shape = self.shape
        err_str = 'operands could not be broadcast together with remapped shapes' \
                  '[original->remapped]: {} and requested shape {}'.format(cur_shape, shape)
        if len(shape) < len(cur_shape):
            raise ValueError(err_str)
        # Left-pad the current shape with 1s so both shapes have equal rank.
        cur_shape = (1,) * (len(shape) - len(cur_shape)) + cur_shape
        cur_shape_arr = np.array(cur_shape)
        broadcasting_axes = np.nonzero(cur_shape_arr != np.array(shape))
        # Any axis that differs from the target must currently be size 1.
        if (cur_shape_arr[broadcasting_axes] != 1).any():
            raise ValueError(err_str)
        if cur_shape != self.shape:
            return op.broadcast_to(self.reshape(cur_shape), shape=shape)
        else:
            return op.broadcast_to(self, shape=tuple(shape))
    # pylint: enable= undefined-variable

    def broadcast_like(self, other):
        """Broadcasts the input array to the shape of other.

        Broadcasting is only allowed on axes with size 1. The new shape cannot change
        the number of dimensions.
        For example, you could broadcast from shape (2, 1) to (2, 3), but not from
        shape (2, 3) to (2, 3, 3).

        Parameters
        ----------
        other : NDArray
            Array with shape of the desired array.

        Returns
        -------
        NDArray
            A NDArray with the desired shape that is not sharing data with this
            array, even if the new shape is the same as ``self.shape``.

        Examples
        --------
        >>> x = mx.nd.arange(0,3).reshape((1,3,1))
        >>> x.asnumpy()
        array([[[ 0.],
                [ 1.],
                [ 2.]]], dtype=float32)
        >>> y = x.broadcast_like(mx.nd.ones((2,3,3)))
        >>> y.asnumpy()
        array([[[ 0.,  0.,  0.],
                [ 1.,  1.,  1.],
                [ 2.,  2.,  2.]],
        <BLANKLINE>
               [[ 0.,  0.,  0.],
                [ 1.,  1.,  1.],
                [ 2.,  2.,  2.]]], dtype=float32)
        """
        return self.broadcast_to(other.shape)

    def wait_to_read(self):
        """Waits until all previous write operations on the current array are finished.

        This method guarantees that all previous write operations that pushed
        into the backend engine for execution are actually finished.

        Examples
        --------
        >>> import time
        >>> tic = time.time()
        >>> a = mx.nd.ones((1000,1000))
        >>> b = mx.nd.dot(a, a)
        >>> print(time.time() - tic) # doctest: +SKIP
        0.003854036331176758
        >>> b.wait_to_read()
        >>> print(time.time() - tic) # doctest: +SKIP
        0.0893700122833252
        """
        check_call(_LIB.MXNDArrayWaitToRead(self.handle))

    @property
    def ndim(self):
        """Returns the number of dimensions of this array

        Examples
        --------
        >>> x = mx.nd.array([1, 2, 3, 4])
        >>> x.ndim
        1
        >>> x = mx.nd.array([[1, 2], [3, 4]])
        >>> x.ndim
        2
        """
        return len(self.shape)

    @property
    def shape(self):
        """Tuple of array dimensions.

        Examples
        --------
        >>> x = mx.nd.array([1, 2, 3, 4])
        >>> x.shape
        (4L,)
        >>> y = mx.nd.zeros((2, 3, 4))
        >>> y.shape
        (2L, 3L, 4L)
        """
        ndim = mx_int()
        # NOTE(review): _int64_enabled is defined elsewhere in this module;
        # presumably it reflects the large-tensor (int64 shape) build flag.
        if _int64_enabled():
            pdata = ctypes.POINTER(mx_int64)()
            check_call(_LIB.MXNDArrayGetShapeEx64(
                self.handle, ctypes.byref(ndim), ctypes.byref(pdata)))
        else:
            pdata = ctypes.POINTER(mx_int)()
            check_call(_LIB.MXNDArrayGetShapeEx(
                self.handle, ctypes.byref(ndim), ctypes.byref(pdata)))
        if ndim.value == -1:
            # The backend reports -1 when the shape is unknown.
            return None
        else:
            return tuple(pdata[:ndim.value])  # pylint: disable=invalid-slice-index


    @property
    def size(self):
        """Number of elements in the array.

        Equivalent to the product of the array's dimensions.

        Examples
        --------
        >>> import numpy as np
        >>> x = mx.nd.zeros((3, 5, 2))
        >>> x.size
        30
        >>> np.prod(x.shape)
        30
        """
        size = 1
        for i in self.shape:
            size *= i
        return size

    @property
    def context(self):
        """Device context of the array.

        Examples
        --------
        >>> x = mx.nd.array([1, 2, 3, 4])
        >>> x.context
        cpu(0)
        >>> type(x.context)
        <class 'mxnet.context.Context'>
        >>> y = mx.nd.zeros((2,3), mx.gpu(0))
        >>> y.context
        gpu(0)
        """
        dev_typeid = ctypes.c_int()
        dev_id = ctypes.c_int()
        check_call(_LIB.MXNDArrayGetContext(
            self.handle, ctypes.byref(dev_typeid), ctypes.byref(dev_id)))
        return Context(Context.devtype2str[dev_typeid.value], dev_id.value)

    @property
    def ctx(self):
        """Device context of the array. Has the same meaning as context.

        Examples
        --------
        >>> x = mx.nd.array([1, 2, 3, 4])
        >>> x.ctx
        cpu(0)
        >>> type(x.ctx)
        <class 'mxnet.context.Context'>
        >>> y = mx.nd.zeros((2,3), mx.gpu(0))
        >>> y.ctx
        gpu(0)
        """
        return self.context

    @property
    def dtype(self):
        """Data-type of the array's elements.

        Returns
        -------
        numpy.dtype
            This NDArray's data type.

        Examples
        --------
        >>> x = mx.nd.zeros((2,3))
        >>> x.dtype
        <type 'numpy.float32'>
        >>> y = mx.nd.zeros((2,3), dtype='int32')
        >>> y.dtype
        <type 'numpy.int32'>
        """
        mx_dtype = ctypes.c_int()
        check_call(_LIB.MXNDArrayGetDType(
            self.handle, ctypes.byref(mx_dtype)))
        # Map the backend dtype id to the corresponding numpy dtype.
        return _DTYPE_MX_TO_NP[mx_dtype.value]

    @property
    def stype(self):
        """Storage-type of the array.
        """
        return _STORAGE_TYPE_ID_TO_STR[_storage_type(self.handle)]

    @property
    # pylint: disable= invalid-name, undefined-variable
    def T(self):
        """Returns a copy of the array with axes transposed.

        Equivalent to ``mx.nd.transpose(self)`` except that
        self is returned if ``self.ndim < 2``.

        Unlike ``numpy.ndarray.T``, this function returns a copy
        rather than a view of the array unless ``self.ndim < 2``.

        Examples
        --------
        >>> x = mx.nd.arange(0,6).reshape((2,3))
        >>> x.asnumpy()
        array([[ 0.,  1.,  2.],
               [ 3.,  4.,  5.]], dtype=float32)
        >>> x.T.asnumpy()
        array([[ 0.,  3.],
               [ 1.,  4.],
               [ 2.,  5.]], dtype=float32)

        """
        if len(self.shape) < 2:
            return self
        return op.transpose(self)
    # pylint: enable= invalid-name, undefined-variable

    @property
    def _fresh_grad(self):
        """Whether this array's corresponding gradient array
        (registered via `autograd.mark_variables`) has been
        updated by `autograd.backward` since last reset.

        `_fresh_grad` need to be manually set to False
        after consuming gradient (usually after updating this
        array).
        """
        out = ctypes.c_int()
        check_call(_LIB.MXNDArrayGetGradState(self.handle, ctypes.byref(out)))
        return out.value

    @_fresh_grad.setter
    def _fresh_grad(self, state):
        check_call(_LIB.MXNDArraySetGradState(self.handle, ctypes.c_int(state)))

    def asnumpy(self):
        """Returns a ``numpy.ndarray`` object with value copied from this array.

        Examples
        --------
        >>> x = mx.nd.ones((2,3))
        >>> y = x.asnumpy()
        >>> type(y)
        <type 'numpy.ndarray'>
        >>> y
        array([[ 1.,  1.,  1.],
               [ 1.,  1.,  1.]], dtype=float32)
        >>> z = mx.nd.ones((2,3), dtype='int32')
        >>> z.asnumpy()
        array([[1, 1, 1],
               [1, 1, 1]], dtype=int32)
        """
        # Allocate a host buffer of matching shape/dtype and let the backend
        # copy into it synchronously (waits for pending writes to finish).
        data = np.empty(self.shape, dtype=self.dtype)
        check_call(_LIB.MXNDArraySyncCopyToCPU(
            self.handle,
            data.ctypes.data_as(ctypes.c_void_p),
            ctypes.c_size_t(data.size)))
        return data

    def asscalar(self):
        """Returns a scalar whose value is copied from this array.

        This function is equivalent to ``self.asnumpy()[0]``. This NDArray must have shape (1,).

        Examples
        --------
        >>> x = mx.nd.ones((1,), dtype='int32')
        >>> x.asscalar()
        1
        >>> type(x.asscalar())
        <type 'numpy.int32'>
        """
        if self.size != 1:
            raise ValueError("The current array is not a scalar")
        if self.ndim == 1:
            return self.asnumpy()[0]
        else:
            # Higher-rank single-element array: index with the empty tuple to
            # extract the scalar.
            return self.asnumpy()[()]

    def astype(self, dtype, copy=True):
        """Returns a copy of the array after casting to a specified type.

        Parameters
        ----------
        dtype : numpy.dtype or str
            The type of the returned array.
        copy : bool
            Default `True`. By default, astype always returns a newly
            allocated ndarray on the same context.
If this is set to 2604 `False`, and the dtype requested is the same as the ndarray's 2605 dtype, the ndarray is returned instead of a copy. 2606 2607 Returns 2608 ------- 2609 NDArray, CSRNDArray or RowSparseNDArray 2610 The copied array after casting to the specified type, or 2611 the same array if copy=False and dtype is the same as the input 2612 array. 2613 2614 Examples 2615 -------- 2616 >>> x = mx.nd.zeros((2,3), dtype='float32') 2617 >>> y = x.astype('int32') 2618 >>> y.dtype 2619 <type 'numpy.int32'> 2620 """ 2621 2622 if not copy and np.dtype(dtype) == self.dtype: 2623 return self 2624 2625 res = empty(self.shape, ctx=self.ctx, dtype=dtype) 2626 self.copyto(res) 2627 return res 2628 2629 def copyto(self, other): 2630 """Copies the value of this array to another array. 2631 2632 If ``other`` is a ``NDArray`` object, then ``other.shape`` and 2633 ``self.shape`` should be the same. This function copies the value from 2634 ``self`` to ``other``. 2635 2636 If ``other`` is a context, a new ``NDArray`` will be first created on 2637 the target context, and the value of ``self`` is copied. 2638 2639 Parameters 2640 ---------- 2641 other : NDArray or Context 2642 The destination array or context. 2643 2644 Returns 2645 ------- 2646 NDArray, CSRNDArray or RowSparseNDArray 2647 The copied array. If ``other`` is an ``NDArray``, then the return value 2648 and ``other`` will point to the same ``NDArray``. 
2649 2650 Examples 2651 -------- 2652 >>> x = mx.nd.ones((2,3)) 2653 >>> y = mx.nd.zeros((2,3), mx.gpu(0)) 2654 >>> z = x.copyto(y) 2655 >>> z is y 2656 True 2657 >>> y.asnumpy() 2658 array([[ 1., 1., 1.], 2659 [ 1., 1., 1.]], dtype=float32) 2660 >>> y.copyto(mx.gpu(0)) 2661 <NDArray 2x3 @gpu(0)> 2662 2663 """ 2664 if isinstance(other, NDArray): 2665 if other.handle is self.handle: 2666 warnings.warn('You are attempting to copy an array to itself', RuntimeWarning) 2667 return False 2668 return _internal._copyto(self, out=other) 2669 elif isinstance(other, Context): 2670 hret = NDArray(_new_alloc_handle(self.shape, other, True, self.dtype)) 2671 return _internal._copyto(self, out=hret) 2672 else: 2673 raise TypeError('copyto does not support type ' + str(type(other))) 2674 2675 def copy(self): 2676 """Makes a copy of this ``NDArray``, keeping the same context. 2677 2678 Returns 2679 ------- 2680 NDArray, CSRNDArray or RowSparseNDArray 2681 The copied array 2682 2683 Examples 2684 -------- 2685 >>> x = mx.nd.ones((2,3)) 2686 >>> y = x.copy() 2687 >>> y.asnumpy() 2688 array([[ 1., 1., 1.], 2689 [ 1., 1., 1.]], dtype=float32) 2690 """ 2691 return self.copyto(self.ctx) 2692 2693 def slice_assign_scalar(self, value, begin, end, step): 2694 """ 2695 Assign the scalar to a cropped subset of this NDArray. Value will broadcast to the shape of the cropped shape 2696 and will be cast to the same dtype of the NDArray. 2697 2698 Parameters 2699 ---------- 2700 value: numeric value 2701 Value and this NDArray should be of the same data type. 2702 The shape of rhs should be the same as the cropped shape of this NDArray. 2703 begin: tuple of begin indices 2704 end: tuple of end indices 2705 step: tuple of step lenghths 2706 2707 Returns 2708 ------- 2709 This NDArray. 2710 2711 Examples 2712 -------- 2713 >>> from mxnet import nd 2714 >>> x = nd.ones((2, 2, 2)) 2715 >>> y = x.slice_assign_scalar(0, (0, 0, None), (1, 1, None), (None, None, None)) 2716 >>> y 2717 [[[0. 0.] 2718 [1. 
1.]] 2719 2720 [[1. 1.] 2721 [1. 1.]]] 2722 <NDArray 2x2x2 @cpu(0)> 2723 >>> x 2724 [[[0. 0.] 2725 [1. 1.]] 2726 2727 [[1. 1.] 2728 [1. 1.]]] 2729 <NDArray 2x2x2 @cpu(0)> 2730 2731 """ 2732 return _internal._slice_assign_scalar(self, value, begin=begin, end=end, step=step, out=self) 2733 2734 def slice_assign(self, rhs, begin, end, step): 2735 """ 2736 Assign the rhs to a cropped subset of this NDarray in place. 2737 Returns the view of this NDArray. 2738 2739 Parameters 2740 ---------- 2741 rhs: NDArray. 2742 rhs and this NDArray should be of the same data type, and on the same device. 2743 The shape of rhs should be the same as the cropped shape of this NDArray. 2744 begin: tuple of begin indices 2745 end: tuple of end indices 2746 step: tuple of step lenghths 2747 2748 Returns 2749 ------- 2750 This NDArray. 2751 2752 Examples 2753 -------- 2754 >>> x = nd.ones((2, 2, 2)) 2755 >>> assigned = nd.zeros((1, 1, 2)) 2756 >>> y = x.slice_assign(assigned, (0, 0, None), (1, 1, None), (None, None, None)) 2757 >>> y 2758 [[[0. 0.] 2759 [1. 1.]] 2760 2761 [[1. 1.] 2762 [1. 1.]]] 2763 <NDArray 2x2x2 @cpu(0)> 2764 >>> x 2765 [[[0. 0.] 2766 [1. 1.]] 2767 2768 [[1. 1.] 2769 [1. 1.]]] 2770 <NDArray 2x2x2 @cpu(0)> 2771 """ 2772 return _internal._slice_assign(self, rhs, begin=begin, end=end, step=step, out=self) 2773 2774 2775 def as_in_context(self, context): 2776 """Returns an array on the target device with the same value as this array. 2777 2778 If the target context is the same as ``self.context``, then ``self`` is 2779 returned. Otherwise, a copy is made. 2780 2781 Parameters 2782 ---------- 2783 context : Context 2784 The target context. 2785 2786 Returns 2787 ------- 2788 NDArray, CSRNDArray or RowSparseNDArray 2789 The target array. 

        Examples
        --------
        >>> x = mx.nd.ones((2,3))
        >>> y = x.as_in_context(mx.cpu())
        >>> y is x
        True
        >>> z = x.as_in_context(mx.gpu(0))
        >>> z is x
        False
        """
        # No copy needed when the array already lives on the requested device.
        if self.context == context:
            return self
        return self.copyto(context)

    def attach_grad(self, grad_req='write', stype=None):
        """Attach a gradient buffer to this NDArray, so that `backward`
        can compute gradient with respect to it.

        The gradient is initialized to zeros.

        Parameters
        ----------
        grad_req : {'write', 'add', 'null'}
            How gradient will be accumulated.
            - 'write': gradient will be overwritten on every backward.
            - 'add': gradient will be added to existing value on every backward.
            - 'null': do not compute gradient for this NDArray.
        stype : str, optional
            The storage type of the gradient array. Defaults to the same stype of this NDArray.
        """
        # Local import to avoid a circular import at module load time.
        from . import zeros as _zeros
        if stype is not None:
            grad = _zeros(self.shape, stype=stype)
        else:
            grad = op.zeros_like(self)  # pylint: disable=undefined-variable
        # Translate the string request into the backend's integer code.
        grad_req = _GRAD_REQ_MAP[grad_req]
        check_call(_LIB.MXAutogradMarkVariables(
            1, ctypes.pointer(self.handle),
            ctypes.pointer(mx_uint(grad_req)),
            ctypes.pointer(grad.handle)))

    @property
    def grad(self):
        """Returns gradient buffer attached to this NDArray."""
        from . import _ndarray_cls
        hdl = NDArrayHandle()
        check_call(_LIB.MXNDArrayGetGrad(self.handle, ctypes.byref(hdl)))
        # A null handle means no gradient buffer has been attached.
        if hdl.value is None:
            return None
        return _ndarray_cls(hdl)

    def detach(self):
        """Returns a new NDArray, detached from the current graph."""
        from . import _ndarray_cls
        hdl = NDArrayHandle()
        check_call(_LIB.MXNDArrayDetach(self.handle, ctypes.byref(hdl)))
        return _ndarray_cls(hdl)

    def backward(self, out_grad=None, retain_graph=False, train_mode=True):
        """Compute the gradients of this NDArray w.r.t variables.

        Parameters
        ----------
        out_grad : NDArray, optional
            Gradient with respect to head.
        retain_graph : bool, optional
            Whether to retain the computation graph for another backward
            pass on the same graph. By default the computation history
            is cleared.
        train_mode : bool, optional
            Whether to compute gradient for training or inference.
        """
        if out_grad is None:
            # No explicit head gradient: pass a null handle to the backend.
            ograd_handles = [NDArrayHandle(0)]
        else:
            ograd_handles = [out_grad.handle]

        check_call(_LIB.MXAutogradBackwardEx(
            1, c_handle_array([self]),
            c_array(NDArrayHandle, ograd_handles),
            0,
            ctypes.c_void_p(0),
            ctypes.c_int(retain_graph),
            ctypes.c_int(0),
            ctypes.c_int(train_mode),
            ctypes.c_void_p(0),
            ctypes.c_void_p(0)))

    def tostype(self, stype):
        """Return a copy of the array with chosen storage type.

        See Also
        ----------
        :meth:`mxnet.ndarray.cast_storage`.

        Returns
        -------
        NDArray, CSRNDArray or RowSparseNDArray
            A copy of the array with the chosen storage stype
        """
        # CSR is inherently a 2-D format, so reject other ranks up front.
        if stype == 'csr' and len(self.shape) != 2:
            raise ValueError("To convert to a CSR, the NDArray should be 2 Dimensional. Current "
                             "shape is %s" % str(self.shape))

        return op.cast_storage(self, stype=stype)

    def to_dlpack_for_read(self):
        """Returns a reference view of NDArray that represents as DLManagedTensor until
        all previous write operations on the current array are finished.

        Returns
        -------
        PyCapsule (the pointer of DLManagedTensor)
            a reference view of NDArray that represents as DLManagedTensor.
2906 2907 Examples 2908 -------- 2909 >>> x = mx.nd.ones((2,3)) 2910 >>> y = mx.nd.to_dlpack_for_read(x) 2911 >>> type(y) 2912 <class 'PyCapsule'> 2913 >>> z = mx.nd.from_dlpack(y) 2914 >>> z 2915 [[1. 1. 1.] 2916 [1. 1. 1.]] 2917 <NDArray 2x3 @cpu(0)> 2918 """ 2919 return to_dlpack_for_read(self) 2920 2921 def to_dlpack_for_write(self): 2922 """Returns a reference view of NDArray that represents as DLManagedTensor until 2923 all previous read/write operations on the current array are finished. 2924 2925 Returns 2926 ------- 2927 PyCapsule (the pointer of DLManagedTensor) 2928 a reference view of NDArray that represents as DLManagedTensor. 2929 2930 Examples 2931 -------- 2932 >>> x = mx.nd.ones((2,3)) 2933 >>> w = mx.nd.to_dlpack_for_write(x) 2934 >>> type(w) 2935 <class 'PyCapsule'> 2936 >>> u = mx.nd.from_dlpack(w) 2937 >>> u += 1 2938 >>> x 2939 [[2. 2. 2.] 2940 [2. 2. 2.]] 2941 <NDArray 2x3 @cpu(0)> 2942 """ 2943 return to_dlpack_for_write(self) 2944 2945 def _full(self, value): 2946 """ 2947 This is added as an NDArray class method in order to support polymorphism in NDArray and numpy.ndarray indexing 2948 """ 2949 return _internal._full(self.shape, value=value, ctx=self.ctx, dtype=self.dtype, out=self) 2950 2951 def _scatter_set_nd(self, value_nd, indices): 2952 """ 2953 This is added as an NDArray class method in order to support polymorphism in NDArray and numpy.ndarray indexing 2954 """ 2955 return _internal._scatter_set_nd( 2956 lhs=self, rhs=value_nd, indices=indices, shape=self.shape, out=self 2957 ) 2958 2959def check_boolean_array_dimension(array_shape, axis, bool_shape): 2960 """ 2961 Advanced boolean indexing is implemented through the use of `nonzero`. 
    Size check is necessary to make sure that the boolean array
    has exactly as many dimensions as it is supposed to work with before the conversion
    """
    for i, val in enumerate(bool_shape):
        if array_shape[axis + i] != val:
            raise IndexError('boolean index did not match indexed array along axis {};'
                             ' size is {} but corresponding boolean size is {}'
                             .format(axis + i, array_shape[axis + i], val))

def indexing_key_expand_implicit_axes(key, shape):
    """
    Make implicit axes explicit by adding ``slice(None)``
    and convert boolean array to integer array through `nonzero`.

    Examples
    --------
    >>> shape = (3, 4, 5)
    >>> indexing_key_expand_implicit_axes(np.s_[2, 1, 1], shape)
    (2, 1, 1)
    >>> indexing_key_expand_implicit_axes(np.s_[0], shape)
    (0, slice(None, None, None), slice(None, None, None))
    >>> indexing_key_expand_implicit_axes(np.s_[0, ...], shape)  # equivalent
    (0, slice(None, None, None), slice(None, None, None))
    >>> indexing_key_expand_implicit_axes(np.s_[:2, None, 0, ...], shape)
    (slice(None, 2, None), None, 0, slice(None, None, None))
    >>> bool_array = np.array([[True, False, True, False],
                               [False, True, False, True],
                               [True, False, True, False]], dtype=np.bool_)
    >>> indexing_key_expand_implicit_axes(np.s_[bool_array, None, 0:2], shape)
    (array([0, 0, 1, 1, 2, 2], dtype=int64), array([0, 2, 1, 3, 0, 2], dtype=int64), None, slice(None, 2, None))
    """
    if not isinstance(key, tuple):
        key = (key,)
    # We need to loop explicitly since tuple functions like `index()` or
    # `count()` use `==` internally, which doesn't play well with fancy
    # indexing.
    ell_idx = None
    num_none = 0
    nonell_key = []

    # For 0-d boolean indices: A new axis is added,
    # but at the same time no axis is "used". So if we have True,
    # we add a new axis (a bit like with np.newaxis). If it is
    # False, we add a new axis, but this axis has 0 entries.
    # prepend is defined to handle this case.
    # prepend = _NDARRAY_NO_ZERO_DIM_BOOL_ARRAY/-1 means there is no 0-d boolean scalar
    # prepend = _NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE/0 means a zero-dim False must be expanded
    # prepend = _NDARRAY_ZERO_DIM_BOOL_ARRAY_TRUE/1 means a new axis must be expanded
    prepend = _NDARRAY_NO_ZERO_DIM_BOOL_ARRAY
    axis = 0  # tracks which axis of `shape` the current index entry consumes
    for i, idx in enumerate(key):
        if idx is Ellipsis:
            if ell_idx is not None:
                raise IndexError(
                    'Cannot use more than one ellipsis (`...`) for indexing'
                )
            ell_idx = i
        else:
            # convert primitive type boolean value to mx.np.bool type
            # otherwise will be treated as 1/0
            if isinstance(idx, bool):
                idx = array(idx, dtype=np.bool_)
            if idx is None:
                num_none += 1
            if isinstance(idx, NDArrayBase) and idx.ndim == 0 and idx.dtype == np.bool_:
                if not idx:  # array(False) has priority
                    prepend = _NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE
                else:
                    prepend = _NDARRAY_ZERO_DIM_BOOL_ARRAY_TRUE
            elif isinstance(idx, NDArrayBase) and idx.ndim == 0 and idx.dtype != np.bool_:
                # This handles ndarray of zero dim. e.g array(1)
                # while avoiding conversion of zero dim boolean arrays.
                # float type will be converted to int
                nonell_key.append(int(idx.item()))
                axis += 1
            elif isinstance(idx, NDArrayBase) and idx.dtype == np.bool_:
                # Necessary size check before using `nonzero`
                check_boolean_array_dimension(shape, axis, idx.shape)
                # If the whole array is false and npx.set_np() is not set_up
                # the program will throw infer shape error
                if not is_np_array():
                    raise ValueError('Cannot perform boolean indexing in legacy mode. Please activate'
                                     ' numpy semantics by calling `npx.set_np()` in the global scope'
                                     ' before calling this function.')
                # Add the arrays from the nonzero result to the index
                nonell_key.extend(idx.nonzero())
                axis += idx.ndim
            else:
                nonell_key.append(idx)
                axis += 1

    nonell_key = tuple(nonell_key)

    if ell_idx is None:
        # This handles the case of "too few" indices, e.g., `nd.zeros((2, 3))[0]`,
        # where the ellipsis is implicitly after the last entry.
        ell_idx = len(nonell_key)

    # Number of full slices the (implicit or explicit) ellipsis stands for.
    ell_ndim = len(shape) + num_none - len(nonell_key)
    expanded_key = (nonell_key[:ell_idx] +
                    (slice(None),) * ell_ndim +
                    nonell_key[ell_idx:])

    return expanded_key, prepend


def _int_to_slice(idx):
    """Return a slice that indexes the same entries as a single int."""
    if idx == -1:
        # Avoid slice(-1, 0)
        return slice(-1, None)
    else:
        return slice(idx, idx + 1)


def _shape_for_bcast(shape, target_ndim, new_axes):
    """Return shape with added axes for broadcasting in ``target_ndim`` dimensions.

    If ``shape`` is shorter than ``target_ndim``, fixed ``1`` entries are inserted
    into the returned shape, in locations indexed by ``new_axes``. The rest is
    filled from the back with ``shape`` while possible.
    """
    new_shape = [None] * target_ndim
    if len(shape) < target_ndim:
        for new_ax in new_axes:
            new_shape[new_ax] = 1

    # Replace `None` from the right with `shape` entries from the right as
    # long as possible, thereafter with 1.
    ax_s = 1
    for ax in range(1, target_ndim + 1):
        if new_shape[-ax] is None:
            try:
                new_shape[-ax] = shape[-ax_s]
                ax_s += 1
            except IndexError:
                new_shape[-ax] = 1

    return tuple(new_shape)


def _is_advanced_index(idx):
    """Return whether ``idx`` is an advanced index (array-like or integer).
3105 3106 Note that in contrast to basic indexing, integers are considered advanced 3107 indices in the context of advanced indexing as they participate in 3108 broadcasting. 3109 """ 3110 if isinstance(idx, (NDArray, np.ndarray, integer_types, list, tuple)): 3111 return True 3112 elif isinstance(idx, py_slice) or idx is None: 3113 return False 3114 elif isinstance(idx, range): 3115 return True 3116 else: 3117 raise RuntimeError('illegal index type {}'.format(type(idx))) 3118 3119 3120def get_indexing_dispatch_code(key): 3121 """Returns a dispatch code for calling basic or advanced indexing functions.""" 3122 assert isinstance(key, tuple) 3123 3124 for idx in key: 3125 if isinstance(idx, (NDArray, np.ndarray, list, tuple, range)): 3126 if isinstance(idx, tuple) and len(idx) == 0: 3127 return _NDARRAY_EMPTY_TUPLE_INDEXING 3128 return _NDARRAY_ADVANCED_INDEXING 3129 elif not (isinstance(idx, (py_slice, integer_types)) or idx is None): 3130 raise ValueError( 3131 'NDArray does not support slicing with key {} of type {}.' 3132 ''.format(idx, type(idx)) 3133 ) 3134 return _NDARRAY_BASIC_INDEXING 3135 3136 3137def _get_index_range(start, stop, length, step=1): 3138 """Given start, stop, step and array length, return 3139 absolute values of start, stop, and step for generating index range. 3140 The returned values have been compensated by adding length if they 3141 are less than zero for all the cases but slice(None, None, -1). 
3142 Note that the returned value of stop is not necessarily >= 0, since 3143 absolute stop is -1 in the case of slice(None, None, -1).""" 3144 if step == 0: 3145 raise ValueError('step size cannot be zero') 3146 if length < 0: 3147 raise ValueError('array length cannot be less than zero') 3148 if step is None: 3149 step = 1 3150 if start is None: 3151 if step > 0: 3152 start = 0 3153 else: 3154 start = length - 1 3155 elif start < 0: 3156 start += length 3157 if start < 0: 3158 start = 0 3159 elif start >= length: 3160 start = length 3161 3162 if stop is None: 3163 if step > 0: 3164 stop = length 3165 else: 3166 # this supports case such as ::-1 3167 # stop = -1 here refers to the element before index 0, 3168 # instead of the last element in the array 3169 stop = -1 3170 elif stop < 0: 3171 stop += length 3172 if stop < 0: 3173 stop = 0 3174 elif stop > length: 3175 stop = length 3176 3177 return start, stop, step 3178 3179 3180def get_oshape_of_gather_nd_op(dshape, ishape): 3181 """Given data and index shapes, get the output `NDArray` shape. 3182 This basically implements the infer shape logic of op gather_nd.""" 3183 assert len(dshape) > 0 and len(ishape) > 0 3184 oshape = list(ishape[1:]) 3185 if ishape[0] < len(dshape): 3186 oshape.extend(dshape[ishape[0]:]) 3187 return tuple(oshape) 3188 3189 3190def _get_dim_size(start, stop, step): 3191 """Given start, stop, and step, calculate the number of elements 3192 of this slice. 3193 """ 3194 assert step != 0 3195 if stop == start: 3196 return 0 3197 if step > 0: 3198 assert start < stop 3199 dim_size = (stop - start - 1) // step + 1 3200 else: 3201 assert stop < start 3202 dim_size = (start - stop - 1) // (-step) + 1 3203 return dim_size 3204 3205 3206def _get_slice_len(slc, seq_length): 3207 """Given a python slice object and the length of the sequence, calculate the number of elements 3208 in the slice. 
3209 3210 Parameters 3211 ---------- 3212 slc : py_slice 3213 The slice object 3214 seq_length : int 3215 The length of the object you are going to apply the slice on 3216 3217 Returns 3218 ------- 3219 ret : int 3220 Total number of elements in the slice 3221 """ 3222 start, stop, step = slc.indices(seq_length) 3223 return max(0, (stop - start + (step - (1 if step > 0 else -1))) // step) 3224 3225 3226def _get_broadcast_shape(shape1, shape2): 3227 """Given two shapes that are not identical, find the shape 3228 that both input shapes can broadcast to.""" 3229 if shape1 == shape2: 3230 return shape1 3231 3232 length1 = len(shape1) 3233 length2 = len(shape2) 3234 if length1 > length2: 3235 shape = list(shape1) 3236 else: 3237 shape = list(shape2) 3238 i = max(length1, length2) - 1 3239 for a, b in zip(shape1[::-1], shape2[::-1]): 3240 if a != 1 and b != 1 and a != b: 3241 raise ValueError('shape1=%s is not broadcastable to shape2=%s' % (shape1, shape2)) 3242 shape[i] = b if a == 1 else a 3243 i -= 1 3244 return tuple(shape) 3245 3246 3247def _broadcast_shapes(seq): 3248 """Return the broadcast shape of all advanced indices in ``seq``. 3249 3250 All entries are assumed to have a ``shape`` property. 3251 """ 3252 return reduce(_get_broadcast_shape, [x.shape for x in seq], ()) 3253 3254 3255def onehot_encode(indices, out): 3256 """One-hot encoding indices into matrix out. 3257 3258 .. note:: `onehot_encode` is deprecated. Use `one_hot` instead. 3259 3260 """ 3261 # pylint: disable= no-member, protected-access 3262 return _internal._onehot_encode(indices, out, out=out) 3263 # pylint: enable= no-member, protected-access 3264 3265 3266def ones(shape, ctx=None, dtype=None, **kwargs): 3267 """Returns a new array filled with all ones, with the given shape and type. 3268 3269 Parameters 3270 ---------- 3271 shape : int or tuple of int or list of int 3272 The shape of the empty array. 3273 ctx : Context, optional 3274 An optional device context. 
        Defaults to the current default context (``mxnet.context.current_context()``).
    dtype : str or numpy.dtype, optional
        An optional value type (default is `float32`).
    out : NDArray, optional
        The output NDArray (default is `None`).

    Returns
    -------
    NDArray
        A new array of the specified shape filled with all ones.

    Examples
    --------
    >>> mx.nd.ones(1).asnumpy()
    array([ 1.], dtype=float32)
    >>> mx.nd.ones((1,2), mx.gpu(0))
    <NDArray 1x2 @gpu(0)>
    >>> mx.nd.ones((1,2), dtype='float16').asnumpy()
    array([[ 1.,  1.]], dtype=float16)
    """
    # pylint: disable= unused-argument
    if ctx is None:
        ctx = current_context()
    dtype = mx_real_t if dtype is None else dtype
    # pylint: disable= no-member, protected-access
    return _internal._ones(shape=shape, ctx=ctx, dtype=dtype, **kwargs)
    # pylint: enable= no-member, protected-access


def full(shape, val, ctx=None, dtype=mx_real_t, out=None):
    """Returns a new array of given shape and type, filled with the given value `val`.

    Parameters
    ----------
    shape : int or tuple of int
        The shape of the new array.
    val : scalar
        Fill value.
    ctx : Context, optional
        Device context (default is the current default context).
    dtype : `str` or `numpy.dtype`, optional
        The data type of the returned `NDArray`. The default datatype is `float32`.
    out : NDArray, optional
        The output NDArray (default is `None`).

    Returns
    -------
    NDArray
        `NDArray` filled with `val`, with the given shape, ctx, and dtype.

    Examples
    --------
    >>> mx.nd.full(1, 2.0).asnumpy()
    array([ 2.], dtype=float32)
    >>> mx.nd.full((1, 2), 2.0, mx.gpu(0))
    <NDArray 1x2 @gpu(0)>
    >>> mx.nd.full((1, 2), 2.0, dtype='float16').asnumpy()
    array([[ 2.,  2.]], dtype=float16)
    """
    out = empty(shape, ctx, dtype) if out is None else out
    out[:] = val
    return out


def array(source_array, ctx=None, dtype=None):
    """Creates an array from any object exposing the array interface.

    Parameters
    ----------
    source_array : array_like
        An object exposing the array interface, an object whose `__array__`
        method returns an array, or any (nested) sequence.
    ctx : Context, optional
        Device context (default is the current default context).
    dtype : str or numpy.dtype, optional
        The data type of the output array. The default dtype is ``source_array.dtype``
        if `source_array` is an `NDArray`, `float32` otherwise.

    Returns
    -------
    NDArray
        An `NDArray` with the same contents as the `source_array`.
    """
    if isinstance(source_array, NDArray):
        dtype = source_array.dtype if dtype is None else dtype
    else:
        dtype = mx_real_t if dtype is None else dtype
        if not isinstance(source_array, np.ndarray):
            try:
                source_array = np.array(source_array, dtype=dtype)
            except:  # NOTE(review): bare except hides the original conversion error
                raise TypeError('source_array must be array like object')

    if source_array.shape == ():
        # In this case we can't assign, so we need to go through an auxiliary array
        arr = empty((1,), ctx, dtype)
        arr[:] = source_array
        return arr.reshape(())
    elif source_array.size == 0:
        return empty(source_array.shape, ctx, dtype)
    else:
        arr = empty(source_array.shape, ctx, dtype)
        arr[:] = source_array
        return arr


def moveaxis(tensor, source, destination):
    """Moves the `source` axis into the `destination` position
    while leaving the other axes in their original order

    Parameters
    ----------
    tensor : mx.nd.array
        The array which axes should be reordered
    source : int or sequence of int
        Original position of the axes to move. Can be negative but must be unique.
    destination : int or sequence of int
        Destination position for each of the original axes. Can be negative but must be unique.

    Returns
    -------
    result : mx.nd.array
        Array with moved axes.

    Examples
    --------
    >>> X = mx.nd.array([[1, 2, 3], [4, 5, 6]])
    >>> mx.nd.moveaxis(X, 0, 1).shape
    (3L, 2L)

    >>> X = mx.nd.zeros((3, 4, 5))
    >>> mx.nd.moveaxis(X, [0, 1], [-1, -2]).shape
    (5, 4, 3)
    """
    try:
        source = np.core.numeric.normalize_axis_tuple(source, tensor.ndim)
    except IndexError:
        # NOTE(review): message is missing a separator before 'Got %d'
        raise ValueError('Source should verify 0 <= source < tensor.ndim'
                         'Got %d' % source)
    try:
        destination = np.core.numeric.normalize_axis_tuple(destination, tensor.ndim)
    except IndexError:
        raise ValueError('Destination should verify 0 <= destination < tensor.ndim (%d).'
                         % tensor.ndim, 'Got %d' % destination)

    if len(source) != len(destination):
        raise ValueError('`source` and `destination` arguments must have '
                         'the same number of elements')

    # Axes that are not moved keep their relative order; the moved axes are
    # then re-inserted at their destination positions.
    order = [n for n in range(tensor.ndim) if n not in source]

    for dest, src in sorted(zip(destination, source)):
        order.insert(dest, src)

    return op.transpose(tensor, order)


# pylint: disable= no-member, protected-access, too-many-arguments, redefined-outer-name
def arange(start, stop=None, step=1.0, repeat=1, infer_range=None, ctx=None, dtype=mx_real_t):
    """Returns evenly spaced values within a given interval.

    Values are generated within the half-open interval [`start`, `stop`). In other
    words, the interval includes `start` but excludes `stop`. The function is
    similar to the built-in Python function `range` and to `numpy.arange`,
    but returns an `NDArray`.

    Parameters
    ----------
    start : number, optional
        Start of interval. The default start value is 0.
    stop : number
        End of interval.
    step : number, optional
        Spacing between values. The default step size is 1.
    repeat : int, optional
        Number of times to repeat each element. The default repeat count is 1.
    infer_range : boolean, optional
        Infer the stop position from the start, step, repeat, and output tensor size.
        Deprecated. Only False is supported.
    ctx : Context, optional
        Device context. Default context is the current default context.
    dtype : str or numpy.dtype, optional
        The data type of the `NDArray`. The default datatype is `np.float32`.

    Returns
    -------
    NDArray
        `NDArray` of evenly spaced values in the specified range.

    Examples
    --------
    >>> mx.nd.arange(3).asnumpy()
    array([ 0.,  1.,  2.], dtype=float32)
    >>> mx.nd.arange(2, 6).asnumpy()
    array([ 2.,  3.,  4.,  5.], dtype=float32)
    >>> mx.nd.arange(2, 6, step=2).asnumpy()
    array([ 2.,  4.], dtype=float32)
    >>> mx.nd.arange(2, 6, step=1.5, repeat=2).asnumpy()
    array([ 2. ,  2. ,  3.5,  3.5,  5. ,  5. ], dtype=float32)
    >>> mx.nd.arange(2, 6, step=2, repeat=3, dtype='int32').asnumpy()
    array([2, 2, 2, 4, 4, 4], dtype=int32)
    """
    if infer_range is not None:
        warnings.warn('`infer_range` argument has been deprecated',
                      DeprecationWarning)
    if ctx is None:
        ctx = current_context()
    # `infer_range` is always forwarded as False; the argument only survives
    # for backward compatibility of the signature.
    return _internal._arange(start=start, stop=stop, step=step, repeat=repeat,
                             infer_range=False, dtype=dtype, ctx=str(ctx))
# pylint: enable= no-member, protected-access, too-many-arguments


# pylint: disable= no-member, protected-access, too-many-arguments
def linspace(start, stop, num, endpoint=True, ctx=None, dtype=mx_real_t):
    """Return evenly spaced numbers within a specified interval.

    Values are generated within the half-open interval [`start`, `stop`) or
    closed interval [start, stop] depending on whether `endpoint` is True or
    False. The function is similar to `numpy.linspace`, but returns an `NDArray`.

    Parameters
    ----------
    start : number
        Start of interval.
    stop : number
        End of interval, unless endpoint is set to False.
        In that case,
        the sequence consists of all but the last of `num + 1` evenly spaced
        samples, so that stop is excluded. Note that the step size changes
        when endpoint is False.
    num : number
        Number of samples to generate. Must be non-negative.
    endpoint : bool
        If True, stop is the last sample. Otherwise, it is not included.
        The default is True.
    ctx : Context, optional
        Device context. Default context is the current default context.
    dtype : str or numpy.dtype, optional
        The data type of the `NDArray`. The default datatype is `np.float32`.

    Returns
    -------
    NDArray
        `NDArray` of evenly spaced values in the specified range.

    Examples
    --------
    >>> mx.nd.linspace(2.0, 3.0, 5).asnumpy()
    array([ 2.  ,  2.25,  2.5 ,  2.75,  3.  ], dtype=float32)
    >>> mx.nd.linspace(2.0, 3.0, 5, endpoint=False).asnumpy()
    array([ 2. ,  2.2,  2.4,  2.6,  2.8], dtype=float32)
    """
    if ctx is None:
        ctx = current_context()
    return _internal._linspace(start=start, stop=stop, num=num,
                               endpoint=endpoint, dtype=dtype, ctx=str(ctx))
# pylint: enable= no-member, protected-access, too-many-arguments


#pylint: disable= too-many-arguments, no-member, protected-access
def _ufunc_helper(lhs, rhs, fn_array, fn_scalar, lfn_scalar, rfn_scalar=None):
    """ Helper function for element-wise operation.
    The function will perform numpy-like broadcasting if needed and call different functions.

    Parameters
    --------
    lhs : NDArray or numeric value
        Left-hand side operand.

    rhs : NDArray or numeric value
        Right-hand side operand.

    fn_array : function
        Function to be called if both lhs and rhs are of ``NDArray`` type.

    fn_scalar : function
        Function to be called if both lhs and rhs are numeric values.

    lfn_scalar : function
        Function to be called if lhs is ``NDArray`` while rhs is numeric value

    rfn_scalar : function
        Function to be called if lhs is numeric value while rhs is ``NDArray``;
        if none is provided, then the function is commutative, so rfn_scalar is equal to lfn_scalar

    Returns
    --------
    NDArray
        result array
    """
    if isinstance(lhs, numeric_types):
        if isinstance(rhs, numeric_types):
            # pure scalar op, no NDArray involved
            return fn_scalar(lhs, rhs)
        else:
            if rfn_scalar is None:
                # commutative function
                return lfn_scalar(rhs, float(lhs))
            else:
                return rfn_scalar(rhs, float(lhs))
    elif isinstance(rhs, numeric_types):
        return lfn_scalar(lhs, float(rhs))
    elif isinstance(rhs, NDArray):
        return fn_array(lhs, rhs)
    else:
        raise TypeError('type %s not supported' % str(type(rhs)))
#pylint: enable= too-many-arguments, no-member, protected-access


def add(lhs, rhs):
    """Returns element-wise sum of the input arrays with broadcasting.

    Equivalent to ``lhs + rhs``, ``mx.nd.broadcast_add(lhs, rhs)`` and
    ``mx.nd.broadcast_plus(lhs, rhs)``.

    .. note::

       If the corresponding dimensions of two arrays have the same size or one of them has size 1,
       then the arrays are broadcastable to a common shape

    Parameters
    ----------
    lhs : scalar or mxnet.ndarray.array
        First array to be added.
    rhs : scalar or mxnet.ndarray.array
        Second array to be added.
        If ``lhs.shape != rhs.shape``, they must be
        broadcastable to a common shape.

    Returns
    -------
    NDArray
        The element-wise sum of the input arrays.
3606 3607 Examples 3608 -------- 3609 >>> x = mx.nd.ones((2,3)) 3610 >>> y = mx.nd.arange(2).reshape((2,1)) 3611 >>> z = mx.nd.arange(2).reshape((1,2)) 3612 >>> x.asnumpy() 3613 array([[ 1., 1., 1.], 3614 [ 1., 1., 1.]], dtype=float32) 3615 >>> y.asnumpy() 3616 array([[ 0.], 3617 [ 1.]], dtype=float32) 3618 >>> z.asnumpy() 3619 array([[ 0., 1.]], dtype=float32) 3620 >>> (x+2).asnumpy() 3621 array([[ 3., 3., 3.], 3622 [ 3., 3., 3.]], dtype=float32) 3623 >>> (x+y).asnumpy() 3624 array([[ 1., 1., 1.], 3625 [ 2., 2., 2.]], dtype=float32) 3626 >>> mx.nd.add(x,y).asnumpy() 3627 array([[ 1., 1., 1.], 3628 [ 2., 2., 2.]], dtype=float32) 3629 >>> (z + y).asnumpy() 3630 array([[ 0., 1.], 3631 [ 1., 2.]], dtype=float32) 3632 """ 3633 # pylint: disable= no-member, protected-access 3634 return _ufunc_helper( 3635 lhs, 3636 rhs, 3637 op.broadcast_add, 3638 operator.add, 3639 _internal._plus_scalar, 3640 None) 3641 # pylint: enable= no-member, protected-access 3642 3643 3644def subtract(lhs, rhs): 3645 """Returns element-wise difference of the input arrays with broadcasting. 3646 3647 Equivalent to ``lhs - rhs``, ``mx.nd.broadcast_sub(lhs, rhs)`` and 3648 ``mx.nd.broadcast_minus(lhs, rhs)``. 3649 3650 .. note:: 3651 3652 If the corresponding dimensions of two arrays have the same size or one of them has size 1, 3653 then the arrays are broadcastable to a common shape. 3654 3655 Parameters 3656 ---------- 3657 lhs : scalar or mxnet.ndarray.array 3658 First array to be subtracted. 3659 rhs : scalar or mxnet.ndarray.array 3660 Second array to be subtracted. 3661 If ``lhs.shape != rhs.shape``, they must be 3662 broadcastable to a common shape. 3663 3664 Returns 3665 ------- 3666 NDArray 3667 The element-wise difference of the input arrays. 
3668 3669 Examples 3670 -------- 3671 >>> x = mx.nd.ones((2,3)) 3672 >>> y = mx.nd.arange(2).reshape((2,1)) 3673 >>> z = mx.nd.arange(2).reshape((1,2)) 3674 >>> x.asnumpy() 3675 array([[ 1., 1., 1.], 3676 [ 1., 1., 1.]], dtype=float32) 3677 >>> y.asnumpy() 3678 array([[ 0.], 3679 [ 1.]], dtype=float32) 3680 >>> z.asnumpy() 3681 array([[ 0., 1.]], dtype=float32) 3682 >>> (x-2).asnumpy() 3683 array([[-1., -1., -1.], 3684 [-1., -1., -1.]], dtype=float32) 3685 >>> (x-y).asnumpy() 3686 array([[ 1., 1., 1.], 3687 [ 0., 0., 0.]], dtype=float32) 3688 >>> mx.nd.subtract(x,y).asnumpy() 3689 array([[ 1., 1., 1.], 3690 [ 0., 0., 0.]], dtype=float32) 3691 >>> (z-y).asnumpy() 3692 array([[ 0., 1.], 3693 [-1., 0.]], dtype=float32) 3694 """ 3695 # pylint: disable= no-member, protected-access 3696 return _ufunc_helper( 3697 lhs, 3698 rhs, 3699 op.broadcast_sub, 3700 operator.sub, 3701 _internal._minus_scalar, 3702 _internal._rminus_scalar) 3703 # pylint: enable= no-member, protected-access 3704 3705 3706def multiply(lhs, rhs): 3707 """Returns element-wise product of the input arrays with broadcasting. 3708 3709 Equivalent to ``lhs * rhs`` and ``mx.nd.broadcast_mul(lhs, rhs)``. 3710 3711 .. note:: 3712 3713 If the corresponding dimensions of two arrays have the same size or one of them has size 1, 3714 then the arrays are broadcastable to a common shape. 3715 3716 Parameters 3717 ---------- 3718 lhs : scalar or mxnet.ndarray.array 3719 First array to be multiplied. 3720 rhs : scalar or mxnet.ndarray.array 3721 Second array to be multiplied. 3722 If ``lhs.shape != rhs.shape``, they must be 3723 broadcastable to a common shape. 3724 3725 Returns 3726 ------- 3727 NDArray 3728 The element-wise multiplication of the input arrays. 
3729 3730 Examples 3731 -------- 3732 >>> x = mx.nd.ones((2,3)) 3733 >>> y = mx.nd.arange(2).reshape((2,1)) 3734 >>> z = mx.nd.arange(2).reshape((1,2)) 3735 >>> x.asnumpy() 3736 array([[ 1., 1., 1.], 3737 [ 1., 1., 1.]], dtype=float32) 3738 >>> y.asnumpy() 3739 array([[ 0.], 3740 [ 1.]], dtype=float32) 3741 >>> z.asnumpy() 3742 array([[ 0., 1.]], dtype=float32) 3743 >>> (x*2).asnumpy() 3744 array([[ 2., 2., 2.], 3745 [ 2., 2., 2.]], dtype=float32) 3746 >>> (x*y).asnumpy() 3747 array([[ 0., 0., 0.], 3748 [ 1., 1., 1.]], dtype=float32) 3749 >>> mx.nd.multiply(x, y).asnumpy() 3750 array([[ 0., 0., 0.], 3751 [ 1., 1., 1.]], dtype=float32) 3752 >>> (z*y).asnumpy() 3753 array([[ 0., 0.], 3754 [ 0., 1.]], dtype=float32) 3755 """ 3756 # pylint: disable= no-member, protected-access 3757 return _ufunc_helper( 3758 lhs, 3759 rhs, 3760 op.broadcast_mul, 3761 operator.mul, 3762 _internal._mul_scalar, 3763 None) 3764 # pylint: enable= no-member, protected-access 3765 3766 3767def divide(lhs, rhs): 3768 """Returns element-wise division of the input arrays with broadcasting. 3769 3770 Equivalent to ``lhs / rhs`` and ``mx.nd.broadcast_div(lhs, rhs)``. 3771 3772 .. note:: 3773 3774 If the corresponding dimensions of two arrays have the same size or one of them has size 1, 3775 then the arrays are broadcastable to a common shape. 3776 3777 Parameters 3778 ---------- 3779 lhs : scalar or mxnet.ndarray.array 3780 First array in division. 3781 rhs : scalar or mxnet.ndarray.array 3782 Second array in division. 3783 The arrays to be divided. If ``lhs.shape != rhs.shape``, they must be 3784 broadcastable to a common shape. 3785 3786 Returns 3787 ------- 3788 NDArray 3789 The element-wise division of the input arrays. 
3790 3791 Examples 3792 -------- 3793 >>> x = mx.nd.ones((2,3))*6 3794 >>> y = mx.nd.ones((2,1))*2 3795 >>> x.asnumpy() 3796 array([[ 6., 6., 6.], 3797 [ 6., 6., 6.]], dtype=float32) 3798 >>> y.asnumpy() 3799 array([[ 2.], 3800 [ 2.]], dtype=float32) 3801 >>> x/2 3802 <NDArray 2x3 @cpu(0)> 3803 >>> (x/3).asnumpy() 3804 array([[ 2., 2., 2.], 3805 [ 2., 2., 2.]], dtype=float32) 3806 >>> (x/y).asnumpy() 3807 array([[ 3., 3., 3.], 3808 [ 3., 3., 3.]], dtype=float32) 3809 >>> mx.nd.divide(x,y).asnumpy() 3810 array([[ 3., 3., 3.], 3811 [ 3., 3., 3.]], dtype=float32) 3812 """ 3813 # pylint: disable= no-member, protected-access 3814 return _ufunc_helper( 3815 lhs, 3816 rhs, 3817 op.broadcast_div, 3818 operator.truediv, 3819 _internal._div_scalar, 3820 _internal._rdiv_scalar) 3821 # pylint: enable= no-member, protected-access 3822 3823 3824def modulo(lhs, rhs): 3825 """Returns element-wise modulo of the input arrays with broadcasting. 3826 3827 Equivalent to ``lhs % rhs`` and ``mx.nd.broadcast_mod(lhs, rhs)``. 3828 3829 .. note:: 3830 3831 If the corresponding dimensions of two arrays have the same size or one of them has size 1, 3832 then the arrays are broadcastable to a common shape. 3833 3834 Parameters 3835 ---------- 3836 lhs : scalar or mxnet.ndarray.array 3837 First array in modulo. 3838 rhs : scalar or mxnet.ndarray.array 3839 Second array in modulo. 3840 The arrays to be taken modulo. If ``lhs.shape != rhs.shape``, they must be 3841 broadcastable to a common shape. 3842 3843 Returns 3844 ------- 3845 NDArray 3846 The element-wise modulo of the input arrays. 
3847 3848 Examples 3849 -------- 3850 >>> x = mx.nd.ones((2,3))*6 3851 >>> y = mx.nd.ones((2,1))*4 3852 >>> x.asnumpy() 3853 array([[ 6., 6., 6.], 3854 [ 6., 6., 6.]], dtype=float32) 3855 >>> y.asnumpy() 3856 array([[ 4.], 3857 [ 4.]], dtype=float32) 3858 >>> x%5 3859 <NDArray 2x3 @cpu(0)> 3860 >>> (x%5).asnumpy() 3861 array([[ 1., 1., 1.], 3862 [ 1., 1., 1.]], dtype=float32) 3863 >>> (x%y).asnumpy() 3864 array([[ 2., 2., 2.], 3865 [ 2., 2., 2.]], dtype=float32) 3866 >>> mx.nd.modulo(x,y).asnumpy() 3867 array([[ 2., 2., 2.], 3868 [ 2., 2., 2.]], dtype=float32) 3869 """ 3870 # pylint: disable= no-member, protected-access 3871 return _ufunc_helper( 3872 lhs, 3873 rhs, 3874 op.broadcast_mod, 3875 operator.mod, 3876 _internal._mod_scalar, 3877 _internal._rmod_scalar) 3878 # pylint: enable= no-member, protected-access 3879 3880 3881def power(base, exp): 3882 """Returns result of first array elements raised to powers from second array, element-wise 3883 with broadcasting. 3884 3885 Equivalent to ``base ** exp`` and ``mx.nd.broadcast_power(lhs, rhs)``. 3886 3887 .. note:: 3888 3889 If the corresponding dimensions of two arrays have the same size or one of them has size 1, 3890 then the arrays are broadcastable to a common shape. 3891 3892 Parameters 3893 ---------- 3894 base : scalar or NDArray 3895 The base array 3896 exp : scalar or NDArray 3897 The exponent array. If ``base.shape != exp.shape``, they must be 3898 broadcastable to a common shape. 3899 3900 Returns 3901 -------- 3902 NDArray 3903 The bases in x raised to the exponents in y. 
3904 3905 Examples 3906 -------- 3907 >>> x = mx.nd.ones((2,3))*2 3908 >>> y = mx.nd.arange(1,3).reshape((2,1)) 3909 >>> z = mx.nd.arange(1,3).reshape((2,1)) 3910 >>> x.asnumpy() 3911 array([[ 2., 2., 2.], 3912 [ 2., 2., 2.]], dtype=float32) 3913 >>> y.asnumpy() 3914 array([[ 1.], 3915 [ 2.]], dtype=float32) 3916 >>> z.asnumpy() 3917 array([[ 1.], 3918 [ 2.]], dtype=float32) 3919 >>> (x**2).asnumpy() 3920 array([[ 4., 4., 4.], 3921 [ 4., 4., 4.]], dtype=float32) 3922 >>> (x**y).asnumpy() 3923 array([[ 2., 2., 2.], 3924 [ 4., 4., 4.]], dtype=float32) 3925 >>> mx.nd.power(x,y).asnumpy() 3926 array([[ 2., 2., 2.], 3927 [ 4., 4., 4.]], dtype=float32) 3928 >>> (z**y).asnumpy() 3929 array([[ 1.], 3930 [ 4.]], dtype=float32) 3931 """ 3932 # pylint: disable= no-member, protected-access 3933 return _ufunc_helper( 3934 base, 3935 exp, 3936 op.broadcast_power, 3937 operator.pow, 3938 _internal._power_scalar, 3939 _internal._rpower_scalar) 3940 # pylint: enable= no-member, protected-access 3941 3942 3943def maximum(lhs, rhs): 3944 """Returns element-wise maximum of the input arrays with broadcasting. 3945 3946 Equivalent to ``mx.nd.broadcast_maximum(lhs, rhs)``. 3947 3948 .. note:: 3949 3950 If the corresponding dimensions of two arrays have the same size or one of them has size 1, 3951 then the arrays are broadcastable to a common shape. 3952 3953 Parameters 3954 ---------- 3955 lhs : scalar or mxnet.ndarray.array 3956 First array to be compared. 3957 rhs : scalar or mxnet.ndarray.array 3958 Second array to be compared. If ``lhs.shape != rhs.shape``, they must be 3959 broadcastable to a common shape. 3960 3961 Returns 3962 ------- 3963 NDArray 3964 The element-wise maximum of the input arrays. 
3965 3966 Examples 3967 -------- 3968 >>> x = mx.nd.ones((2,3)) 3969 >>> y = mx.nd.arange(2).reshape((2,1)) 3970 >>> z = mx.nd.arange(2).reshape((1,2)) 3971 >>> x.asnumpy() 3972 array([[ 1., 1., 1.], 3973 [ 1., 1., 1.]], dtype=float32) 3974 >>> y.asnumpy() 3975 array([[ 0.], 3976 [ 1.]], dtype=float32) 3977 >>> z.asnumpy() 3978 array([[ 0., 1.]], dtype=float32) 3979 >>> mx.nd.maximum(x, 2).asnumpy() 3980 array([[ 2., 2., 2.], 3981 [ 2., 2., 2.]], dtype=float32) 3982 >>> mx.nd.maximum(x, y).asnumpy() 3983 array([[ 1., 1., 1.], 3984 [ 1., 1., 1.]], dtype=float32) 3985 >>> mx.nd.maximum(y, z).asnumpy() 3986 array([[ 0., 1.], 3987 [ 1., 1.]], dtype=float32) 3988 """ 3989 # pylint: disable= no-member, protected-access 3990 return _ufunc_helper( 3991 lhs, 3992 rhs, 3993 op.broadcast_maximum, 3994 lambda x, y: x if x > y else y, 3995 _internal._maximum_scalar, 3996 None) 3997 # pylint: enable= no-member, protected-access 3998 3999 4000def minimum(lhs, rhs): 4001 """Returns element-wise minimum of the input arrays with broadcasting. 4002 4003 Equivalent to ``mx.nd.broadcast_minimum(lhs, rhs)``. 4004 4005 .. note:: 4006 4007 If the corresponding dimensions of two arrays have the same size or one of them has size 1, 4008 then the arrays are broadcastable to a common shape. 4009 4010 Parameters 4011 ---------- 4012 lhs : scalar or mxnet.ndarray.array 4013 First array to be compared. 4014 rhs : scalar or mxnet.ndarray.array 4015 Second array to be compared. If ``lhs.shape != rhs.shape``, they must be 4016 broadcastable to a common shape. 4017 4018 Returns 4019 ------- 4020 NDArray 4021 The element-wise minimum of the input arrays. 
4022 4023 Examples 4024 -------- 4025 >>> x = mx.nd.ones((2,3)) 4026 >>> y = mx.nd.arange(2).reshape((2,1)) 4027 >>> z = mx.nd.arange(2).reshape((1,2)) 4028 >>> x.asnumpy() 4029 array([[ 1., 1., 1.], 4030 [ 1., 1., 1.]], dtype=float32) 4031 >>> y.asnumpy() 4032 array([[ 0.], 4033 [ 1.]], dtype=float32) 4034 >>> z.asnumpy() 4035 array([[ 0., 1.]], dtype=float32) 4036 >>> mx.nd.minimum(x, 2).asnumpy() 4037 array([[ 1., 1., 1.], 4038 [ 1., 1., 1.]], dtype=float32) 4039 >>> mx.nd.minimum(x, y).asnumpy() 4040 array([[ 0., 0., 0.], 4041 [ 1., 1., 1.]], dtype=float32) 4042 >>> mx.nd.minimum(z, y).asnumpy() 4043 array([[ 0., 0.], 4044 [ 0., 1.]], dtype=float32) 4045 """ 4046 # pylint: disable= no-member, protected-access 4047 return _ufunc_helper( 4048 lhs, 4049 rhs, 4050 op.broadcast_minimum, 4051 lambda x, y: x if x < y else y, 4052 _internal._minimum_scalar, 4053 None) 4054 # pylint: enable= no-member, protected-access 4055 4056 4057def equal(lhs, rhs): 4058 """Returns the result of element-wise **equal to** (==) comparison operation with 4059 broadcasting. 4060 4061 For each element in input arrays, return 1(true) if corresponding elements are same, 4062 otherwise return 0(false). 4063 4064 Equivalent to ``lhs == rhs`` and ``mx.nd.broadcast_equal(lhs, rhs)``. 4065 4066 .. note:: 4067 4068 If the corresponding dimensions of two arrays have the same size or one of them has size 1, 4069 then the arrays are broadcastable to a common shape. 4070 4071 Parameters 4072 ---------- 4073 lhs : scalar or mxnet.ndarray.array 4074 First array to be compared. 4075 rhs : scalar or mxnet.ndarray.array 4076 Second array to be compared. If ``lhs.shape != rhs.shape``, they must be 4077 broadcastable to a common shape. 4078 4079 Returns 4080 ------- 4081 NDArray 4082 Output array of boolean values. 
4083 4084 Examples 4085 -------- 4086 >>> x = mx.nd.ones((2,3)) 4087 >>> y = mx.nd.arange(2).reshape((2,1)) 4088 >>> z = mx.nd.arange(2).reshape((1,2)) 4089 >>> x.asnumpy() 4090 array([[ 1., 1., 1.], 4091 [ 1., 1., 1.]], dtype=float32) 4092 >>> y.asnumpy() 4093 array([[ 0.], 4094 [ 1.]], dtype=float32) 4095 >>> z.asnumpy() 4096 array([[ 0., 1.]], dtype=float32) 4097 >>> (x == 1).asnumpy() 4098 array([[ 1., 1., 1.], 4099 [ 1., 1., 1.]], dtype=float32) 4100 >>> (x == y).asnumpy() 4101 array([[ 0., 0., 0.], 4102 [ 1., 1., 1.]], dtype=float32) 4103 >>> mx.nd.equal(x,y).asnumpy() 4104 array([[ 0., 0., 0.], 4105 [ 1., 1., 1.]], dtype=float32) 4106 >>> (z == y).asnumpy() 4107 array([[ 1., 0.], 4108 [ 0., 1.]], dtype=float32) 4109 """ 4110 # pylint: disable= no-member, protected-access 4111 return _ufunc_helper( 4112 lhs, 4113 rhs, 4114 op.broadcast_equal, 4115 lambda x, y: 1 if x == y else 0, 4116 _internal._equal_scalar, 4117 None) 4118 # pylint: enable= no-member, protected-access 4119 4120 4121def not_equal(lhs, rhs): 4122 """Returns the result of element-wise **not equal to** (!=) comparison operation 4123 with broadcasting. 4124 4125 For each element in input arrays, return 1(true) if corresponding elements are different, 4126 otherwise return 0(false). 4127 4128 Equivalent to ``lhs != rhs`` and ``mx.nd.broadcast_not_equal(lhs, rhs)``. 4129 4130 .. note:: 4131 4132 If the corresponding dimensions of two arrays have the same size or one of them has size 1, 4133 then the arrays are broadcastable to a common shape. 4134 4135 Parameters 4136 ---------- 4137 lhs : scalar or mxnet.ndarray.array 4138 First array to be compared. 4139 rhs : scalar or mxnet.ndarray.array 4140 Second array to be compared. If ``lhs.shape != rhs.shape``, they must be 4141 broadcastable to a common shape. 4142 4143 Returns 4144 ------- 4145 NDArray 4146 Output array of boolean values. 
4147 4148 Examples 4149 -------- 4150 >>> x = mx.nd.ones((2,3)) 4151 >>> y = mx.nd.arange(2).reshape((2,1)) 4152 >>> z = mx.nd.arange(2).reshape((1,2)) 4153 >>> x.asnumpy() 4154 array([[ 1., 1., 1.], 4155 [ 1., 1., 1.]], dtype=float32) 4156 >>> y.asnumpy() 4157 array([[ 0.], 4158 [ 1.]], dtype=float32) 4159 >>> z.asnumpy() 4160 array([[ 0., 1.]], dtype=float32) 4161 >>> (z == y).asnumpy() 4162 array([[ 1., 0.], 4163 [ 0., 1.]], dtype=float32) 4164 >>> (x != 1).asnumpy() 4165 array([[ 0., 0., 0.], 4166 [ 0., 0., 0.]], dtype=float32) 4167 >>> (x != y).asnumpy() 4168 array([[ 1., 1., 1.], 4169 [ 0., 0., 0.]], dtype=float32) 4170 >>> mx.nd.not_equal(x, y).asnumpy() 4171 array([[ 1., 1., 1.], 4172 [ 0., 0., 0.]], dtype=float32) 4173 >>> (z != y).asnumpy() 4174 array([[ 0., 1.], 4175 [ 1., 0.]], dtype=float32) 4176 """ 4177 # pylint: disable= no-member, protected-access 4178 return _ufunc_helper( 4179 lhs, 4180 rhs, 4181 op.broadcast_not_equal, 4182 lambda x, y: 1 if x != y else 0, 4183 _internal._not_equal_scalar, 4184 None) 4185 # pylint: enable= no-member, protected-access 4186 4187 4188def greater(lhs, rhs): 4189 """Returns the result of element-wise **greater than** (>) comparison operation 4190 with broadcasting. 4191 4192 For each element in input arrays, return 1(true) if lhs elements are greater than rhs, 4193 otherwise return 0(false). 4194 4195 Equivalent to ``lhs > rhs`` and ``mx.nd.broadcast_greater(lhs, rhs)``. 4196 4197 .. note:: 4198 4199 If the corresponding dimensions of two arrays have the same size or one of them has size 1, 4200 then the arrays are broadcastable to a common shape. 4201 4202 Parameters 4203 ---------- 4204 lhs : scalar or mxnet.ndarray.array 4205 First array to be compared. 4206 rhs : scalar or mxnet.ndarray.array 4207 Second array to be compared. If ``lhs.shape != rhs.shape``, they must be 4208 broadcastable to a common shape. 4209 4210 Returns 4211 ------- 4212 NDArray 4213 Output array of boolean values. 
4214 4215 Examples 4216 -------- 4217 >>> x = mx.nd.ones((2,3)) 4218 >>> y = mx.nd.arange(2).reshape((2,1)) 4219 >>> z = mx.nd.arange(2).reshape((1,2)) 4220 >>> x.asnumpy() 4221 array([[ 1., 1., 1.], 4222 [ 1., 1., 1.]], dtype=float32) 4223 >>> y.asnumpy() 4224 array([[ 0.], 4225 [ 1.]], dtype=float32) 4226 >>> z.asnumpy() 4227 array([[ 0., 1.]], dtype=float32) 4228 >>> (x > 1).asnumpy() 4229 array([[ 0., 0., 0.], 4230 [ 0., 0., 0.]], dtype=float32) 4231 >>> (x > y).asnumpy() 4232 array([[ 1., 1., 1.], 4233 [ 0., 0., 0.]], dtype=float32) 4234 >>> mx.nd.greater(x, y).asnumpy() 4235 array([[ 1., 1., 1.], 4236 [ 0., 0., 0.]], dtype=float32) 4237 >>> (z > y).asnumpy() 4238 array([[ 0., 1.], 4239 [ 0., 0.]], dtype=float32) 4240 """ 4241 # pylint: disable= no-member, protected-access 4242 return _ufunc_helper( 4243 lhs, 4244 rhs, 4245 op.broadcast_greater, 4246 lambda x, y: 1 if x > y else 0, 4247 _internal._greater_scalar, 4248 _internal._lesser_scalar) 4249 # pylint: enable= no-member, protected-access 4250 4251 4252def greater_equal(lhs, rhs): 4253 """Returns the result of element-wise **greater than or equal to** (>=) comparison 4254 operation with broadcasting. 4255 4256 For each element in input arrays, return 1(true) if lhs elements are greater than equal to rhs, 4257 otherwise return 0(false). 4258 4259 Equivalent to ``lhs >= rhs`` and ``mx.nd.broadcast_greater_equal(lhs, rhs)``. 4260 4261 .. note:: 4262 4263 If the corresponding dimensions of two arrays have the same size or one of them has size 1, 4264 then the arrays are broadcastable to a common shape. 4265 4266 Parameters 4267 ---------- 4268 lhs : scalar or mxnet.ndarray.array 4269 First array to be compared. 4270 rhs : scalar or mxnet.ndarray.array 4271 Second array to be compared. If ``lhs.shape != rhs.shape``, they must be 4272 broadcastable to a common shape. 4273 4274 Returns 4275 ------- 4276 NDArray 4277 Output array of boolean values. 
4278 4279 Examples 4280 -------- 4281 >>> x = mx.nd.ones((2,3)) 4282 >>> y = mx.nd.arange(2).reshape((2,1)) 4283 >>> z = mx.nd.arange(2).reshape((1,2)) 4284 >>> x.asnumpy() 4285 array([[ 1., 1., 1.], 4286 [ 1., 1., 1.]], dtype=float32) 4287 >>> y.asnumpy() 4288 array([[ 0.], 4289 [ 1.]], dtype=float32) 4290 >>> z.asnumpy() 4291 array([[ 0., 1.]], dtype=float32) 4292 >>> (x >= 1).asnumpy() 4293 array([[ 1., 1., 1.], 4294 [ 1., 1., 1.]], dtype=float32) 4295 >>> (x >= y).asnumpy() 4296 array([[ 1., 1., 1.], 4297 [ 1., 1., 1.]], dtype=float32) 4298 >>> mx.nd.greater_equal(x, y).asnumpy() 4299 array([[ 1., 1., 1.], 4300 [ 1., 1., 1.]], dtype=float32) 4301 >>> (z >= y).asnumpy() 4302 array([[ 1., 1.], 4303 [ 0., 1.]], dtype=float32) 4304 """ 4305 # pylint: disable= no-member, protected-access 4306 return _ufunc_helper( 4307 lhs, 4308 rhs, 4309 op.broadcast_greater_equal, 4310 lambda x, y: 1 if x >= y else 0, 4311 _internal._greater_equal_scalar, 4312 _internal._lesser_equal_scalar) 4313 # pylint: enable= no-member, protected-access 4314 4315 4316def lesser(lhs, rhs): 4317 """Returns the result of element-wise **lesser than** (<) comparison operation 4318 with broadcasting. 4319 4320 For each element in input arrays, return 1(true) if lhs elements are less than rhs, 4321 otherwise return 0(false). 4322 4323 Equivalent to ``lhs < rhs`` and ``mx.nd.broadcast_lesser(lhs, rhs)``. 4324 4325 .. note:: 4326 4327 If the corresponding dimensions of two arrays have the same size or one of them has size 1, 4328 then the arrays are broadcastable to a common shape. 4329 4330 Parameters 4331 ---------- 4332 lhs : scalar or mxnet.ndarray.array 4333 First array to be compared. 4334 rhs : scalar or mxnet.ndarray.array 4335 Second array to be compared. If ``lhs.shape != rhs.shape``, they must be 4336 broadcastable to a common shape. 4337 4338 Returns 4339 ------- 4340 NDArray 4341 Output array of boolean values. 
4342 4343 Examples 4344 -------- 4345 >>> x = mx.nd.ones((2,3)) 4346 >>> y = mx.nd.arange(2).reshape((2,1)) 4347 >>> z = mx.nd.arange(2).reshape((1,2)) 4348 >>> x.asnumpy() 4349 array([[ 1., 1., 1.], 4350 [ 1., 1., 1.]], dtype=float32) 4351 >>> y.asnumpy() 4352 array([[ 0.], 4353 [ 1.]], dtype=float32) 4354 >>> z.asnumpy() 4355 array([[ 0., 1.]], dtype=float32) 4356 >>> (x < 1).asnumpy() 4357 array([[ 0., 0., 0.], 4358 [ 0., 0., 0.]], dtype=float32) 4359 >>> (x < y).asnumpy() 4360 array([[ 0., 0., 0.], 4361 [ 0., 0., 0.]], dtype=float32) 4362 >>> mx.nd.lesser(x, y).asnumpy() 4363 array([[ 0., 0., 0.], 4364 [ 0., 0., 0.]], dtype=float32) 4365 >>> (z < y).asnumpy() 4366 array([[ 0., 0.], 4367 [ 1., 0.]], dtype=float32) 4368 """ 4369 # pylint: disable= no-member, protected-access 4370 return _ufunc_helper( 4371 lhs, 4372 rhs, 4373 op.broadcast_lesser, 4374 lambda x, y: 1 if x < y else 0, 4375 _internal._lesser_scalar, 4376 _internal._greater_scalar) 4377 # pylint: enable= no-member, protected-access 4378 4379 4380def lesser_equal(lhs, rhs): 4381 """Returns the result of element-wise **lesser than or equal to** (<=) comparison 4382 operation with broadcasting. 4383 4384 For each element in input arrays, return 1(true) if lhs elements are 4385 lesser than equal to rhs, otherwise return 0(false). 4386 4387 Equivalent to ``lhs <= rhs`` and ``mx.nd.broadcast_lesser_equal(lhs, rhs)``. 4388 4389 .. note:: 4390 4391 If the corresponding dimensions of two arrays have the same size or one of them has size 1, 4392 then the arrays are broadcastable to a common shape. 4393 4394 Parameters 4395 ---------- 4396 lhs : scalar or mxnet.ndarray.array 4397 First array to be compared. 4398 rhs : scalar or mxnet.ndarray.array 4399 Second array to be compared. If ``lhs.shape != rhs.shape``, they must be 4400 broadcastable to a common shape. 4401 4402 Returns 4403 ------- 4404 NDArray 4405 Output array of boolean values. 
4406 4407 Examples 4408 -------- 4409 >>> x = mx.nd.ones((2,3)) 4410 >>> y = mx.nd.arange(2).reshape((2,1)) 4411 >>> z = mx.nd.arange(2).reshape((1,2)) 4412 >>> x.asnumpy() 4413 array([[ 1., 1., 1.], 4414 [ 1., 1., 1.]], dtype=float32) 4415 >>> y.asnumpy() 4416 array([[ 0.], 4417 [ 1.]], dtype=float32) 4418 >>> z.asnumpy() 4419 array([[ 0., 1.]], dtype=float32) 4420 >>> (x <= 1).asnumpy() 4421 array([[ 1., 1., 1.], 4422 [ 1., 1., 1.]], dtype=float32) 4423 >>> (x <= y).asnumpy() 4424 array([[ 0., 0., 0.], 4425 [ 1., 1., 1.]], dtype=float32) 4426 >>> mx.nd.lesser_equal(x, y).asnumpy() 4427 array([[ 0., 0., 0.], 4428 [ 1., 1., 1.]], dtype=float32) 4429 >>> (z <= y).asnumpy() 4430 array([[ 1., 0.], 4431 [ 1., 1.]], dtype=float32) 4432 """ 4433 # pylint: disable= no-member, protected-access 4434 return _ufunc_helper( 4435 lhs, 4436 rhs, 4437 op.broadcast_lesser_equal, 4438 lambda x, y: 1 if x <= y else 0, 4439 _internal._lesser_equal_scalar, 4440 _internal._greater_equal_scalar) 4441 # pylint: enable= no-member, protected-access 4442 4443def logical_and(lhs, rhs): 4444 """Returns the result of element-wise **logical and** comparison 4445 operation with broadcasting. 4446 4447 For each element in input arrays, return 1(true) if lhs elements and rhs elements 4448 are true, otherwise return 0(false). 4449 4450 Equivalent to ``lhs and rhs`` and ``mx.nd.broadcast_logical_and(lhs, rhs)``. 4451 4452 .. note:: 4453 4454 If the corresponding dimensions of two arrays have the same size or one of them has size 1, 4455 then the arrays are broadcastable to a common shape. 4456 4457 Parameters 4458 ---------- 4459 lhs : scalar or mxnet.ndarray.array 4460 First input of the function. 4461 rhs : scalar or mxnet.ndarray.array 4462 Second input of the function. If ``lhs.shape != rhs.shape``, they must be 4463 broadcastable to a common shape. 4464 4465 Returns 4466 ------- 4467 NDArray 4468 Output array of boolean values. 
4469 4470 Examples 4471 -------- 4472 >>> x = mx.nd.ones((2,3)) 4473 >>> y = mx.nd.arange(2).reshape((2,1)) 4474 >>> z = mx.nd.arange(2).reshape((1,2)) 4475 >>> x.asnumpy() 4476 array([[ 1., 1., 1.], 4477 [ 1., 1., 1.]], dtype=float32) 4478 >>> y.asnumpy() 4479 array([[ 0.], 4480 [ 1.]], dtype=float32) 4481 >>> z.asnumpy() 4482 array([[ 0., 1.]], dtype=float32) 4483 >>> mx.nd.logical_and(x, 1).asnumpy() 4484 array([[ 1., 1., 1.], 4485 [ 1., 1., 1.]], dtype=float32) 4486 >>> mx.nd.logical_and(x, y).asnumpy() 4487 array([[ 0., 0., 0.], 4488 [ 1., 1., 1.]], dtype=float32) 4489 >>> mx.nd.logical_and(z, y).asnumpy() 4490 array([[ 0., 0.], 4491 [ 0., 1.]], dtype=float32) 4492 """ 4493 # pylint: disable= no-member, protected-access 4494 return _ufunc_helper( 4495 lhs, 4496 rhs, 4497 op.broadcast_logical_and, 4498 lambda x, y: 1 if x and y else 0, 4499 _internal._logical_and_scalar, 4500 None) 4501 # pylint: enable= no-member, protected-access 4502 4503def logical_or(lhs, rhs): 4504 """Returns the result of element-wise **logical or** comparison 4505 operation with broadcasting. 4506 4507 For each element in input arrays, return 1(true) if lhs elements or rhs elements 4508 are true, otherwise return 0(false). 4509 4510 Equivalent to ``lhs or rhs`` and ``mx.nd.broadcast_logical_or(lhs, rhs)``. 4511 4512 .. note:: 4513 4514 If the corresponding dimensions of two arrays have the same size or one of them has size 1, 4515 then the arrays are broadcastable to a common shape. 4516 4517 Parameters 4518 ---------- 4519 lhs : scalar or mxnet.ndarray.array 4520 First input of the function. 4521 rhs : scalar or mxnet.ndarray.array 4522 Second input of the function. If ``lhs.shape != rhs.shape``, they must be 4523 broadcastable to a common shape. 4524 4525 Returns 4526 ------- 4527 NDArray 4528 Output array of boolean values. 
4529 4530 Examples 4531 -------- 4532 >>> x = mx.nd.ones((2,3)) 4533 >>> y = mx.nd.arange(2).reshape((2,1)) 4534 >>> z = mx.nd.arange(2).reshape((1,2)) 4535 >>> x.asnumpy() 4536 array([[ 1., 1., 1.], 4537 [ 1., 1., 1.]], dtype=float32) 4538 >>> y.asnumpy() 4539 array([[ 0.], 4540 [ 1.]], dtype=float32) 4541 >>> z.asnumpy() 4542 array([[ 0., 1.]], dtype=float32) 4543 >>> mx.nd.logical_or(x, 1).asnumpy() 4544 array([[ 1., 1., 1.], 4545 [ 1., 1., 1.]], dtype=float32) 4546 >>> mx.nd.logical_or(x, y).asnumpy() 4547 array([[ 1., 1., 1.], 4548 [ 1., 1., 1.]], dtype=float32) 4549 >>> mx.nd.logical_or(z, y).asnumpy() 4550 array([[ 0., 1.], 4551 [ 1., 1.]], dtype=float32) 4552 """ 4553 # pylint: disable= no-member, protected-access 4554 return _ufunc_helper( 4555 lhs, 4556 rhs, 4557 op.broadcast_logical_or, 4558 lambda x, y: 1 if x or y else 0, 4559 _internal._logical_or_scalar, 4560 None) 4561 # pylint: enable= no-member, protected-access 4562 4563def logical_xor(lhs, rhs): 4564 """Returns the result of element-wise **logical xor** comparison 4565 operation with broadcasting. 4566 4567 For each element in input arrays, return 1(true) if lhs elements or rhs elements 4568 are true, otherwise return 0(false). 4569 4570 Equivalent to ``bool(lhs) ^ bool(rhs)`` and ``mx.nd.broadcast_logical_xor(lhs, rhs)``. 4571 4572 .. note:: 4573 4574 If the corresponding dimensions of two arrays have the same size or one of them has size 1, 4575 then the arrays are broadcastable to a common shape. 4576 4577 Parameters 4578 ---------- 4579 lhs : scalar or mxnet.ndarray.array 4580 First input of the function. 4581 rhs : scalar or mxnet.ndarray.array 4582 Second input of the function. If ``lhs.shape != rhs.shape``, they must be 4583 broadcastable to a common shape. 4584 4585 Returns 4586 ------- 4587 NDArray 4588 Output array of boolean values. 
4589 4590 Examples 4591 -------- 4592 >>> x = mx.nd.ones((2,3)) 4593 >>> y = mx.nd.arange(2).reshape((2,1)) 4594 >>> z = mx.nd.arange(2).reshape((1,2)) 4595 >>> x.asnumpy() 4596 array([[ 1., 1., 1.], 4597 [ 1., 1., 1.]], dtype=float32) 4598 >>> y.asnumpy() 4599 array([[ 0.], 4600 [ 1.]], dtype=float32) 4601 >>> z.asnumpy() 4602 array([[ 0., 1.]], dtype=float32) 4603 >>> mx.nd.logical_xor(x, y).asnumpy() 4604 array([[ 1., 1., 1.], 4605 [ 0., 0., 0.]], dtype=float32) 4606 """ 4607 # pylint: disable= no-member, protected-access 4608 return _ufunc_helper( 4609 lhs, 4610 rhs, 4611 op.broadcast_logical_xor, 4612 lambda x, y: 1 if bool(x) ^ bool(y) else 0, 4613 _internal._logical_xor_scalar, 4614 None) 4615 # pylint: enable= no-member, protected-access 4616 4617def true_divide(lhs, rhs): 4618 4619 """This function is similar to :meth:`divide`. 4620 """ 4621 return divide(lhs, rhs) 4622 4623 4624def concatenate(arrays, axis=0, always_copy=True): 4625 """DEPRECATED, use ``concat`` instead 4626 4627 Parameters 4628 ---------- 4629 arrays : list of `NDArray` 4630 Arrays to be concatenate. They must have identical shape except 4631 the first dimension. They also must have the same data type. 4632 axis : int 4633 The axis along which to concatenate. 4634 always_copy : bool 4635 Default `True`. When not `True`, if the arrays only contain one 4636 `NDArray`, that element will be returned directly, avoid copying. 4637 4638 Returns 4639 ------- 4640 NDArray 4641 An `NDArray` that lives on the same context as `arrays[0].context`. 
4642 """ 4643 assert isinstance(arrays, list) 4644 assert len(arrays) > 0 4645 assert isinstance(arrays[0], NDArray) 4646 4647 if not always_copy and len(arrays) == 1: 4648 return arrays[0] 4649 4650 shape_axis = arrays[0].shape[axis] 4651 shape_rest1 = arrays[0].shape[0:axis] 4652 shape_rest2 = arrays[0].shape[axis+1:] 4653 dtype = arrays[0].dtype 4654 for arr in arrays[1:]: 4655 shape_axis += arr.shape[axis] 4656 assert shape_rest1 == arr.shape[0:axis] 4657 assert shape_rest2 == arr.shape[axis+1:] 4658 assert dtype == arr.dtype 4659 ret_shape = shape_rest1 + (shape_axis,) + shape_rest2 4660 ret = empty(ret_shape, ctx=arrays[0].ctx, dtype=dtype) 4661 4662 idx = 0 4663 begin = [0 for _ in ret_shape] 4664 end = list(ret_shape) 4665 for arr in arrays: 4666 if axis == 0: 4667 ret[idx:idx+arr.shape[0]] = arr 4668 else: 4669 begin[axis] = idx 4670 end[axis] = idx+arr.shape[axis] 4671 # pylint: disable=no-member,protected-access 4672 _internal._crop_assign(ret, arr, out=ret, 4673 begin=tuple(begin), 4674 end=tuple(end)) 4675 # pylint: enable=no-member,protected-access 4676 idx += arr.shape[axis] 4677 4678 return ret 4679 4680 4681# pylint: disable=redefined-outer-name 4682def imdecode(str_img, clip_rect=(0, 0, 0, 0), out=None, index=0, channels=3, mean=None): 4683 """DEPRECATED, use mx.img instead 4684 4685 Parameters 4686 ---------- 4687 str_img : str 4688 Binary image data 4689 clip_rect : iterable of 4 int 4690 Clip decoded image to rectangle (x0, y0, x1, y1). 4691 out : NDArray 4692 Output buffer. Can be 3 dimensional (c, h, w) or 4 dimensional (n, c, h, w). 4693 index : int 4694 Output decoded image to i-th slice of 4 dimensional buffer. 4695 channels : int 4696 Number of channels to output. Decode to grey scale when channels = 1. 4697 mean : NDArray 4698 Subtract mean from decode image before outputing. 
4699 """ 4700 # pylint: disable= no-member, protected-access, too-many-arguments 4701 if mean is None: 4702 mean = NDArray(_new_empty_handle()) 4703 if out is None: 4704 return _internal._imdecode(mean, index, 4705 clip_rect[0], 4706 clip_rect[1], 4707 clip_rect[2], 4708 clip_rect[3], 4709 channels, 4710 len(str_img), 4711 str_img=str_img) 4712 else: 4713 return _internal._imdecode(mean, index, 4714 clip_rect[0], 4715 clip_rect[1], 4716 clip_rect[2], 4717 clip_rect[3], 4718 channels, 4719 len(str_img), 4720 str_img=str_img, 4721 out=out) 4722 4723 4724def zeros(shape, ctx=None, dtype=None, **kwargs): 4725 """Returns a new array filled with all zeros, with the given shape and type. 4726 4727 Parameters 4728 ---------- 4729 shape : int or tuple of int 4730 The shape of the empty array. 4731 ctx : Context, optional 4732 An optional device context (default is the current default context). 4733 dtype : str or numpy.dtype, optional 4734 An optional value type (default is `float32`). 4735 out : NDArray, optional 4736 The output NDArray (default is `None`). 4737 4738 Returns 4739 ------- 4740 NDArray 4741 A created array 4742 4743 Examples 4744 -------- 4745 >>> mx.nd.zeros(1).asnumpy() 4746 array([ 0.], dtype=float32) 4747 >>> mx.nd.zeros((1,2), mx.gpu(0)) 4748 <NDArray 1x2 @gpu(0)> 4749 >>> mx.nd.zeros((1,2), mx.gpu(0), 'float16').asnumpy() 4750 array([[ 0., 0.]], dtype=float16) 4751 """ 4752 # pylint: disable= unused-argument 4753 if ctx is None: 4754 ctx = current_context() 4755 dtype = mx_real_t if dtype is None else dtype 4756 # pylint: disable= no-member, protected-access 4757 return _internal._zeros(shape=shape, ctx=ctx, dtype=dtype, **kwargs) 4758 # pylint: enable= no-member, protected-access 4759 4760def eye(N, M=0, k=0, ctx=None, dtype=None, **kwargs): 4761 """Return a 2-D array with ones on the diagonal and zeros elsewhere. 4762 4763 Parameters 4764 ---------- 4765 N: int 4766 Number of rows in the output. 
4767 M: int, optional 4768 Number of columns in the output. If 0, defaults to N. 4769 k: int, optional 4770 Index of the diagonal: 0 (the default) refers to the main diagonal, 4771 a positive value refers to an upper diagonal, 4772 and a negative value to a lower diagonal. 4773 ctx: Context, optional 4774 An optional device context (default is the current default context) 4775 dtype: str or numpy.dtype, optional 4776 An optional value type (default is `float32`) 4777 4778 Returns 4779 ------- 4780 NDArray 4781 A created array 4782 4783 Examples 4784 -------- 4785 >>> mx.nd.eye(2) 4786 [[ 1. 0.] 4787 [ 0. 1.]] 4788 <NDArray 2x2 @cpu(0)> 4789 >>> mx.nd.eye(2, 3, 1) 4790 [[ 0. 1. 0.] 4791 [ 0. 0. 1.]] 4792 <NDArray 2x3 @cpu(0)> 4793 """ 4794 # pylint: disable= unused-argument 4795 if ctx is None: 4796 ctx = current_context() 4797 dtype = mx_real_t if dtype is None else dtype 4798 # pylint: disable= no-member, protected-access 4799 return _internal._eye(N=N, M=M, k=k, ctx=ctx, dtype=dtype, **kwargs) 4800 # pylint: enable= no-member, protected-access 4801 4802 4803def empty(shape, ctx=None, dtype=None): 4804 """Returns a new array of given shape and type, without initializing entries. 4805 4806 Parameters 4807 ---------- 4808 shape : int or tuple of int 4809 The shape of the empty array. 4810 ctx : Context, optional 4811 An optional device context (default is the current default context). 4812 dtype : str or numpy.dtype, optional 4813 An optional value type (default is `float32`). 4814 4815 Returns 4816 ------- 4817 NDArray 4818 A created array. 4819 4820 """ 4821 if isinstance(shape, int): 4822 shape = (shape, ) 4823 if ctx is None: 4824 ctx = current_context() 4825 if dtype is None: 4826 dtype = mx_real_t 4827 return NDArray(handle=_new_alloc_handle(shape, ctx, False, dtype)) 4828 4829 4830# pylint: disable= redefined-builtin 4831def histogram(a, bins=10, range=None): 4832 """Compute the histogram of the input data. 
4833 4834 Parameters 4835 ---------- 4836 a : NDArray 4837 Input data. The histogram is computed over the flattened array. 4838 bins : int or sequence of scalars 4839 If bins is an int, it defines the number of equal-width bins in the 4840 given range (10, by default). If bins is a sequence, it defines the bin edges, 4841 including the rightmost edge, allowing for non-uniform bin widths. 4842 range : (float, float), optional 4843 The lower and upper range of the bins. If not provided, range is simply (a.min(), a.max()). 4844 Values outside the range are ignored. The first element of the range must be less than or 4845 equal to the second. range affects the automatic bin computation as well, the range will 4846 be equally divided by the number of bins. 4847 4848 Returns 4849 ------- 4850 NDArray 4851 A created array. 4852 4853 """ 4854 4855 # pylint: disable= no-member, protected-access 4856 if isinstance(bins, NDArray): 4857 return _internal._histogram(data=a, bins=bins) 4858 elif isinstance(bins, integer_types): 4859 if range is None: 4860 warnings.warn("range is not specified, using numpy's result " 4861 "to ensure consistency with numpy") 4862 res, bin_bounds = np.histogram(a.asnumpy(), bins=bins) 4863 return array(res), array(bin_bounds) 4864 return _internal._histogram(data=a, bin_cnt=bins, range=range) 4865 raise ValueError("bins argument should be either an integer or an NDArray") 4866 # pylint: enable= no-member, protected-access, redefined-builtin 4867 4868def split_v2(ary, indices_or_sections, axis=0, squeeze_axis=False): 4869 """Split an array into multiple sub-arrays. 4870 4871 Parameters 4872 ---------- 4873 ary : NDArray 4874 Array to be divided into sub-arrays. 4875 indices_or_sections : int or tuple of ints 4876 If `indices_or_sections` is an integer, N, the array will be divided 4877 into N equal arrays along `axis`. If such a split is not possible, 4878 an error is raised. 
4879 If `indices_or_sections` is a 1-D array of sorted integers, the entries 4880 indicate where along `axis` the array is split. For example, 4881 ``[2, 3]`` would, for ``axis=0``, result in 4882 - ary[:2] 4883 - ary[2:3] 4884 - ary[3:] 4885 If an index exceeds the dimension of the array along `axis`, 4886 an empty sub-array is returned correspondingly. 4887 axis : int, optional 4888 The axis along which to split, default is 0. 4889 squeeze_axis: boolean, optional 4890 Whether to squeeze the axis of sub-arrays or not, only useful when size 4891 of the sub-arrays are 1 on the `axis`. Default is False. 4892 4893 Returns 4894 ------- 4895 NDArray 4896 A created array. 4897 4898 """ 4899 indices = [] 4900 axis_size = ary.shape[axis] 4901 if isinstance(indices_or_sections, int): 4902 sections = indices_or_sections 4903 if axis_size % sections: 4904 raise ValueError('array split does not result in an equal division') 4905 section_size = int(axis_size / sections) 4906 indices = [i * section_size for i in range(sections)] 4907 elif isinstance(indices_or_sections, tuple): 4908 indices = [0] + list(indices_or_sections) 4909 else: 4910 raise ValueError('indices_or_sections must either int or tuple of ints') 4911 return _internal._split_v2(ary, indices, axis, squeeze_axis) 4912 4913PyCapsuleDestructor = ctypes.CFUNCTYPE(None, ctypes.c_void_p) 4914_c_str_dltensor = c_str('dltensor') 4915_c_str_used_dltensor = c_str('used_dltensor') 4916 4917def _dlpack_deleter(pycapsule): 4918 pycapsule = ctypes.c_void_p(pycapsule) 4919 if ctypes.pythonapi.PyCapsule_IsValid(pycapsule, _c_str_dltensor): 4920 ptr = ctypes.c_void_p( 4921 ctypes.pythonapi.PyCapsule_GetPointer(pycapsule, _c_str_dltensor)) 4922 check_call(_LIB.MXNDArrayCallDLPackDeleter(ptr)) 4923 4924_c_dlpack_deleter = PyCapsuleDestructor(_dlpack_deleter) 4925 4926def to_dlpack_for_read(data): 4927 """Returns a reference view of NDArray that represents as DLManagedTensor until 4928 all previous write operations on the current 
array are finished. 4929 4930 Parameters 4931 ---------- 4932 data: NDArray 4933 input data. 4934 4935 Returns 4936 ------- 4937 PyCapsule (the pointer of DLManagedTensor) 4938 a reference view of NDArray that represents as DLManagedTensor. 4939 4940 Examples 4941 -------- 4942 >>> x = mx.nd.ones((2,3)) 4943 >>> y = mx.nd.to_dlpack_for_read(x) 4944 >>> type(y) 4945 <class 'PyCapsule'> 4946 >>> z = mx.nd.from_dlpack(y) 4947 >>> z 4948 [[1. 1. 1.] 4949 [1. 1. 1.]] 4950 <NDArray 2x3 @cpu(0)> 4951 """ 4952 data.wait_to_read() 4953 dlpack = DLPackHandle() 4954 check_call(_LIB.MXNDArrayToDLPack(data.handle, ctypes.byref(dlpack))) 4955 return ctypes.pythonapi.PyCapsule_New(dlpack, _c_str_dltensor, _c_dlpack_deleter) 4956 4957def to_dlpack_for_write(data): 4958 """Returns a reference view of NDArray that represents as DLManagedTensor until 4959 all previous read/write operations on the current array are finished. 4960 4961 Parameters 4962 ---------- 4963 data: NDArray 4964 input data. 4965 4966 Returns 4967 ------- 4968 PyCapsule (the pointer of DLManagedTensor) 4969 a reference view of NDArray that represents as DLManagedTensor. 4970 4971 Examples 4972 -------- 4973 >>> x = mx.nd.ones((2,3)) 4974 >>> w = mx.nd.to_dlpack_for_write(x) 4975 >>> type(w) 4976 <class 'PyCapsule'> 4977 >>> u = mx.nd.from_dlpack(w) 4978 >>> u += 1 4979 >>> x 4980 [[2. 2. 2.] 4981 [2. 2. 2.]] 4982 <NDArray 2x3 @cpu(0)> 4983 """ 4984 check_call(_LIB.MXNDArrayWaitToWrite(data.handle)) 4985 dlpack = DLPackHandle() 4986 check_call(_LIB.MXNDArrayToDLPack(data.handle, ctypes.byref(dlpack))) 4987 return ctypes.pythonapi.PyCapsule_New(dlpack, _c_str_dltensor, _c_dlpack_deleter) 4988 4989def from_dlpack(dlpack): 4990 """Returns a NDArray backed by a dlpack tensor. 
4991 4992 Parameters 4993 ---------- 4994 dlpack: PyCapsule (the pointer of DLManagedTensor) 4995 input data 4996 4997 Returns 4998 ------- 4999 NDArray 5000 a NDArray backed by a dlpack tensor 5001 5002 Examples 5003 -------- 5004 >>> x = mx.nd.ones((2,3)) 5005 >>> y = mx.nd.to_dlpack_for_read(x) 5006 >>> type(y) 5007 <class 'PyCapsule'> 5008 >>> z = mx.nd.from_dlpack(y) 5009 >>> type(z) 5010 <class 'mxnet.ndarray.ndarray.NDArray'> 5011 >>> z 5012 [[ 1. 1. 1.] 5013 [ 1. 1. 1.]] 5014 <NDArray 2x3 @cpu(0)> 5015 5016 >>> w = mx.nd.to_dlpack_for_write(x) 5017 >>> type(w) 5018 <class 'PyCapsule'> 5019 >>> u = mx.nd.from_dlpack(w) 5020 >>> u += 1 5021 >>> x 5022 [[2. 2. 2.] 5023 [2. 2. 2.]] 5024 <NDArray 2x3 @cpu(0)> 5025 """ 5026 handle = NDArrayHandle() 5027 dlpack = ctypes.py_object(dlpack) 5028 assert ctypes.pythonapi.PyCapsule_IsValid(dlpack, _c_str_dltensor), ValueError( 5029 'Invalid DLPack Tensor. DLTensor capsules can be consumed only once.') 5030 dlpack_handle = ctypes.c_void_p(ctypes.pythonapi.PyCapsule_GetPointer(dlpack, _c_str_dltensor)) 5031 check_call(_LIB.MXNDArrayFromDLPackEx(dlpack_handle, False, ctypes.byref(handle))) 5032 # Rename PyCapsule (DLPack) 5033 ctypes.pythonapi.PyCapsule_SetName(dlpack, _c_str_used_dltensor) 5034 # delete the deleter of the old dlpack 5035 ctypes.pythonapi.PyCapsule_SetDestructor(dlpack, None) 5036 return NDArray(handle=handle) 5037 5038class DLContext(ctypes.Structure): 5039 _fields_ = [("device_type", ctypes.c_int), 5040 ("device_id", ctypes.c_int)] 5041 5042 5043class DLDataType(ctypes.Structure): 5044 _fields_ = [("type_code", ctypes.c_uint8), 5045 ("bits", ctypes.c_uint8), 5046 ("lanes", ctypes.c_uint16)] 5047 TYPE_MAP = { 5048 "int32": (0, 32, 1), 5049 "int64": (0, 64, 1), 5050 "bool": (1, 1, 1), 5051 "uint8": (1, 8, 1), 5052 "uint32": (1, 32, 1), 5053 "uint64": (1, 64, 1), 5054 'float16': (2, 16, 1), 5055 "float32": (2, 32, 1), 5056 "float64": (2, 64, 1), 5057 } 5058 5059 5060class DLTensor(ctypes.Structure): 5061 
_fields_ = [("data", ctypes.c_void_p), 5062 ("ctx", DLContext), 5063 ("ndim", ctypes.c_int), 5064 ("dtype", DLDataType), 5065 ("shape", ctypes.POINTER(ctypes.c_int64)), 5066 ("strides", ctypes.POINTER(ctypes.c_int64)), 5067 ("byte_offset", ctypes.c_uint64)] 5068 5069class DLManagedTensor(ctypes.Structure): 5070 pass 5071 5072 5073DeleterFunc = ctypes.CFUNCTYPE(None, ctypes.POINTER(DLManagedTensor)) 5074 5075 5076DLManagedTensor._fields_ = [("dl_tensor", DLTensor), # pylint: disable=protected-access 5077 ("manager_ctx", ctypes.c_void_p), 5078 ("deleter", DeleterFunc)] 5079 5080 5081@DeleterFunc 5082def dl_managed_tensor_deleter(dl_managed_tensor_handle): 5083 void_p = dl_managed_tensor_handle.contents.manager_ctx 5084 pyobj = ctypes.cast(void_p, ctypes.py_object) 5085 ctypes.pythonapi.Py_DecRef(pyobj) 5086 5087 5088def from_numpy(ndarray, zero_copy=True, array_cls=NDArray): 5089 """Returns an MXNet's ndarray backed by numpy's ndarray. 5090 When `zero_copy` is set to be true, 5091 this API consumes numpy's ndarray and produces MXNet's ndarray 5092 without having to copy the content. In this case, we disallow 5093 users to modify the given numpy ndarray, and it is suggested 5094 not to read the numpy ndarray as well for internal correctness. 5095 5096 Parameters 5097 ---------- 5098 ndarray: numpy.ndarray 5099 input data 5100 zero_copy: bool 5101 Whether we use DLPack's zero-copy conversion to convert to MXNet's NDArray. 5102 This is only available for c-contiguous arrays, i.e. array.flags[C_CONTIGUOUS] == True. 5103 array_cls: ndarray class type 5104 The class type of the output array. 
5105 5106 Returns 5107 ------- 5108 NDArray 5109 a NDArray backed by a dlpack tensor 5110 5111 """ 5112 5113 def _make_manager_ctx(obj): 5114 pyobj = ctypes.py_object(obj) 5115 void_p = ctypes.c_void_p.from_buffer(pyobj) 5116 ctypes.pythonapi.Py_IncRef(pyobj) 5117 return void_p 5118 5119 def _make_dl_tensor(array): 5120 if str(array.dtype) not in DLDataType.TYPE_MAP: 5121 raise ValueError(str(array.dtype) + " is not supported.") 5122 dl_tensor = DLTensor() 5123 dl_tensor.data = array.ctypes.data_as(ctypes.c_void_p) 5124 dl_tensor.ctx = DLContext(1, 0) 5125 dl_tensor.ndim = array.ndim 5126 dl_tensor.dtype = DLDataType.TYPE_MAP[str(array.dtype)] 5127 dl_tensor.shape = array.ctypes.shape_as(ctypes.c_int64) 5128 dl_tensor.strides = None 5129 dl_tensor.byte_offset = 0 5130 return dl_tensor 5131 5132 def _make_dl_managed_tensor(array): 5133 c_obj = DLManagedTensor() 5134 c_obj.dl_tensor = _make_dl_tensor(array) 5135 c_obj.manager_ctx = _make_manager_ctx(array) 5136 c_obj.deleter = dl_managed_tensor_deleter 5137 return c_obj 5138 5139 if not zero_copy: 5140 return array(ndarray, dtype=ndarray.dtype) 5141 5142 if not ndarray.flags['C_CONTIGUOUS']: 5143 raise ValueError("Only c-contiguous arrays are supported for zero-copy") 5144 ndarray.flags['WRITEABLE'] = False 5145 c_obj = _make_dl_managed_tensor(ndarray) 5146 handle = NDArrayHandle() 5147 check_call(_LIB.MXNDArrayFromDLPackEx(ctypes.byref(c_obj), True, ctypes.byref(handle))) 5148 return array_cls(handle=handle) 5149