author | DongHun Kwak <dh0128.kwak@samsung.com> | 2020-12-31 09:32:58 +0900 |
---|---|---|
committer | DongHun Kwak <dh0128.kwak@samsung.com> | 2020-12-31 09:32:58 +0900 |
commit | dafde1312a74de106e0e114de78b9ae5f52f0b78 (patch) | |
tree | 07b322b6dfbfa69110f399218d35d53c167fcaf7 /numpy | |
parent | 546f3e50ef3710ae2fc394774c63eefc7801ca0f (diff) | |
download | python-numpy-dafde1312a74de106e0e114de78b9ae5f52f0b78.tar.gz python-numpy-dafde1312a74de106e0e114de78b9ae5f52f0b78.tar.bz2 python-numpy-dafde1312a74de106e0e114de78b9ae5f52f0b78.zip |
Imported Upstream version 1.15.1 (tag: upstream/1.15.1)
Diffstat (limited to 'numpy')
31 files changed, 500 insertions, 218 deletions
diff --git a/numpy/__init__.py b/numpy/__init__.py
index d250ed5ac..1f60f074c 100644
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -181,6 +181,11 @@ else:
     __all__.extend(lib.__all__)
     __all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma'])
 
+    # Filter out Cython harmless warnings
+    warnings.filterwarnings("ignore", message="numpy.dtype size changed")
+    warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
+    warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
+
     # oldnumeric and numarray were removed in 1.9. In case some packages import
     # but do not use them, we define them here for backward compatibility.
     oldnumeric = 'removed'
diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py
index 9990bacf0..e658fc514 100644
--- a/numpy/core/_internal.py
+++ b/numpy/core/_internal.py
@@ -9,7 +9,7 @@ from __future__ import division, absolute_import, print_function
 import re
 import sys
 
-from numpy.compat import basestring
+from numpy.compat import basestring, unicode
 from .multiarray import dtype, array, ndarray
 try:
     import ctypes
@@ -294,7 +294,7 @@ def _newnames(datatype, order):
     """
     oldnames = datatype.names
     nameslist = list(oldnames)
-    if isinstance(order, str):
+    if isinstance(order, (str, unicode)):
         order = [order]
     seen = set()
     if isinstance(order, (list, tuple)):
diff --git a/numpy/core/einsumfunc.py b/numpy/core/einsumfunc.py
index 32c00fd8f..df2bf4a4e 100644
--- a/numpy/core/einsumfunc.py
+++ b/numpy/core/einsumfunc.py
@@ -1220,8 +1220,8 @@ def einsum(*operands, **kwargs):
 
     """
 
-    # Grab non-einsum kwargs; never optimize 2-argument case.
-    optimize_arg = kwargs.pop('optimize', len(operands) > 3)
+    # Grab non-einsum kwargs; do not optimize by default.
+    optimize_arg = kwargs.pop('optimize', False)
 
     # If no optimization, run pure einsum
     if optimize_arg is False:
diff --git a/numpy/core/records.py b/numpy/core/records.py
index 612d39322..a483871ba 100644
--- a/numpy/core/records.py
+++ b/numpy/core/records.py
@@ -42,7 +42,7 @@ import warnings
 
 from . import numeric as sb
 from . import numerictypes as nt
-from numpy.compat import isfileobj, bytes, long
+from numpy.compat import isfileobj, bytes, long, unicode
 from .arrayprint import get_printoptions
 
 # All of the functions allow formats to be a dtype
@@ -174,7 +174,7 @@ class format_parser(object):
         if (names):
             if (type(names) in [list, tuple]):
                 pass
-            elif isinstance(names, str):
+            elif isinstance(names, (str, unicode)):
                 names = names.split(',')
             else:
                 raise NameError("illegal input names %s" % repr(names))
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index b4158ec8e..d622effe6 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -733,7 +733,7 @@ VOID_getitem(void *input, void *vap)
         return (PyObject *)ret;
     }
 
-    return PyBytes_FromStringAndSize(PyArray_DATA(ap), descr->elsize);
+    return PyBytes_FromStringAndSize(ip, descr->elsize);
 }
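The einsumfunc.py hunk above flips the default of the `optimize` keyword to `False`, so `np.einsum` no longer silently optimizes calls with three or more operands. A minimal sketch of what callers see after this change (shapes and arrays are illustrative, not from the patch):

```python
import numpy as np

a = np.random.rand(8, 16)
b = np.random.rand(16, 32)
c = np.random.rand(32, 8)

# New default: the contraction runs exactly as written (optimize=False).
plain = np.einsum('ij,jk,kl->il', a, b, c)

# Opting in to path optimization is now explicit; results are identical.
opted = np.einsum('ij,jk,kl->il', a, b, c, optimize=True)
assert np.allclose(plain, opted)
```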
diff --git a/numpy/core/src/multiarray/buffer.c b/numpy/core/src/multiarray/buffer.c
index 21dbdefd6..c8e3da8bc 100644
--- a/numpy/core/src/multiarray/buffer.c
+++ b/numpy/core/src/multiarray/buffer.c
@@ -175,6 +175,14 @@ _is_natively_aligned_at(PyArray_Descr *descr,
     return 1;
 }
 
+/*
+ * Fill in str with an appropriate PEP 3118 format string, based on
+ * descr. For structured dtypes, calls itself recursively. Each call extends
+ * str at offset then updates offset, and uses descr->byteorder, (and
+ * possibly the byte order in obj) to determine the byte-order char.
+ *
+ * Returns 0 for success, -1 for failure
+ */
 static int
 _buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str,
                       PyObject* obj, Py_ssize_t *offset,
@@ -195,8 +203,8 @@ _buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str,
         PyObject *item, *subarray_tuple;
         Py_ssize_t total_count = 1;
         Py_ssize_t dim_size;
+        Py_ssize_t old_offset;
         char buf[128];
-        int old_offset;
         int ret;
 
         if (PyTuple_Check(descr->subarray->shape)) {
@@ -230,15 +238,15 @@ _buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str,
         return ret;
     }
     else if (PyDataType_HASFIELDS(descr)) {
-        int base_offset = *offset;
+        Py_ssize_t base_offset = *offset;
 
         _append_str(str, "T{");
         for (k = 0; k < PyTuple_GET_SIZE(descr->names); ++k) {
             PyObject *name, *item, *offset_obj, *tmp;
             PyArray_Descr *child;
             char *p;
-            Py_ssize_t len;
-            int new_offset;
+            Py_ssize_t len, new_offset;
+            int ret;
 
             name = PyTuple_GET_ITEM(descr->names, k);
             item = PyDict_GetItem(descr->fields, name);
@@ -266,8 +274,11 @@ _buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str,
             }
 
             /* Insert child item */
-            _buffer_format_string(child, str, obj, offset,
+            ret = _buffer_format_string(child, str, obj, offset,
                                   active_byteorder);
+            if (ret < 0) {
+                return -1;
+            }
 
             /* Insert field name */
 #if defined(NPY_PY3K)
@@ -393,8 +404,8 @@ _buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str,
         case NPY_CFLOAT:      if (_append_str(str, "Zf")) return -1; break;
         case NPY_CDOUBLE:     if (_append_str(str, "Zd")) return -1; break;
         case NPY_CLONGDOUBLE: if (_append_str(str, "Zg")) return -1; break;
-        /* XXX: datetime */
-        /* XXX: timedelta */
+        /* XXX NPY_DATETIME */
+        /* XXX NPY_TIMEDELTA */
         case NPY_OBJECT:      if (_append_char(str, 'O')) return -1; break;
         case NPY_STRING: {
             char buf[128];
@@ -468,10 +479,33 @@ _buffer_info_new(PyObject *obj)
 
     info = malloc(sizeof(_buffer_info_t));
     if (info == NULL) {
+        PyErr_NoMemory();
         goto fail;
     }
 
-    if (PyArray_IsScalar(obj, Generic)) {
+    if (PyArray_IsScalar(obj, Datetime) || PyArray_IsScalar(obj, Timedelta)) {
+        /*
+         * Special case datetime64 scalars to remain backward compatible.
+         * This will change in a future version.
+         * Note arrays of datetime64 and structured arrays with datetime64
+         * fields will not hit this code path and are currently unsupported
+         * in _buffer_format_string.
+         */
+        _append_char(&fmt, 'B');
+        _append_char(&fmt, '\0');
+        info->ndim = 1;
+        info->shape = malloc(sizeof(Py_ssize_t) * 2);
+        if (info->shape == NULL) {
+            PyErr_NoMemory();
+            goto fail;
+        }
+        info->strides = info->shape + info->ndim;
+        info->shape[0] = 8;
+        info->strides[0] = 1;
+        info->format = fmt.s;
+        return info;
+    }
+    else if (PyArray_IsScalar(obj, Generic)) {
         descr = PyArray_DescrFromScalar(obj);
         if (descr == NULL) {
             goto fail;
@@ -493,6 +527,7 @@ _buffer_info_new(PyObject *obj)
     else {
         info->shape = malloc(sizeof(Py_ssize_t) * PyArray_NDIM(arr) * 2 + 1);
         if (info->shape == NULL) {
+            PyErr_NoMemory();
             goto fail;
         }
         info->strides = info->shape + PyArray_NDIM(arr);
@@ -796,8 +831,6 @@ gentype_getbuffer(PyObject *self, Py_buffer *view, int flags)
     /* Fill in information */
     info = _buffer_get_info(self);
     if (info == NULL) {
-        PyErr_SetString(PyExc_BufferError,
-                        "could not get scalar buffer information");
         goto fail;
     }
 
@@ -820,6 +853,9 @@ gentype_getbuffer(PyObject *self, Py_buffer *view, int flags)
     }
 #endif
     view->len = elsize;
+    if (PyArray_IsScalar(self, Datetime) || PyArray_IsScalar(self, Timedelta)) {
+        elsize = 1;  /* descr->elsize,char is 8,'M', but we return 1,'B' */
+    }
     view->itemsize = elsize;
 
     Py_DECREF(descr);
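The buffer.c changes above give datetime64 and timedelta64 scalars a backward-compatible PEP 3118 view: eight unsigned bytes instead of an error. A sketch of the observable behavior, matching the test_scalarbuffer.py test added later in this diff:

```python
import numpy as np

dt = np.datetime64('2016-01-01')
m = memoryview(dt)

# The scalar is exposed as 8 raw unsigned bytes, not as a datetime item.
assert m.format == 'B'
assert m.shape == (8,)
assert m.itemsize == 1
```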
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index 7367902cc..28a414892 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -92,6 +92,7 @@ swab_separator(const char *sep)
 
     s = start = malloc(strlen(sep)+3);
     if (s == NULL) {
+        PyErr_NoMemory();
         return NULL;
     }
     /* add space to front if there isn't one */
@@ -1389,10 +1390,12 @@ _array_from_buffer_3118(PyObject *memoryview)
 
         if (!is_ctypes) {
             /* This object has no excuse for a broken PEP3118 buffer */
-            PyErr_SetString(
+            PyErr_Format(
                     PyExc_RuntimeError,
-                   "Item size computed from the PEP 3118 buffer format "
-                   "string does not match the actual item size.");
+                   "Item size %zd for PEP 3118 buffer format "
+                   "string %s does not match the dtype %c item size %d.",
+                    view->itemsize, view->format, descr->type,
+                    descr->elsize);
             Py_DECREF(descr);
             return NULL;
         }
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index bb3cc9d4e..a0dc98f0e 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -83,7 +83,7 @@ _arraydescr_fromctypes(PyObject *obj)
             /* derived type */
             PyObject *newtup;
             PyArray_Descr *derived;
-            newtup = Py_BuildValue("NN", newdescr, length);
+            newtup = Py_BuildValue("N(N)", newdescr, length);
             ret = PyArray_DescrConverter(newtup, &derived);
             Py_DECREF(newtup);
             if (ret == NPY_SUCCEED) {
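The descriptor.c fix changes the tuple handed to PyArray_DescrConverter from (dtype, length) to (dtype, (length,)), the shape-tuple form the subarray dtype constructor expects. A Python-level sketch of the conversion it repairs (the ctypes array case, exercised by the test_dtype.py tests later in this diff):

```python
import ctypes
import numpy as np

# A ctypes array type such as 3 * c_uint8 should map to a subarray dtype.
arr_t = 3 * ctypes.c_uint8
assert np.dtype(arr_t) == np.dtype((np.uint8, (3,)))

# The tuple form the converter now receives internally:
assert np.dtype((np.uint8, (3,))).shape == (3,)
```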
diff --git a/numpy/core/src/multiarray/dragon4.c b/numpy/core/src/multiarray/dragon4.c
index abbf05220..14dfa71c2 100644
--- a/numpy/core/src/multiarray/dragon4.c
+++ b/numpy/core/src/multiarray/dragon4.c
@@ -114,7 +114,7 @@ LogBase2_64(npy_uint64 val)
     return LogBase2_32((npy_uint32)val);
 }
 
-#if defined(HAVE_LDOUBLE_IEEE_QUAD_LE)
+#if defined(HAVE_LDOUBLE_IEEE_QUAD_LE) || defined(HAVE_LDOUBLE_IEEE_QUAD_BE)
 static npy_uint32
 LogBase2_128(npy_uint64 hi, npy_uint64 lo)
 {
@@ -217,7 +217,8 @@ BigInt_Set_uint64(BigInt *i, npy_uint64 val)
 
 #if (defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_LE) || \
      defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_BE) || \
-     defined(HAVE_LDOUBLE_IEEE_QUAD_LE))
+     defined(HAVE_LDOUBLE_IEEE_QUAD_LE) || \
+     defined(HAVE_LDOUBLE_IEEE_QUAD_BE))
 static void
 BigInt_Set_2x_uint64(BigInt *i, npy_uint64 hi, npy_uint64 lo)
 {
@@ -2845,7 +2846,7 @@ Dragon4_PrintFloat_IEEE_binary128_be(
 
 #if (defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_LE) || \
      defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_BE))
 /*
- * IBM extended precision 128-bit floating-point format, aka IBM double-dobule
+ * IBM extended precision 128-bit floating-point format, aka IBM double-double
  *
  * IBM's double-double type is a pair of IEEE binary64 values, which you add
  * together to get a total value. The exponents are arranged so that the lower
@@ -2882,12 +2883,15 @@ Dragon4_PrintFloat_IEEE_binary128_be(
  */
 static npy_uint32
 Dragon4_PrintFloat_IBM_double_double(
-    Dragon4_Scratch *scratch, FloatVal128 val128, Dragon4_Options *opt)
+    Dragon4_Scratch *scratch, npy_float128 *value, Dragon4_Options *opt)
 {
     char *buffer = scratch->repr;
     npy_uint32 bufferSize = sizeof(scratch->repr);
     BigInt *bigints = scratch->bigints;
 
+    FloatVal128 val128;
+    FloatUnion128 buf128;
+
     npy_uint32 floatExponent1, floatExponent2;
     npy_uint64 floatMantissa1, floatMantissa2;
     npy_uint32 floatSign1, floatSign2;
@@ -2908,6 +2912,12 @@ Dragon4_PrintFloat_IBM_double_double(
         return 0;
     }
 
+    /* The high part always comes before the low part, regardless of the
+     * endianness of the system. */
+    buf128.floatingPoint = *value;
+    val128.hi = buf128.integer.a;
+    val128.lo = buf128.integer.b;
+
     /* deconstruct the floating point values */
     floatMantissa1 = val128.hi & bitmask_u64(52);
     floatExponent1 = (val128.hi >> 52) & bitmask_u32(11);
@@ -3052,39 +3062,6 @@ Dragon4_PrintFloat_IBM_double_double(
                                 signbit, mantissaBit, hasUnequalMargins, opt);
 }
 
-#if defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_LE)
-static npy_uint32
-Dragon4_PrintFloat_IBM_double_double_le(
-    Dragon4_Scratch *scratch, npy_float128 *value, Dragon4_Options *opt)
-{
-    FloatVal128 val128;
-    FloatUnion128 buf128;
-
-    buf128.floatingPoint = *value;
-    val128.lo = buf128.integer.a;
-    val128.hi = buf128.integer.b;
-
-    return Dragon4_PrintFloat_IBM_double_double(scratch, val128, opt);
-}
-#endif /* HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_LE */
-
-#if defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_BE)
-static npy_uint32
-Dragon4_PrintFloat_IBM_double_double_be(
-    Dragon4_Scratch *scratch, npy_float128 *value, Dragon4_Options *opt)
-{
-    FloatVal128 val128;
-    FloatUnion128 buf128;
-
-    buf128.floatingPoint = *value;
-    val128.hi = buf128.integer.a;
-    val128.lo = buf128.integer.b;
-
-    return Dragon4_PrintFloat_IBM_double_double(scratch, val128, opt);
-}
-
-#endif /* HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_BE */
-
 #endif /* HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_LE | HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_BE */
 
 #endif /* NPY_FLOAT128 */
diff --git a/numpy/core/src/multiarray/dragon4.h b/numpy/core/src/multiarray/dragon4.h
index 383a0949d..2b8b4cef4 100644
--- a/numpy/core/src/multiarray/dragon4.h
+++ b/numpy/core/src/multiarray/dragon4.h
@@ -75,10 +75,9 @@
     #define NPY_LONGDOUBLE_BINFMT_NAME Intel_extended128
 #elif defined(HAVE_LDOUBLE_MOTOROLA_EXTENDED_12_BYTES_BE)
     #define NPY_LONGDOUBLE_BINFMT_NAME Motorola_extended96
-#elif defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_LE)
-    #define NPY_LONGDOUBLE_BINFMT_NAME IBM_double_double_le
-#elif defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_BE)
-    #define NPY_LONGDOUBLE_BINFMT_NAME IBM_double_double_be
+#elif (defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_LE) || \
+       defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_BE))
    #define NPY_LONGDOUBLE_BINFMT_NAME IBM_double_double
 #else
     #error No long double representation defined
 #endif
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index f78a748c0..fe19cc9ad 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -2116,7 +2116,7 @@ array_fromstring(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds
         if (DEPRECATE(
                 "The binary mode of fromstring is deprecated, as it behaves "
                 "surprisingly on unicode inputs. Use frombuffer instead") < 0) {
-            Py_DECREF(descr);
+            Py_XDECREF(descr);
             return NULL;
         }
     }
diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index 1ca298b30..0b02031a7 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -1874,9 +1874,13 @@ NPY_NO_EXPORT void
     }
     else {
         BINARY_LOOP {
-            const @type@ in1 = *(@type@ *)ip1;
+            @type@ in1 = *(@type@ *)ip1;
             const @type@ in2 = *(@type@ *)ip2;
-            *((@type@ *)op1) = (in1 @OP@ in2 || npy_isnan(in1)) ? in1 : in2;
+            in1 = (in1 @OP@ in2 || npy_isnan(in1)) ? in1 : in2;
+            if (npy_isnan(in1)) {
+                npy_set_floatstatus_invalid();
+            }
+            *((@type@ *)op1) = in1;
         }
     }
 }
diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index 942554cae..b1b1e87c1 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -620,6 +620,10 @@ class TestDateTime(object):
             assert_equal(pickle.loads(pickle.dumps(dt)), dt)
         dt = np.dtype('M8[W]')
         assert_equal(pickle.loads(pickle.dumps(dt)), dt)
+        scalar = np.datetime64('2016-01-01T00:00:00.000000000')
+        assert_equal(pickle.loads(pickle.dumps(scalar)), scalar)
+        delta = scalar - np.datetime64('2015-01-01T00:00:00.000000000')
+        assert_equal(pickle.loads(pickle.dumps(delta)), delta)
 
         # Check that loading pickles from 1.6 works
         pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index 60a7c72f7..8dd42b21c 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -504,3 +504,9 @@ class TestGeneratorSum(_DeprecationTestCase):
     # 2018-02-25, 1.15.0
     def test_generator_sum(self):
         self.assert_deprecated(np.sum, args=((i for i in range(5)),))
+
+
+class TestFromstring(_DeprecationTestCase):
+    # 2017-10-19, 1.14
+    def test_fromstring(self):
+        self.assert_deprecated(np.fromstring, args=('\x00'*80,))
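The TestFromstring case pins the deprecation emitted by the multiarraymodule.c hunk above; `np.frombuffer` is the documented replacement. A short migration sketch (the data bytes are illustrative):

```python
import warnings
import numpy as np

data = b'\x01\x00\x02\x00'

# Deprecated: binary-mode fromstring warns and copies the input.
with warnings.catch_warnings():
    warnings.simplefilter('ignore', DeprecationWarning)
    old = np.fromstring(data, dtype=np.int16)

# Preferred: frombuffer yields the same values without the deprecated path.
new = np.frombuffer(data, dtype=np.int16)
assert (old == new).all()
```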
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index 27fbb10d5..31ef9d609 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -4,6 +4,7 @@ import pickle
 import sys
 import operator
 import pytest
+import ctypes
 
 import numpy as np
 from numpy.core._rational_tests import rational
@@ -728,3 +729,74 @@ def test_dtypes_are_true():
 def test_invalid_dtype_string():
     # test for gh-10440
     assert_raises(TypeError, np.dtype, 'f8,i8,[f8,i8]')
+
+
+class TestFromCTypes(object):
+
+    @staticmethod
+    def check(ctype, dtype):
+        dtype = np.dtype(dtype)
+        assert_equal(np.dtype(ctype), dtype)
+        assert_equal(np.dtype(ctype()), dtype)
+
+    def test_array(self):
+        c8 = ctypes.c_uint8
+        self.check(     3 * c8,  (np.uint8, (3,)))
+        self.check(     1 * c8,  (np.uint8, (1,)))
+        self.check(     0 * c8,  (np.uint8, (0,)))
+        self.check(1 * (3 * c8), ((np.uint8, (3,)), (1,)))
+        self.check(3 * (1 * c8), ((np.uint8, (1,)), (3,)))
+
+    def test_padded_structure(self):
+        class PaddedStruct(ctypes.Structure):
+            _fields_ = [
+                ('a', ctypes.c_uint8),
+                ('b', ctypes.c_uint16)
+            ]
+        expected = np.dtype([
+            ('a', np.uint8),
+            ('b', np.uint16)
+        ], align=True)
+        self.check(PaddedStruct, expected)
+
+    @pytest.mark.xfail(reason="_pack_ is ignored - see gh-11651")
+    def test_packed_structure(self):
+        class PackedStructure(ctypes.Structure):
+            _pack_ = 1
+            _fields_ = [
+                ('a', ctypes.c_uint8),
+                ('b', ctypes.c_uint16)
+            ]
+        expected = np.dtype([
+            ('a', np.uint8),
+            ('b', np.uint16)
+        ])
+        self.check(PackedStructure, expected)
+
+    @pytest.mark.xfail(sys.byteorder != 'little',
+                       reason="non-native endianness does not work - see gh-10533")
+    def test_little_endian_structure(self):
+        class PaddedStruct(ctypes.LittleEndianStructure):
+            _fields_ = [
+                ('a', ctypes.c_uint8),
+                ('b', ctypes.c_uint16)
+            ]
+        expected = np.dtype([
+            ('a', '<B'),
+            ('b', '<H')
+        ], align=True)
+        self.check(PaddedStruct, expected)
+
+    @pytest.mark.xfail(sys.byteorder != 'big',
+                       reason="non-native endianness does not work - see gh-10533")
+    def test_big_endian_structure(self):
+        class PaddedStruct(ctypes.BigEndianStructure):
+            _fields_ = [
+                ('a', ctypes.c_uint8),
+                ('b', ctypes.c_uint16)
+            ]
+        expected = np.dtype([
+            ('a', '>B'),
+            ('b', '>H')
+        ], align=True)
+        self.check(PaddedStruct, expected)
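The TestFromCTypes cases above can be reproduced directly; for instance, a padded ctypes.Structure maps to an aligned structured dtype. A sketch mirroring test_padded_structure:

```python
import ctypes
import numpy as np

class PaddedStruct(ctypes.Structure):
    _fields_ = [('a', ctypes.c_uint8),
                ('b', ctypes.c_uint16)]

# ctypes inserts a pad byte after 'a'; align=True reproduces that layout.
expected = np.dtype([('a', np.uint8), ('b', np.uint16)], align=True)
assert np.dtype(PaddedStruct) == expected
assert np.dtype(PaddedStruct).itemsize == ctypes.sizeof(PaddedStruct)
```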
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 37d73e42c..4cc38a9b5 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -4707,55 +4707,72 @@ class TestRecord(object):
         # Error raised when multiple fields have the same name
         assert_raises(ValueError, test_assign)
 
-    if sys.version_info[0] >= 3:
-        def test_bytes_fields(self):
-            # Bytes are not allowed in field names and not recognized in titles
-            # on Py3
-            assert_raises(TypeError, np.dtype, [(b'a', int)])
-            assert_raises(TypeError, np.dtype, [(('b', b'a'), int)])
-
-            dt = np.dtype([((b'a', 'b'), int)])
-            assert_raises(TypeError, dt.__getitem__, b'a')
-
-            x = np.array([(1,), (2,), (3,)], dtype=dt)
-            assert_raises(IndexError, x.__getitem__, b'a')
-
-            y = x[0]
-            assert_raises(IndexError, y.__getitem__, b'a')
-
-        def test_multiple_field_name_unicode(self):
-            def test_assign_unicode():
-                dt = np.dtype([("\u20B9", "f8"),
-                               ("B", "f8"),
-                               ("\u20B9", "f8")])
-
-            # Error raised when multiple fields have the same name(unicode included)
-            assert_raises(ValueError, test_assign_unicode)
-
-    else:
-        def test_unicode_field_titles(self):
-            # Unicode field titles are added to field dict on Py2
-            title = u'b'
-            dt = np.dtype([((title, 'a'), int)])
-            dt[title]
-            dt['a']
-            x = np.array([(1,), (2,), (3,)], dtype=dt)
-            x[title]
-            x['a']
-            y = x[0]
-            y[title]
-            y['a']
-
-        def test_unicode_field_names(self):
-            # Unicode field names are converted to ascii on Python 2:
-            encodable_name = u'b'
-            assert_equal(np.dtype([(encodable_name, int)]).names[0], b'b')
-            assert_equal(np.dtype([(('a', encodable_name), int)]).names[0], b'b')
-
-            # But raises UnicodeEncodeError if it can't be encoded:
-            nonencodable_name = u'\uc3bc'
-            assert_raises(UnicodeEncodeError, np.dtype, [(nonencodable_name, int)])
-            assert_raises(UnicodeEncodeError, np.dtype, [(('a', nonencodable_name), int)])
+    @pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3")
+    def test_bytes_fields(self):
+        # Bytes are not allowed in field names and not recognized in titles
+        # on Py3
+        assert_raises(TypeError, np.dtype, [(b'a', int)])
+        assert_raises(TypeError, np.dtype, [(('b', b'a'), int)])
+
+        dt = np.dtype([((b'a', 'b'), int)])
+        assert_raises(TypeError, dt.__getitem__, b'a')
+
+        x = np.array([(1,), (2,), (3,)], dtype=dt)
+        assert_raises(IndexError, x.__getitem__, b'a')
+
+        y = x[0]
+        assert_raises(IndexError, y.__getitem__, b'a')
+
+    @pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3")
+    def test_multiple_field_name_unicode(self):
+        def test_assign_unicode():
+            dt = np.dtype([("\u20B9", "f8"),
+                           ("B", "f8"),
+                           ("\u20B9", "f8")])
+
+        # Error raised when multiple fields have the same name(unicode included)
+        assert_raises(ValueError, test_assign_unicode)
+
+    @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
+    def test_unicode_field_titles(self):
+        # Unicode field titles are added to field dict on Py2
+        title = u'b'
+        dt = np.dtype([((title, 'a'), int)])
+        dt[title]
+        dt['a']
+        x = np.array([(1,), (2,), (3,)], dtype=dt)
+        x[title]
+        x['a']
+        y = x[0]
+        y[title]
+        y['a']
+
+    @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
+    def test_unicode_field_names(self):
+        # Unicode field names are converted to ascii on Python 2:
+        encodable_name = u'b'
+        assert_equal(np.dtype([(encodable_name, int)]).names[0], b'b')
+        assert_equal(np.dtype([(('a', encodable_name), int)]).names[0], b'b')
+
+        # But raises UnicodeEncodeError if it can't be encoded:
+        nonencodable_name = u'\uc3bc'
+        assert_raises(UnicodeEncodeError, np.dtype, [(nonencodable_name, int)])
+        assert_raises(UnicodeEncodeError, np.dtype, [(('a', nonencodable_name), int)])
+
+    def test_fromarrays_unicode(self):
+        # A single name string provided to fromarrays() is allowed to be unicode
+        # on both Python 2 and 3:
+        x = np.core.records.fromarrays([[0], [1]], names=u'a,b', formats=u'i4,i4')
+        assert_equal(x['a'][0], 0)
+        assert_equal(x['b'][0], 1)
+
+    def test_unicode_order(self):
+        # Test that we can sort with order as a unicode field name in both Python 2 and
+        # 3:
+        name = u'b'
+        x = np.array([1, 3, 2], dtype=[(name, int)])
+        x.sort(order=name)
+        assert_equal(x[u'b'], np.array([1, 2, 3]))
 
     def test_field_names(self):
         # Test unicode and 8-bit / byte strings can be used
@@ -6429,6 +6446,15 @@ class TestNewBufferProtocol(object):
         # Issue #4015.
         self._check_roundtrip(0)
 
+    def test_invalid_buffer_format(self):
+        # datetime64 cannot be used fully in a buffer yet
+        # Should be fixed in the next Numpy major release
+        dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')])
+        a = np.empty(3, dt)
+        assert_raises((ValueError, BufferError), memoryview, a)
+        assert_raises((ValueError, BufferError), memoryview, np.array((3), 'M8[D]'))
+
     def test_export_simple_1d(self):
         x = np.array([1, 2, 3, 4, 5], dtype='i')
         y = memoryview(x)
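test_unicode_order above ties back to the _internal.py change at the top of this diff: on Python 2, a unicode field name passed as `order` used to fail the plain `isinstance(order, str)` check. A sketch:

```python
import numpy as np

x = np.array([1, 3, 2], dtype=[('b', int)])
# A unicode field name is accepted for `order` on both Python 2 and 3.
x.sort(order=u'b')
assert (x['b'] == np.array([1, 2, 3])).all()
```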
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index 62f592524..51fe13f5d 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -2366,6 +2366,13 @@ class TestRegression(object):
         del va
         assert_equal(x, b'\x00\x00\x00\x00')
 
+    def test_void_getitem(self):
+        # Test fix for gh-11668.
+        assert_(np.array([b'a'], 'V1').astype('O') == b'a')
+        assert_(np.array([b'ab'], 'V2').astype('O') == b'ab')
+        assert_(np.array([b'abc'], 'V3').astype('O') == b'abc')
+        assert_(np.array([b'abcd'], 'V4').astype('O') == b'abcd')
+
     def test_structarray_title(self):
         # The following used to segfault on pypy, due to NPY_TITLE_KEY
         # not working properly and resulting to double-decref of the
diff --git a/numpy/core/tests/test_scalarbuffer.py b/numpy/core/tests/test_scalarbuffer.py
index 6d57a5014..cb6c521e1 100644
--- a/numpy/core/tests/test_scalarbuffer.py
+++ b/numpy/core/tests/test_scalarbuffer.py
@@ -5,7 +5,7 @@ import sys
 import numpy as np
 import pytest
 
-from numpy.testing import assert_, assert_equal
+from numpy.testing import assert_, assert_equal, assert_raises
 
 # PEP3118 format strings for native (standard alignment and byteorder) types
 scalars_and_codes = [
@@ -77,3 +77,28 @@ class TestScalarPEP3118(object):
         mv_a = memoryview(a)
         assert_equal(mv_x.itemsize, mv_a.itemsize)
         assert_equal(mv_x.format, mv_a.format)
+
+    def test_datetime_memoryview(self):
+        # gh-11656
+        # Values verified with v1.13.3, shape is not () as in test_scalar_dim
+        def as_dict(m):
+            return dict(strides=m.strides, shape=m.shape, itemsize=m.itemsize,
+                        ndim=m.ndim, format=m.format)
+
+        dt1 = np.datetime64('2016-01-01')
+        dt2 = np.datetime64('2017-01-01')
+        expected = {'strides': (1,), 'itemsize': 1, 'ndim': 1,
+                    'shape': (8,), 'format': 'B'}
+        v = memoryview(dt1)
+        res = as_dict(v)
+        assert_equal(res, expected)
+
+        v = memoryview(dt2 - dt1)
+        res = as_dict(v)
+        assert_equal(res, expected)
+
+        dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')])
+        a = np.empty(1, dt)
+        # Fails to create a PEP 3118 valid buffer
+        assert_raises((ValueError, BufferError), memoryview, a[0])
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index 4772913be..29850108d 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -14,7 +14,7 @@ from numpy.testing import (
     assert_, assert_equal, assert_raises, assert_raises_regex,
     assert_array_equal, assert_almost_equal, assert_array_almost_equal,
     assert_allclose, assert_no_warnings, suppress_warnings,
-    _gen_alignment_data,
+    _gen_alignment_data, assert_warns
     )
 
@@ -1339,6 +1339,10 @@ class TestMinMax(object):
             assert_equal(np.min(r), np.nan)
             assert_equal(len(sup.log), n)
 
+    def test_minimize_warns(self):
+        # gh 11589
+        assert_warns(RuntimeWarning, np.minimum, np.nan, 1)
+
 
 class TestAbsoluteNegative(object):
     def test_abs_neg_blocked(self):
diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py
index 78802ef07..2510e18a0 100755
--- a/numpy/f2py/crackfortran.py
+++ b/numpy/f2py/crackfortran.py
@@ -2392,7 +2392,7 @@ def _selected_real_kind_func(p, r=0, radix=0):
     if p < 16:
         return 8
     machine = platform.machine().lower()
-    if machine.startswith('power') or machine.startswith('ppc64'):
+    if machine.startswith(('aarch64', 'power', 'ppc64', 's390x')):
         if p <= 20:
             return 16
     else:
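test_minimize_warns above covers the loops.c.src change earlier in this diff: minimum/maximum now raise the invalid floating-point flag when they propagate a NaN, which surfaces as a RuntimeWarning under the default error state. A sketch (assuming default np.errstate settings):

```python
import warnings
import numpy as np

with warnings.catch_warnings(record=True) as log:
    warnings.simplefilter('always')
    res = np.minimum(np.nan, 1)

assert np.isnan(res)
assert any(issubclass(w.category, RuntimeWarning) for w in log)
```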
diff --git a/numpy/lib/_datasource.py b/numpy/lib/_datasource.py
index 6f1295f09..ab00b1444 100644
--- a/numpy/lib/_datasource.py
+++ b/numpy/lib/_datasource.py
@@ -37,6 +37,7 @@ from __future__ import division, absolute_import, print_function
 
 import os
 import sys
+import warnings
 import shutil
 import io
 
@@ -85,9 +86,10 @@ def _python2_bz2open(fn, mode, encoding, newline):
 
     if "t" in mode:
         # BZ2File is missing necessary functions for TextIOWrapper
-        raise ValueError("bz2 text files not supported in python2")
-    else:
-        return bz2.BZ2File(fn, mode)
+        warnings.warn("Assuming latin1 encoding for bz2 text file in Python2",
+                      RuntimeWarning, stacklevel=5)
+        mode = mode.replace("t", "")
+    return bz2.BZ2File(fn, mode)
 
 def _python2_gzipopen(fn, mode, encoding, newline):
     """ Wrapper to open gzip in text mode.
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py
index 4d3f35183..c1f9f5bea 100644
--- a/numpy/lib/arraysetops.py
+++ b/numpy/lib/arraysetops.py
@@ -312,12 +312,12 @@ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
         If True, the input arrays are both assumed to be unique, which
         can speed up the calculation. Default is False.
     return_indices : bool
-        If True, the indices which correspond to the intersection of the
-        two arrays are returned. The first instance of a value is used
-        if there are multiple. Default is False.
-
-        .. versionadded:: 1.15.0
-
+        If True, the indices which correspond to the intersection of the two
+        arrays are returned. The first instance of a value is used if there are
+        multiple. Default is False.
+
+        .. versionadded:: 1.15.0
+
     Returns
     -------
     intersect1d : ndarray
@@ -326,7 +326,7 @@ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
         The indices of the first occurrences of the common values in `ar1`.
         Only provided if `return_indices` is True.
     comm2 : ndarray
-        The indices of the first occurrences of the common values in `ar2`. 
+        The indices of the first occurrences of the common values in `ar2`.
         Only provided if `return_indices` is True.
 
@@ -345,7 +345,7 @@ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
     >>> from functools import reduce
     >>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
     array([3])
-
+
     To return the indices of the values common to the input arrays
     along with the intersected values:
     >>> x = np.array([1, 1, 2, 3, 4])
@@ -355,8 +355,11 @@ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
     (array([0, 2, 4]), array([1, 0, 2]))
     >>> xy, x[x_ind], y[y_ind]
     (array([1, 2, 4]), array([1, 2, 4]), array([1, 2, 4]))
-
+
     """
+    ar1 = np.asanyarray(ar1)
+    ar2 = np.asanyarray(ar2)
+
     if not assume_unique:
         if return_indices:
             ar1, ind1 = unique(ar1, return_index=True)
@@ -367,7 +370,7 @@ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
     else:
         ar1 = ar1.ravel()
         ar2 = ar2.ravel()
-
+
     aux = np.concatenate((ar1, ar2))
     if return_indices:
         aux_sort_indices = np.argsort(aux, kind='mergesort')
@@ -389,6 +392,7 @@ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
     else:
         return int1d
 
+
 def setxor1d(ar1, ar2, assume_unique=False):
     """
     Find the set exclusive-or of two arrays.
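With the `asanyarray` coercion added above, `intersect1d` accepts plain array-likes, and the `return_indices` mode documented in the docstring behaves as shown. A sketch reusing the docstring's own example:

```python
import numpy as np

x = np.array([1, 1, 2, 3, 4])
y = np.array([2, 1, 4, 6])
xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True)

assert (xy == np.array([1, 2, 4])).all()
# The indices point at the first occurrence in each input array.
assert (x[x_ind] == xy).all() and (y[y_ind] == xy).all()

# Plain lists are now coerced instead of failing on .ravel().
assert (np.intersect1d([1, 2, 3], [1, 2, 3]) == [1, 2, 3]).all()
```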
diff --git a/numpy/lib/histograms.py b/numpy/lib/histograms.py
index 03399c561..47ee92928 100644
--- a/numpy/lib/histograms.py
+++ b/numpy/lib/histograms.py
@@ -260,6 +260,32 @@ def _get_outer_edges(a, range):
     return first_edge, last_edge
 
 
+def _unsigned_subtract(a, b):
+    """
+    Subtract two values where a >= b, and produce an unsigned result
+
+    This is needed when finding the difference between the upper and lower
+    bound of an int16 histogram
+    """
+    # coerce to a single type
+    signed_to_unsigned = {
+        np.byte: np.ubyte,
+        np.short: np.ushort,
+        np.intc: np.uintc,
+        np.int_: np.uint,
+        np.longlong: np.ulonglong
+    }
+    dt = np.result_type(a, b)
+    try:
+        dt = signed_to_unsigned[dt.type]
+    except KeyError:
+        return np.subtract(a, b, dtype=dt)
+    else:
+        # we know the inputs are integers, and we are deliberately casting
+        # signed to unsigned
+        return np.subtract(a, b, casting='unsafe', dtype=dt)
+
+
 def _get_bin_edges(a, bins, range, weights):
     """
     Computes the bins used internally by `histogram`.
@@ -311,7 +337,7 @@ def _get_bin_edges(a, bins, range, weights):
             # Do not call selectors on empty arrays
             width = _hist_bin_selectors[bin_name](a)
             if width:
-                n_equal_bins = int(np.ceil((last_edge - first_edge) / width))
+                n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width))
             else:
                 # Width can be zero for some estimators, e.g. FD when
                 # the IQR of the data is zero.
@@ -703,7 +729,7 @@ def histogram(a, bins=10, range=None, normed=None, weights=None,
         n = np.zeros(n_equal_bins, ntype)
 
         # Pre-compute histogram scaling factor
-        norm = n_equal_bins / (last_edge - first_edge)
+        norm = n_equal_bins / _unsigned_subtract(last_edge, first_edge)
 
         # We iterate over blocks here for two reasons: the first is that for
         # large arrays, it is actually faster (for example for a 10^8 array it
@@ -731,7 +757,7 @@ def histogram(a, bins=10, range=None, normed=None, weights=None,
 
             # Compute the bin indices, and for values that lie exactly on
             # last_edge we need to subtract one
-            f_indices = (tmp_a - first_edge) * norm
+            f_indices = _unsigned_subtract(tmp_a, first_edge) * norm
             indices = f_indices.astype(np.intp)
             indices[indices == n_equal_bins] -= 1
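The point of `_unsigned_subtract` is that `last_edge - first_edge` overflows a signed dtype when the data spans nearly its full range; the test_histograms.py cases later in this diff exercise exactly this. A sketch of the failure mode it fixes, using int8 for brevity (values illustrative):

```python
import numpy as np

# int8 spans [-128, 127]; the bin-width computation needs 124 - (-124) = 248,
# which overflows a signed 8-bit subtraction but fits in uint8.
arr = np.array([-2**7 + 4, 2**7 - 4], dtype=np.int8)
hist, edges = np.histogram(arr, bins=2)

assert (hist == [1, 1]).all()
assert edges[0] == -124 and edges[-1] == 124
```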
diff --git a/numpy/lib/tests/test__datasource.py b/numpy/lib/tests/test__datasource.py
index 32812990c..85788941c 100644
--- a/numpy/lib/tests/test__datasource.py
+++ b/numpy/lib/tests/test__datasource.py
@@ -2,11 +2,14 @@ from __future__ import division, absolute_import, print_function
 
 import os
 import sys
+import pytest
 from tempfile import mkdtemp, mkstemp, NamedTemporaryFile
 from shutil import rmtree
 
-from numpy.testing import assert_, assert_equal, assert_raises, SkipTest
 import numpy.lib._datasource as datasource
+from numpy.testing import (
+    assert_, assert_equal, assert_raises, assert_warns, SkipTest
+    )
 
 if sys.version_info[0] >= 3:
     import urllib.request as urllib_request
@@ -30,14 +33,14 @@ def urlopen_stub(url, data=None):
 
 old_urlopen = None
 
-def setup():
+def setup_module():
     global old_urlopen
 
     old_urlopen = urllib_request.urlopen
     urllib_request.urlopen = urlopen_stub
 
-def teardown():
+def teardown_module():
     urllib_request.urlopen = old_urlopen
 
 # A valid website for more robust testing
@@ -161,6 +164,24 @@ class TestDataSourceOpen(object):
         fp.close()
         assert_equal(magic_line, result)
 
+    @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Python 2 only")
+    def test_Bz2File_text_mode_warning(self):
+        try:
+            import bz2
+        except ImportError:
+            # We don't have the bz2 capabilities to test.
+            raise SkipTest
+        # Test datasource's internal file_opener for BZip2 files.
+        filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2')
+        fp = bz2.BZ2File(filepath, 'w')
+        fp.write(magic_line)
+        fp.close()
+        with assert_warns(RuntimeWarning):
+            fp = self.ds.open(filepath, 'rt')
+            result = fp.readline()
+            fp.close()
+        assert_equal(magic_line, result)
+
 
 class TestDataSourceExists(object):
     def setup(self):
diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py
index dace5ade8..c76afb8e5 100644
--- a/numpy/lib/tests/test_arraysetops.py
+++ b/numpy/lib/tests/test_arraysetops.py
@@ -30,19 +30,30 @@ class TestSetOps(object):
         ed = np.array([1, 2, 5])
         c = intersect1d(a, b)
         assert_array_equal(c, ed)
-
         assert_array_equal([], intersect1d([], []))
-
+
+    def test_intersect1d_array_like(self):
+        # See gh-11772
+        class Test(object):
+            def __array__(self):
+                return np.arange(3)
+
+        a = Test()
+        res = intersect1d(a, a)
+        assert_array_equal(res, a)
+        res = intersect1d([1, 2, 3], [1, 2, 3])
+        assert_array_equal(res, [1, 2, 3])
+
     def test_intersect1d_indices(self):
         # unique inputs
-        a = np.array([1, 2, 3, 4]) 
+        a = np.array([1, 2, 3, 4])
         b = np.array([2, 1, 4, 6])
         c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True)
         ee = np.array([1, 2, 4])
         assert_array_equal(c, ee)
         assert_array_equal(a[i1], ee)
         assert_array_equal(b[i2], ee)
-
+
         # non-unique inputs
         a = np.array([1, 2, 2, 3, 4, 3, 2])
         b = np.array([1, 8, 4, 2, 2, 3, 2, 3])
@@ -51,7 +62,7 @@ class TestSetOps(object):
         assert_array_equal(c, ef)
         assert_array_equal(a[i1], ef)
         assert_array_equal(b[i2], ef)
-
+
         # non1d, unique inputs
         a = np.array([[2, 4, 5, 6], [7, 8, 1, 15]])
         b = np.array([[3, 2, 7, 6], [10, 12, 8, 9]])
@@ -61,7 +72,7 @@ class TestSetOps(object):
         ea = np.array([2, 6, 7, 8])
         assert_array_equal(ea, a[ui1])
         assert_array_equal(ea, b[ui2])
-
+
         # non1d, not assumed to be unique inputs
         a = np.array([[2, 4, 5, 6, 6], [4, 7, 8, 7, 2]])
         b = np.array([[3, 2, 7, 7], [10, 12, 8, 7]])
@@ -71,7 +82,7 @@ class TestSetOps(object):
         ea = np.array([2, 7, 8])
         assert_array_equal(ea, a[ui1])
         assert_array_equal(ea, b[ui2])
-
+
     def test_setxor1d(self):
         a = np.array([5, 7, 1, 2])
         b = np.array([2, 4, 3, 1, 5])
diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py
index f136b5c81..561f5f938 100644
--- a/numpy/lib/tests/test_histograms.py
+++ b/numpy/lib/tests/test_histograms.py
@@ -310,6 +310,20 @@ class TestHistogram(object):
         assert_equal(d_edge.dtype, dates.dtype)
         assert_equal(t_edge.dtype, td)
 
+    def do_signed_overflow_bounds(self, dtype):
+        exponent = 8 * np.dtype(dtype).itemsize - 1
+        arr = np.array([-2**exponent + 4, 2**exponent - 4], dtype=dtype)
+        hist, e = histogram(arr, bins=2)
+        assert_equal(e, [-2**exponent + 4, 0, 2**exponent - 4])
+        assert_equal(hist, [1, 1])
+
+    def test_signed_overflow_bounds(self):
+        self.do_signed_overflow_bounds(np.byte)
+        self.do_signed_overflow_bounds(np.short)
+        self.do_signed_overflow_bounds(np.intc)
+        self.do_signed_overflow_bounds(np.int_)
+        self.do_signed_overflow_bounds(np.longlong)
+
     def do_precision_lower_bound(self, float_small, float_large):
         eps = np.finfo(float_large).eps
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index 98af0733b..5aad6c006 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -542,6 +542,8 @@ def matrix_power(a, n):
     of the same shape as M is returned. If ``n < 0``, the inverse
     is computed and then raised to the ``abs(n)``.
 
+    .. note:: Stacks of object matrices are not currently supported.
+
     Parameters
    ----------
     a : (..., M, M) array_like
@@ -604,6 +606,16 @@ def matrix_power(a, n):
     except TypeError:
         raise TypeError("exponent must be an integer")
 
+    # Fall back on dot for object arrays. Object arrays are not supported by
+    # the current implementation of matmul using einsum
+    if a.dtype != object:
+        fmatmul = matmul
+    elif a.ndim == 2:
+        fmatmul = dot
+    else:
+        raise NotImplementedError(
+            "matrix_power not supported for stacks of object arrays")
+
     if n == 0:
         a = empty_like(a)
         a[...] = eye(a.shape[-2], dtype=a.dtype)
@@ -618,20 +630,20 @@ def matrix_power(a, n):
         return a
 
     elif n == 2:
-        return matmul(a, a)
+        return fmatmul(a, a)
 
     elif n == 3:
-        return matmul(matmul(a, a), a)
+        return fmatmul(fmatmul(a, a), a)
 
     # Use binary decomposition to reduce the number of matrix multiplications.
     # Here, we iterate over the bits of n, from LSB to MSB, raise `a` to
     # increasing powers of 2, and multiply into the result as needed.
     z = result = None
     while n > 0:
-        z = a if z is None else matmul(z, z)
+        z = a if z is None else fmatmul(z, z)
         n, bit = divmod(n, 2)
         if bit:
-            result = z if result is None else matmul(result, z)
+            result = z if result is None else fmatmul(result, z)
 
     return result
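With the fallback above, `matrix_power` works again for 2-D object matrices (the einsum-based `matmul` cannot handle them), while stacks of object matrices raise NotImplementedError. A sketch; the Fraction entries are illustrative:

```python
from fractions import Fraction
import numpy as np
from numpy.linalg import matrix_power

# A 2-D object matrix: matrix_power now routes the products through np.dot.
m = np.array([[Fraction(1), Fraction(1)],
              [Fraction(0), Fraction(1)]], dtype=object)
assert (matrix_power(m, 3) == np.array([[1, 3], [0, 1]])).all()
```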
diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py
index 87dfe988a..07c7813c9 100644
--- a/numpy/linalg/tests/test_linalg.py
+++ b/numpy/linalg/tests/test_linalg.py
@@ -924,77 +924,94 @@ class TestLstsq(LstsqCases):
             assert_(len(w) == 1)
 
 
+@pytest.mark.parametrize('dt', [np.dtype(c) for c in '?bBhHiIqQefdgFDGO'])
 class TestMatrixPower(object):
-    R90 = array([[0, 1], [-1, 0]])
-    Arb22 = array([[4, -7], [-2, 10]])
+
+    rshft_0 = np.eye(4)
+    rshft_1 = rshft_0[[3, 0, 1, 2]]
+    rshft_2 = rshft_0[[2, 3, 0, 1]]
+    rshft_3 = rshft_0[[1, 2, 3, 0]]
+    rshft_all = [rshft_0, rshft_1, rshft_2, rshft_3]
     noninv = array([[1, 0], [0, 0]])
-    arbfloat = array([[[0.1, 3.2], [1.2, 0.7]],
-                      [[0.2, 6.4], [2.4, 1.4]]])
+    stacked = np.block([[[rshft_0]]]*2)
+    #FIXME the 'e' dtype might work in future
+    dtnoinv = [object, np.dtype('e'), np.dtype('g'), np.dtype('G')]
 
-    large = identity(10)
-    t = large[1, :].copy()
-    large[1, :] = large[0, :]
-    large[0, :] = t
-
-    def test_large_power(self):
+    def test_large_power(self, dt):
+        power = matrix_power
+        rshft = self.rshft_1.astype(dt)
         assert_equal(
-            matrix_power(self.R90, 2 ** 100 + 2 ** 10 + 2 ** 5 + 1), self.R90)
+            matrix_power(rshft, 2**100 + 2**10 + 2**5 + 0), self.rshft_0)
         assert_equal(
-            matrix_power(self.R90, 2 ** 100 + 2 ** 10 + 1), self.R90)
+            matrix_power(rshft, 2**100 + 2**10 + 2**5 + 1), self.rshft_1)
         assert_equal(
-            matrix_power(self.R90, 2 ** 100 + 2 + 1), -self.R90)
-
-    def test_large_power_trailing_zero(self):
+            matrix_power(rshft, 2**100 + 2**10 + 2**5 + 2), self.rshft_2)
         assert_equal(
-            matrix_power(self.R90, 2 ** 100 + 2 ** 10 + 2 ** 5), identity(2))
+            matrix_power(rshft, 2**100 + 2**10 + 2**5 + 3), self.rshft_3)
 
-    def testip_zero(self):
+    def test_power_is_zero(self, dt):
         def tz(M):
             mz = matrix_power(M, 0)
             assert_equal(mz, identity_like_generalized(M))
             assert_equal(mz.dtype, M.dtype)
-        for M in [self.Arb22, self.arbfloat, self.large]:
-            tz(M)
-
-    def testip_one(self):
-        def tz(M):
-            mz = matrix_power(M, 1)
-            assert_equal(mz, M)
-            assert_equal(mz.dtype, M.dtype)
-        for M in [self.Arb22, self.arbfloat, self.large]:
-            tz(M)
-
-    def testip_two(self):
-        def tz(M):
-            mz = matrix_power(M, 2)
-            assert_equal(mz, matmul(M, M))
-            assert_equal(mz.dtype, M.dtype)
-        for M in [self.Arb22, self.arbfloat, self.large]:
-            tz(M)
-
-    def testip_invert(self):
-        def tz(M):
-            mz = matrix_power(M, -1)
-            assert_almost_equal(matmul(mz, M), identity_like_generalized(M))
-        for M in [self.R90, self.Arb22, self.arbfloat, self.large]:
-            tz(M)
-
-    def test_invert_noninvertible(self):
-        assert_raises(LinAlgError, matrix_power, self.noninv, -1)
-
-    def test_invalid(self):
-        assert_raises(TypeError, matrix_power, self.R90, 1.5)
-        assert_raises(TypeError, matrix_power, self.R90, [1])
-        assert_raises(LinAlgError, matrix_power, np.array([1]), 1)
-        assert_raises(LinAlgError, matrix_power, np.array([[1], [2]]), 1)
-        assert_raises(LinAlgError, matrix_power, np.ones((4, 3, 2)), 1)
-
-
-class TestBoolPower(object):
+
+        for mat in self.rshft_all:
+            tz(mat.astype(dt))
+            if dt != object:
+                tz(self.stacked.astype(dt))
+
+    def test_power_is_one(self, dt):
+        def tz(mat):
+            mz = matrix_power(mat, 1)
+            assert_equal(mz, mat)
+            assert_equal(mz.dtype, mat.dtype)
+
+        for mat in self.rshft_all:
+            tz(mat.astype(dt))
+            if dt != object:
+                tz(self.stacked.astype(dt))
+
+    def test_power_is_two(self, dt):
+        def tz(mat):
+            mz = matrix_power(mat, 2)
+            mmul = matmul if mat.dtype != object else dot
+            assert_equal(mz, mmul(mat, mat))
+            assert_equal(mz.dtype, mat.dtype)
+
+        for mat in self.rshft_all:
+            tz(mat.astype(dt))
+            if dt != object:
+                tz(self.stacked.astype(dt))
+
+    def test_power_is_minus_one(self, dt):
+        def tz(mat):
+            invmat = matrix_power(mat, -1)
+            mmul = matmul if mat.dtype != object else dot
+            assert_almost_equal(
+                mmul(invmat, mat), identity_like_generalized(mat))
+
+        for mat in self.rshft_all:
+            if dt not in self.dtnoinv:
+                tz(mat.astype(dt))
+
+    def test_exceptions_bad_power(self, dt):
+        mat = self.rshft_0.astype(dt)
+        assert_raises(TypeError, matrix_power, mat, 1.5)
+        assert_raises(TypeError, matrix_power, mat, [1])
+
+    def test_exceptions_non_square(self, dt):
+        assert_raises(LinAlgError, matrix_power, np.array([1], dt), 1)
+        assert_raises(LinAlgError, matrix_power, np.array([[1], [2]], dt), 1)
+        assert_raises(LinAlgError, matrix_power, np.ones((4, 3, 2), dt), 1)
+
+    def test_exceptions_not_invertible(self, dt):
+        if dt in self.dtnoinv:
+            return
+        mat = self.noninv.astype(dt)
+        assert_raises(LinAlgError, matrix_power, mat, -1)
 
-    def test_square(self):
-        A = array([[True, False], [True, True]])
-        assert_equal(matrix_power(A, 2), A)
 
 class TestEigvalshCases(HermitianTestCase, HermitianGeneralizedTestCase):
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index 67a9186a8..24657bd1e 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -3174,18 +3174,13 @@ class TestMaskedArrayMethods(object):
         assert_equal(test.mask, mask_first.mask)
 
         # Test sort on dtype with subarray (gh-8069)
+        # Just check that the sort does not error, structured array subarrays
+        # are treated as byte strings and that leads to differing behavior
+        # depending on endianness and `endwith`.
         dt = np.dtype([('v', int, 2)])
         a = a.view(dt)
-        mask_last = mask_last.view(dt)
-        mask_first = mask_first.view(dt)
-
         test = sort(a)
-        assert_equal(test, mask_last)
-        assert_equal(test.mask, mask_last.mask)
-
         test = sort(a, endwith=False)
-        assert_equal(test, mask_first)
-        assert_equal(test.mask, mask_first.mask)
 
     def test_argsort(self):
         # Test argsort
diff --git a/numpy/random/setup.py b/numpy/random/setup.py
index 3f3b773a4..a8d82b141 100644
--- a/numpy/random/setup.py
+++ b/numpy/random/setup.py
@@ -39,6 +39,9 @@ def configuration(parent_package='',top_path=None):
                 ('_LARGEFILE64_SOURCE', '1')]
     if needs_mingw_ftime_workaround():
         defs.append(("NPY_NEEDS_MINGW_TIME_WORKAROUND", None))
+    # fix for 0.26 < cython < 0.29 and perhaps 0.28.5
+    # see https://github.com/cython/cython/issues/2494
+    defs.append(('CYTHON_SMALL_CODE', ''))
 
     libs = []
     # Configure mtrand
diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py
index 032c4a116..e501b2be6 100644
--- a/numpy/testing/_private/utils.py
+++ b/numpy/testing/_private/utils.py
@@ -687,6 +687,8 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
                          equal_inf=True):
     __tracebackhide__ = True  # Hide traceback for py.test
     from numpy.core import array, isnan, inf, bool_
+    from numpy.core.fromnumeric import all as npall
+
     x = array(x, copy=False, subok=True)
     y = array(y, copy=False, subok=True)
 
@@ -697,14 +699,21 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
         return x.dtype.char in "Mm"
 
     def func_assert_same_pos(x, y, func=isnan, hasval='nan'):
-        """Handling nan/inf: combine results of running func on x and y,
-        checking that they are True at the same locations."""
-        # Both the != True comparison here and the cast to bool_ at
-        # the end are done to deal with `masked`, which cannot be
-        # compared usefully, and for which .all() yields masked.
+        """Handling nan/inf.
+
+        Combine results of running func on x and y, checking that they are True
+        at the same locations.
+
+        """
+        # Both the != True comparison here and the cast to bool_ at the end are
+        # done to deal with `masked`, which cannot be compared usefully, and
+        # for which np.all yields masked. The use of the function np.all is
+        # for back compatibility with ndarray subclasses that changed the
+        # return values of the all method. We are not committed to supporting
+        # such subclasses, but some used to work.
         x_id = func(x)
         y_id = func(y)
-        if (x_id == y_id).all() != True:
+        if npall(x_id == y_id) != True:
             msg = build_err_msg([x, y],
                                 err_msg + '\nx and y %s location mismatch:'
                                 % (hasval), verbose=verbose, header=header,
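The utils.py hunk swaps the `.all()` method call for the `np.all` function when checking that NaN/inf locations match, to tolerate ndarray subclasses with nonstandard `all` return values. The location check itself is easy to observe; a sketch of stock numpy.testing behavior:

```python
import numpy as np
from numpy.testing import assert_array_equal, assert_raises

# NaNs must sit at the same positions for the arrays to compare equal.
assert_array_equal(np.array([1.0, np.nan]), np.array([1.0, np.nan]))

# A NaN-location mismatch is reported as an assertion failure.
assert_raises(AssertionError, assert_array_equal,
              np.array([1.0, np.nan]), np.array([np.nan, 1.0]))
```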