summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDongHun Kwak <dh0128.kwak@samsung.com>2020-12-31 09:36:04 +0900
committerDongHun Kwak <dh0128.kwak@samsung.com>2020-12-31 09:36:04 +0900
commit968808213079cc6eafba13a6d6b253cebe022672 (patch)
treea942412a1014708aa27372b896117af4185e7d08
parentfc640bff22bac42b2b132b5ef68c803ee89d0535 (diff)
downloadpython-numpy-968808213079cc6eafba13a6d6b253cebe022672.tar.gz
python-numpy-968808213079cc6eafba13a6d6b253cebe022672.tar.bz2
python-numpy-968808213079cc6eafba13a6d6b253cebe022672.zip
Imported Upstream version 1.16.3upstream/1.16.3
-rw-r--r--.mailmap1
-rw-r--r--MANIFEST.in7
-rw-r--r--doc/changelog/1.16.3-changelog.rst55
-rw-r--r--doc/release/1.16.1-notes.rst4
-rw-r--r--doc/release/1.16.3-notes.rst46
-rw-r--r--doc/source/reference/routines.ctypeslib.rst1
-rw-r--r--numpy/_build_utils/src/apple_sgemv_fix.c36
-rw-r--r--numpy/core/code_generators/ufunc_docstrings.py21
-rw-r--r--numpy/core/function_base.py4
-rw-r--r--numpy/core/setup.py4
-rw-r--r--numpy/core/src/multiarray/common.c2
-rw-r--r--numpy/core/src/multiarray/ctors.c2
-rw-r--r--numpy/core/src/multiarray/datetime.c9
-rw-r--r--numpy/core/src/multiarray/descriptor.c84
-rw-r--r--numpy/core/src/multiarray/iterators.c50
-rw-r--r--numpy/core/src/multiarray/multiarraymodule.c8
-rw-r--r--numpy/core/src/multiarray/typeinfo.c47
-rw-r--r--numpy/core/src/multiarray/typeinfo.h12
-rw-r--r--numpy/core/src/umath/ufunc_object.c2
-rw-r--r--numpy/core/tests/test_datetime.py6
-rw-r--r--numpy/core/tests/test_dtype.py6
-rw-r--r--numpy/core/tests/test_function_base.py6
-rw-r--r--numpy/core/tests/test_multiarray.py32
-rw-r--r--numpy/core/tests/test_regression.py8
-rw-r--r--numpy/core/tests/test_scalarmath.py2
-rw-r--r--numpy/ctypeslib.py12
-rw-r--r--numpy/distutils/ccompiler.py60
-rw-r--r--numpy/distutils/fcompiler/environment.py12
-rw-r--r--numpy/distutils/misc_util.py55
-rwxr-xr-xnumpy/f2py/crackfortran.py2
-rw-r--r--numpy/f2py/src/fortranobject.c2
-rw-r--r--numpy/lib/format.py36
-rw-r--r--numpy/lib/npyio.py19
-rw-r--r--numpy/lib/polynomial.py10
-rw-r--r--numpy/lib/tests/test_format.py15
-rw-r--r--numpy/lib/tests/test_io.py14
-rw-r--r--numpy/lib/tests/test_polynomial.py18
-rw-r--r--numpy/random/mtrand/mtrand.pyx6
-rw-r--r--numpy/random/tests/test_random.py10
-rw-r--r--pavement.py2
-rwxr-xr-xsetup.py2
41 files changed, 474 insertions, 256 deletions
diff --git a/.mailmap b/.mailmap
index a5b6e04de..6cf0b9f4c 100644
--- a/.mailmap
+++ b/.mailmap
@@ -44,6 +44,7 @@ Benjamin Root <ben.v.root@gmail.com> Ben Root <ben.v.root@gmail.com>
Benjamin Root <ben.v.root@gmail.com> weathergod <?@?>
Bertrand Lefebvre <bertrand.l3f@gmail.com> bertrand <bertrand.l3f@gmail.com>
Bertrand Lefebvre <bertrand.l3f@gmail.com> Bertrand <bertrand.l3f@gmail.com>
+Bharat Raghunathan <bharatr@symphonyai.com> Bharat123Rox <bharatr@symphonyai.com>
Bob Eldering <eldering@jive.eu> bobeldering <eldering@jive.eu>
Brett R Murphy <bmurphy@enthought.com> brettrmurphy <bmurphy@enthought.com>
Bryan Van de Ven <bryanv@continuum.io> Bryan Van de Ven <bryan@Laptop-3.local>
diff --git a/MANIFEST.in b/MANIFEST.in
index e15e0e58a..647e2f704 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -11,15 +11,18 @@ include site.cfg.example
include numpy/random/mtrand/generate_mtrand_c.py
recursive-include numpy/random/mtrand *.pyx *.pxd
# Add build support that should go in sdist, but not go in bdist/be installed
+# Note that sub-directories that don't have __init__ are apparently not
+# included by 'recursive-include', so list those separately
+recursive-include numpy *
recursive-include numpy/_build_utils *
-recursive-include numpy/linalg/lapack_lite *.c *.h
+recursive-include numpy/linalg/lapack_lite *
include runtests.py
include tox.ini pytest.ini .coveragerc
recursive-include tools *
# Add sdist files whose use depends on local configuration.
include numpy/core/src/common/cblasfuncs.c
include numpy/core/src/common/python_xerbla.c
-# Adding scons build related files not found by distutils
+# Adding build related files not found by distutils
recursive-include numpy/core/code_generators *.py *.txt
recursive-include numpy/core *.in *.h
# Add documentation and benchmarks: we don't use add_data_dir since we do not
diff --git a/doc/changelog/1.16.3-changelog.rst b/doc/changelog/1.16.3-changelog.rst
new file mode 100644
index 000000000..96291c0ae
--- /dev/null
+++ b/doc/changelog/1.16.3-changelog.rst
@@ -0,0 +1,55 @@
+
+Contributors
+============
+
+A total of 16 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Andreas Schwab
+* Bharat Raghunathan +
+* Bran +
+* Charles Harris
+* Eric Wieser
+* Jakub Wilk
+* Kevin Sheppard
+* Marten van Kerkwijk
+* Matti Picus
+* Paul Ivanov
+* Ralf Gommers
+* Sebastian Berg
+* Tyler Reddy
+* Warren Weckesser
+* Yu Feng
+* adeak +
+
+Pull requests merged
+====================
+
+A total of 26 pull requests were merged for this release.
+
+* `#13072 <https://github.com/numpy/numpy/pull/13072>`__: BUG: Fixes to numpy.distutils.Configuration.get_version (#13056)
+* `#13082 <https://github.com/numpy/numpy/pull/13082>`__: BUG: Fix errors in string formatting while producing an error
+* `#13083 <https://github.com/numpy/numpy/pull/13083>`__: BUG: Convert fortran flags in environment variable
+* `#13084 <https://github.com/numpy/numpy/pull/13084>`__: BUG: Remove error-prone borrowed reference handling
+* `#13085 <https://github.com/numpy/numpy/pull/13085>`__: BUG: Add error checks when converting integers to datetime types
+* `#13091 <https://github.com/numpy/numpy/pull/13091>`__: BUG: Remove our patched version of `distutils.split_quoted`
+* `#13141 <https://github.com/numpy/numpy/pull/13141>`__: BUG: Fix testsuite failures on ppc and riscv
+* `#13142 <https://github.com/numpy/numpy/pull/13142>`__: BUG: Fix parameter validity checks in ``random.choice``
+* `#13143 <https://github.com/numpy/numpy/pull/13143>`__: BUG: Ensure linspace works on object input.
+* `#13144 <https://github.com/numpy/numpy/pull/13144>`__: BLD: fix include list for sdist building.
+* `#13145 <https://github.com/numpy/numpy/pull/13145>`__: BUG: __array_interface__ offset was always ignored
+* `#13274 <https://github.com/numpy/numpy/pull/13274>`__: MAINT: f2py: Add a cast to avoid a compiler warning.
+* `#13275 <https://github.com/numpy/numpy/pull/13275>`__: BUG, MAINT: fix reference count error on invalid input to ndarray.flat
+* `#13276 <https://github.com/numpy/numpy/pull/13276>`__: ENH: Cast covariance to double in random mvnormal
+* `#13278 <https://github.com/numpy/numpy/pull/13278>`__: BUG: Fix null pointer dereference in PyArray_DTypeFromObjectHelper
+* `#13339 <https://github.com/numpy/numpy/pull/13339>`__: BUG: Use C call to sysctlbyname for AVX detection on MacOS.
+* `#13340 <https://github.com/numpy/numpy/pull/13340>`__: BUG: Fix crash when calling savetxt on a padded array
+* `#13341 <https://github.com/numpy/numpy/pull/13341>`__: BUG: ufunc.at iteration variable size fix
+* `#13342 <https://github.com/numpy/numpy/pull/13342>`__: DOC: Add as_ctypes_type to the documentation
+* `#13350 <https://github.com/numpy/numpy/pull/13350>`__: BUG: Return the coefficients array directly
+* `#13351 <https://github.com/numpy/numpy/pull/13351>`__: BUG/MAINT: Tidy typeinfo.h and .c
+* `#13359 <https://github.com/numpy/numpy/pull/13359>`__: BUG: Make allow_pickle=False the default for loading
+* `#13360 <https://github.com/numpy/numpy/pull/13360>`__: DOC: fix some doctest failures
+* `#13363 <https://github.com/numpy/numpy/pull/13363>`__: BUG/MAINT: Tidy typeinfo.h and .c
+* `#13381 <https://github.com/numpy/numpy/pull/13381>`__: BLD: address mingw-w64 issue. Follow-up to gh-9977
+* `#13382 <https://github.com/numpy/numpy/pull/13382>`__: REL: Prepare for the NumPy release.
diff --git a/doc/release/1.16.1-notes.rst b/doc/release/1.16.1-notes.rst
index 2483b1834..2a190ef91 100644
--- a/doc/release/1.16.1-notes.rst
+++ b/doc/release/1.16.1-notes.rst
@@ -80,9 +80,9 @@ Improvements
Further improvements to ``ctypes`` support in ``np.ctypeslib``
--------------------------------------------------------------
-A new ``np.ctypeslib.as_ctypes_type`` function has been added, which can be
+A new `numpy.ctypeslib.as_ctypes_type` function has been added, which can be
used to converts a `dtype` into a best-guess `ctypes` type. Thanks to this
-new function, ``np.ctypeslib.as_ctypes`` now supports a much wider range of
+new function, `numpy.ctypeslib.as_ctypes` now supports a much wider range of
array types, including structures, booleans, and integers of non-native
endianness.
diff --git a/doc/release/1.16.3-notes.rst b/doc/release/1.16.3-notes.rst
new file mode 100644
index 000000000..181a7264d
--- /dev/null
+++ b/doc/release/1.16.3-notes.rst
@@ -0,0 +1,46 @@
+==========================
+NumPy 1.16.3 Release Notes
+==========================
+
+The NumPy 1.16.3 release fixes bugs reported against the 1.16.2 release, and
+also backports several enhancements from master that seem appropriate for a
+release series that is the last to support Python 2.7. The wheels on PyPI are
+linked with OpenBLAS v0.3.4+, which should fix the known threading issues
+found in previous OpenBLAS versions.
+
+Downstream developers building this release should use Cython >= 0.29.2 and,
+if using OpenBLAS, OpenBLAS > v0.3.4.
+
+The most noticeable change in this release is that unpickling object arrays
+when loading ``*.npy`` or ``*.npz`` files now requires an explicit opt-in.
+This backwards incompatible change was made in response to
+`CVE-2019-6446 <https://nvd.nist.gov/vuln/detail/CVE-2019-6446>`_.
+
+
+Compatibility notes
+===================
+
+Unpickling while loading requires explicit opt-in
+-------------------------------------------------
+The functions ``np.load``, and ``np.lib.format.read_array`` take an
+`allow_pickle` keyword which now defaults to ``False`` in response to
+`CVE-2019-6446 <https://nvd.nist.gov/vuln/detail/CVE-2019-6446>`_.
+
+
+Improvements
+============
+
+Covariance in `random.mvnormal` cast to double
+----------------------------------------------
+This should make the tolerance used when checking the singular values of the
+covariance matrix more meaningful.
+
+
+Changes
+=======
+
+``__array_interface__`` offset now works as documented
+------------------------------------------------------
+The interface may use an ``offset`` value that was previously mistakenly
+ignored.
+
diff --git a/doc/source/reference/routines.ctypeslib.rst b/doc/source/reference/routines.ctypeslib.rst
index 71b944a63..562638e9c 100644
--- a/doc/source/reference/routines.ctypeslib.rst
+++ b/doc/source/reference/routines.ctypeslib.rst
@@ -8,6 +8,7 @@ C-Types Foreign Function Interface (:mod:`numpy.ctypeslib`)
.. autofunction:: as_array
.. autofunction:: as_ctypes
+.. autofunction:: as_ctypes_type
.. autofunction:: ctypes_load_library
.. autofunction:: load_library
.. autofunction:: ndpointer
diff --git a/numpy/_build_utils/src/apple_sgemv_fix.c b/numpy/_build_utils/src/apple_sgemv_fix.c
index 4c9c82ece..c33c68992 100644
--- a/numpy/_build_utils/src/apple_sgemv_fix.c
+++ b/numpy/_build_utils/src/apple_sgemv_fix.c
@@ -29,6 +29,9 @@
#include <dlfcn.h>
#include <stdlib.h>
#include <stdio.h>
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#include <string.h>
/* ----------------------------------------------------------------- */
/* Original cblas_sgemv */
@@ -66,12 +69,35 @@ static int AVX_and_10_9 = 0;
/* Dynamic check for AVX support
* __builtin_cpu_supports("avx") is available in gcc 4.8,
* but clang and icc do not currently support it. */
-#define cpu_supports_avx()\
-(system("sysctl -n machdep.cpu.features | grep -q AVX") == 0)
-
+static inline int
+cpu_supports_avx()
+{
+ int enabled, r;
+ size_t length = sizeof(enabled);
+ r = sysctlbyname("hw.optional.avx1_0", &enabled, &length, NULL, 0);
+ if ( r == 0 && enabled != 0) {
+ return 1;
+ }
+ else {
+ return 0;
+ }
+}
+
/* Check if we are using MacOS X version 10.9 */
-#define using_mavericks()\
-(system("sw_vers -productVersion | grep -q 10\\.9\\.") == 0)
+static inline int
+using_mavericks()
+{
+ int r;
+ char str[32] = {0};
+ size_t size = sizeof(str);
+ r = sysctlbyname("kern.osproductversion", str, &size, NULL, 0);
+ if ( r == 0 && strncmp(str, "10.9", strlen("10.9")) == 0) {
+ return 1;
+ }
+ else {
+ return 0;
+ }
+}
__attribute__((destructor))
static void unloadlib(void)
diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py
index 8a690c43d..6c0555e23 100644
--- a/numpy/core/code_generators/ufunc_docstrings.py
+++ b/numpy/core/code_generators/ufunc_docstrings.py
@@ -845,7 +845,7 @@ add_newdoc('numpy.core.umath', 'cos',
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
- ValueError: invalid return array shape
+ ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
""")
@@ -2604,12 +2604,13 @@ add_newdoc('numpy.core.umath', 'matmul',
- Stacks of matrices are broadcast together as if the matrices
were elements, respecting the signature ``(n,k),(k,m)->(n,m)``:
- >>> a = a = np.full([9,5,7,3], True, dtype=bool)
- >>> c = np.full([9, 5, 4,3], True, dtype=bool)
+ >>> a = np.ones([9, 5, 7, 4])
+ >>> c = np.ones([9, 5, 4, 3])
>>> np.dot(a, c).shape
- (9, 5, 7, 9, 5, 4)
- >>> np.matmul(a, c).shape # n is 5, k is 3, m is 4
- (9, 5, 7, 4)
+ (9, 5, 7, 9, 5, 3)
+ >>> np.matmul(a, c).shape
+ (9, 5, 7, 3)
+ >>> # n is 7, k is 4, m is 3
The matmul function implements the semantics of the `@` operator introduced
in Python 3.5 following PEP465.
@@ -2620,7 +2621,7 @@ add_newdoc('numpy.core.umath', 'matmul',
>>> a = np.array([[1, 0],
... [0, 1]])
- >>> b = np.array([[4, 1],
+ >>> b = np.array([[4, 1],
... [2, 2]]
>>> np.matmul(a, b)
array([[4, 1],
@@ -3483,7 +3484,7 @@ add_newdoc('numpy.core.umath', 'sinh',
>>> np.sinh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
- ValueError: invalid return array shape
+ ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
""")
@@ -3668,7 +3669,7 @@ add_newdoc('numpy.core.umath', 'tan',
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
- ValueError: invalid return array shape
+ ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
""")
@@ -3719,7 +3720,7 @@ add_newdoc('numpy.core.umath', 'tanh',
>>> np.tanh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
- ValueError: invalid return array shape
+ ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
""")
diff --git a/numpy/core/function_base.py b/numpy/core/function_base.py
index b68fd4068..e3f1046cc 100644
--- a/numpy/core/function_base.py
+++ b/numpy/core/function_base.py
@@ -6,7 +6,7 @@ import operator
from . import numeric as _nx
from .numeric import (result_type, NaN, shares_memory, MAY_SHARE_BOUNDS,
- TooHardError, asanyarray)
+ TooHardError, asanyarray, ndim)
from numpy.core.multiarray import add_docstring
from numpy.core import overrides
@@ -140,7 +140,7 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,
dtype = dt
delta = stop - start
- y = _nx.arange(0, num, dtype=dt).reshape((-1,) + (1,) * delta.ndim)
+ y = _nx.arange(0, num, dtype=dt).reshape((-1,) + (1,) * ndim(delta))
# In-place multiplication y *= delta/div is faster, but prevents the multiplicant
# from overriding what class is produced, and thus prevents, e.g. use of Quantities,
# see gh-7142. Hence, we multiply in place only for standard scalar types.
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index 9ccca629e..aad0aae43 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -680,7 +680,9 @@ def configuration(parent_package='',top_path=None):
]
# Must be true for CRT compilers but not MinGW/cygwin. See gh-9977.
- is_msvc = platform.system() == 'Windows'
+ # Intel and Clang also don't seem happy with /GL
+ is_msvc = (platform.platform().startswith('Windows') and
+ platform.python_compiler().startswith('MS'))
config.add_installed_library('npymath',
sources=npymath_sources + [get_mathlib_info],
install_dir='lib',
diff --git a/numpy/core/src/multiarray/common.c b/numpy/core/src/multiarray/common.c
index addb67732..52694d491 100644
--- a/numpy/core/src/multiarray/common.c
+++ b/numpy/core/src/multiarray/common.c
@@ -343,7 +343,7 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
typestr = PyDict_GetItemString(ip, "typestr");
#if defined(NPY_PY3K)
/* Allow unicode type strings */
- if (PyUnicode_Check(typestr)) {
+ if (typestr && PyUnicode_Check(typestr)) {
tmp = PyUnicode_AsASCIIString(typestr);
typestr = tmp;
}
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index b9059ba4d..c8a78abfc 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -2481,7 +2481,7 @@ PyArray_FromInterface(PyObject *origin)
}
#endif
/* Get offset number from interface specification */
- attr = PyDict_GetItemString(origin, "offset");
+ attr = PyDict_GetItemString(iface, "offset");
if (attr) {
npy_longlong num = PyLong_AsLongLong(attr);
if (error_converting(num)) {
diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c
index 54d19d993..a33f643f1 100644
--- a/numpy/core/src/multiarray/datetime.c
+++ b/numpy/core/src/multiarray/datetime.c
@@ -2468,6 +2468,9 @@ convert_pyobject_to_datetime(PyArray_DatetimeMetaData *meta, PyObject *obj,
return -1;
}
*out = PyLong_AsLongLong(obj);
+ if (error_converting(*out)) {
+ return -1;
+ }
return 0;
}
/* Datetime scalar */
@@ -2666,6 +2669,9 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
}
*out = PyLong_AsLongLong(obj);
+ if (error_converting(*out)) {
+ return -1;
+ }
return 0;
}
/* Timedelta scalar */
@@ -2853,6 +2859,9 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
}
*out = PyLong_AsLongLong(obj);
+ if (error_converting(*out)) {
+ return -1;
+ }
return 0;
}
else {
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index 0471a2a3e..b6d33a74a 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -42,19 +42,6 @@ static PyObject *typeDict = NULL; /* Must be explicitly loaded */
static PyArray_Descr *
_use_inherit(PyArray_Descr *type, PyObject *newobj, int *errflag);
-
-/*
- * Returns value of PyMapping_GetItemString but as a borrowed reference instead
- * of a new reference.
- */
-static PyObject *
-Borrowed_PyMapping_GetItemString(PyObject *o, char *key)
-{
- PyObject *ret = PyMapping_GetItemString(o, key);
- Py_XDECREF(ret);
- return ret;
-}
-
static PyArray_Descr *
_arraydescr_from_ctypes_type(PyTypeObject *type)
{
@@ -1001,8 +988,11 @@ _convert_from_dict(PyObject *obj, int align)
{
PyArray_Descr *new;
PyObject *fields = NULL;
- PyObject *names, *offsets, *descrs, *titles, *tmp;
- PyObject *metadata;
+ PyObject *names = NULL;
+ PyObject *offsets= NULL;
+ PyObject *descrs = NULL;
+ PyObject *titles = NULL;
+ PyObject *metadata, *tmp;
int n, i;
int totalsize, itemsize;
int maxalign = 0;
@@ -1017,19 +1007,27 @@ _convert_from_dict(PyObject *obj, int align)
/*
* Use PyMapping_GetItemString to support dictproxy objects as well.
*/
- names = Borrowed_PyMapping_GetItemString(obj, "names");
- descrs = Borrowed_PyMapping_GetItemString(obj, "formats");
- if (!names || !descrs) {
+ names = PyMapping_GetItemString(obj, "names");
+ if (names == NULL) {
Py_DECREF(fields);
+ /* XXX should check this is a KeyError */
PyErr_Clear();
return _use_fields_dict(obj, align);
}
+ descrs = PyMapping_GetItemString(obj, "formats");
+ if (descrs == NULL) {
+ Py_DECREF(fields);
+ /* XXX should check this is a KeyError */
+ PyErr_Clear();
+ Py_DECREF(names);
+ return _use_fields_dict(obj, align);
+ }
n = PyObject_Length(names);
- offsets = Borrowed_PyMapping_GetItemString(obj, "offsets");
+ offsets = PyMapping_GetItemString(obj, "offsets");
if (!offsets) {
PyErr_Clear();
}
- titles = Borrowed_PyMapping_GetItemString(obj, "titles");
+ titles = PyMapping_GetItemString(obj, "titles");
if (!titles) {
PyErr_Clear();
}
@@ -1047,7 +1045,7 @@ _convert_from_dict(PyObject *obj, int align)
* If a property 'aligned' is in the dict, it overrides the align flag
* to be True if it not already true.
*/
- tmp = Borrowed_PyMapping_GetItemString(obj, "aligned");
+ tmp = PyMapping_GetItemString(obj, "aligned");
if (tmp == NULL) {
PyErr_Clear();
} else {
@@ -1055,11 +1053,13 @@ _convert_from_dict(PyObject *obj, int align)
align = 1;
}
else if (tmp != Py_False) {
+ Py_DECREF(tmp);
PyErr_SetString(PyExc_ValueError,
"NumPy dtype descriptor includes 'aligned' entry, "
"but its value is neither True nor False");
- return NULL;
+ goto fail;
}
+ Py_DECREF(tmp);
}
totalsize = 0;
@@ -1215,14 +1215,18 @@ _convert_from_dict(PyObject *obj, int align)
}
new->elsize = totalsize;
if (!PyTuple_Check(names)) {
- names = PySequence_Tuple(names);
- }
- else {
- Py_INCREF(names);
+ Py_SETREF(names, PySequence_Tuple(names));
+ if (names == NULL) {
+ Py_DECREF(new);
+ goto fail;
+ }
}
new->names = names;
new->fields = fields;
new->flags = dtypeflags;
+ /* new takes responsibility for DECREFing names, fields */
+ names = NULL;
+ fields = NULL;
/*
* If the fields weren't in order, and there was an OBJECT type,
@@ -1231,7 +1235,7 @@ _convert_from_dict(PyObject *obj, int align)
if (has_out_of_order_fields && PyDataType_REFCHK(new)) {
if (validate_object_field_overlap(new) < 0) {
Py_DECREF(new);
- return NULL;
+ goto fail;
}
}
@@ -1241,14 +1245,15 @@ _convert_from_dict(PyObject *obj, int align)
}
/* Override the itemsize if provided */
- tmp = Borrowed_PyMapping_GetItemString(obj, "itemsize");
+ tmp = PyMapping_GetItemString(obj, "itemsize");
if (tmp == NULL) {
PyErr_Clear();
} else {
itemsize = (int)PyArray_PyIntAsInt(tmp);
+ Py_DECREF(tmp);
if (error_converting(itemsize)) {
Py_DECREF(new);
- return NULL;
+ goto fail;
}
/* Make sure the itemsize isn't made too small */
if (itemsize < new->elsize) {
@@ -1257,7 +1262,7 @@ _convert_from_dict(PyObject *obj, int align)
"cannot override to smaller itemsize of %d",
(int)new->elsize, (int)itemsize);
Py_DECREF(new);
- return NULL;
+ goto fail;
}
/* If align is set, make sure the alignment divides into the size */
if (align && itemsize % new->alignment != 0) {
@@ -1266,30 +1271,37 @@ _convert_from_dict(PyObject *obj, int align)
"which is not divisible into the specified itemsize %d",
(int)new->alignment, (int)itemsize);
Py_DECREF(new);
- return NULL;
+ goto fail;
}
/* Set the itemsize */
new->elsize = itemsize;
}
/* Add the metadata if provided */
- metadata = Borrowed_PyMapping_GetItemString(obj, "metadata");
+ metadata = PyMapping_GetItemString(obj, "metadata");
if (metadata == NULL) {
PyErr_Clear();
}
else if (new->metadata == NULL) {
new->metadata = metadata;
- Py_XINCREF(new->metadata);
}
- else if (PyDict_Merge(new->metadata, metadata, 0) == -1) {
- Py_DECREF(new);
- return NULL;
+ else {
+ int ret = PyDict_Merge(new->metadata, metadata, 0);
+ Py_DECREF(metadata);
+ if (ret < 0) {
+ Py_DECREF(new);
+ goto fail;
+ }
}
return new;
fail:
Py_XDECREF(fields);
+ Py_XDECREF(names);
+ Py_XDECREF(descrs);
+ Py_XDECREF(offsets);
+ Py_XDECREF(titles);
return NULL;
}
diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c
index a3bc8e742..62a057538 100644
--- a/numpy/core/src/multiarray/iterators.c
+++ b/numpy/core/src/multiarray/iterators.c
@@ -539,6 +539,7 @@ iter_subscript(PyArrayIterObject *self, PyObject *ind)
char *dptr;
int size;
PyObject *obj = NULL;
+ PyObject *new;
PyArray_CopySwapFunc *copyswap;
if (ind == Py_Ellipsis) {
@@ -640,35 +641,34 @@ iter_subscript(PyArrayIterObject *self, PyObject *ind)
obj = ind;
}
- if (PyArray_Check(obj)) {
- /* Check for Boolean object */
- if (PyArray_TYPE((PyArrayObject *)obj) == NPY_BOOL) {
- ret = iter_subscript_Bool(self, (PyArrayObject *)obj);
- Py_DECREF(indtype);
- }
- /* Check for integer array */
- else if (PyArray_ISINTEGER((PyArrayObject *)obj)) {
- PyObject *new;
- new = PyArray_FromAny(obj, indtype, 0, 0,
- NPY_ARRAY_FORCECAST | NPY_ARRAY_ALIGNED, NULL);
- if (new == NULL) {
- goto fail;
- }
- Py_DECREF(obj);
- obj = new;
- new = iter_subscript_int(self, (PyArrayObject *)obj);
- Py_DECREF(obj);
- return new;
- }
- else {
- goto fail;
- }
+ /* Any remaining valid input is an array or has been turned into one */
+ if (!PyArray_Check(obj)) {
+ goto fail;
+ }
+
+ /* Check for Boolean array */
+ if (PyArray_TYPE((PyArrayObject *)obj) == NPY_BOOL) {
+ ret = iter_subscript_Bool(self, (PyArrayObject *)obj);
+ Py_DECREF(indtype);
Py_DECREF(obj);
return (PyObject *)ret;
}
- else {
- Py_DECREF(indtype);
+
+ /* Only integer arrays left */
+ if (!PyArray_ISINTEGER((PyArrayObject *)obj)) {
+ goto fail;
+ }
+
+ Py_INCREF(indtype);
+ new = PyArray_FromAny(obj, indtype, 0, 0,
+ NPY_ARRAY_FORCECAST | NPY_ARRAY_ALIGNED, NULL);
+ if (new == NULL) {
+ goto fail;
}
+ Py_DECREF(indtype);
+ Py_DECREF(obj);
+ Py_SETREF(new, iter_subscript_int(self, (PyArrayObject *)new));
+ return new;
fail:
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index 166533b3f..29440ce78 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -4722,11 +4722,9 @@ PyMODINIT_FUNC init_multiarray_umath(void) {
set_flaginfo(d);
/* Create the typeinfo types */
- typeinfo_init_structsequences();
- PyDict_SetItemString(d,
- "typeinfo", (PyObject *)&PyArray_typeinfoType);
- PyDict_SetItemString(d,
- "typeinforanged", (PyObject *)&PyArray_typeinforangedType);
+ if (typeinfo_init_structsequences(d) < 0) {
+ goto err;
+ }
if (!intern_strings()) {
goto err;
diff --git a/numpy/core/src/multiarray/typeinfo.c b/numpy/core/src/multiarray/typeinfo.c
index f0af76809..bc4147841 100644
--- a/numpy/core/src/multiarray/typeinfo.c
+++ b/numpy/core/src/multiarray/typeinfo.c
@@ -3,8 +3,7 @@
* Unfortunately, we need two different types to cover the cases where min/max
* do and do not appear in the tuple.
*/
-#define PY_SSIZE_T_CLEAN
-#include <Python.h>
+#include "typeinfo.h"
/* In python 2, this is not exported from Python.h */
#include <structseq.h>
@@ -14,8 +13,8 @@
#include "npy_pycompat.h"
-PyTypeObject PyArray_typeinfoType;
-PyTypeObject PyArray_typeinforangedType;
+static PyTypeObject PyArray_typeinfoType;
+static PyTypeObject PyArray_typeinforangedType;
static PyStructSequence_Field typeinfo_fields[] = {
{"char", "The character used to represent the type"},
@@ -51,7 +50,7 @@ static PyStructSequence_Desc typeinforanged_desc = {
7, /* n_in_sequence */
};
-PyObject *
+NPY_NO_EXPORT PyObject *
PyArray_typeinfo(
char typechar, int typenum, int nbits, int align,
PyTypeObject *type_obj)
@@ -77,7 +76,7 @@ PyArray_typeinfo(
return entry;
}
-PyObject *
+NPY_NO_EXPORT PyObject *
PyArray_typeinforanged(
char typechar, int typenum, int nbits, int align,
PyObject *max, PyObject *min, PyTypeObject *type_obj)
@@ -105,10 +104,36 @@ PyArray_typeinforanged(
return entry;
}
-void typeinfo_init_structsequences(void)
+/* Backport, only needed here */
+#if PY_VERSION_HEX < 0x03040000
+ static int
+ PyStructSequence_InitType2(PyTypeObject *type, PyStructSequence_Desc *desc) {
+ PyStructSequence_InitType(type, desc);
+ if (PyErr_Occurred()) {
+ return -1;
+ }
+ return 0;
+ }
+#endif
+
+NPY_NO_EXPORT int
+typeinfo_init_structsequences(PyObject *multiarray_dict)
{
- PyStructSequence_InitType(
- &PyArray_typeinfoType, &typeinfo_desc);
- PyStructSequence_InitType(
- &PyArray_typeinforangedType, &typeinforanged_desc);
+ if (PyStructSequence_InitType2(
+ &PyArray_typeinfoType, &typeinfo_desc) < 0) {
+ return -1;
+ }
+ if (PyStructSequence_InitType2(
+ &PyArray_typeinforangedType, &typeinforanged_desc) < 0) {
+ return -1;
+ }
+ if (PyDict_SetItemString(multiarray_dict,
+ "typeinfo", (PyObject *)&PyArray_typeinfoType) < 0) {
+ return -1;
+ }
+ if (PyDict_SetItemString(multiarray_dict,
+ "typeinforanged", (PyObject *)&PyArray_typeinforangedType) < 0) {
+ return -1;
+ }
+ return 0;
}
diff --git a/numpy/core/src/multiarray/typeinfo.h b/numpy/core/src/multiarray/typeinfo.h
index 5899c2093..28afa4120 100644
--- a/numpy/core/src/multiarray/typeinfo.h
+++ b/numpy/core/src/multiarray/typeinfo.h
@@ -1,17 +1,19 @@
#ifndef _NPY_PRIVATE_TYPEINFO_H_
#define _NPY_PRIVATE_TYPEINFO_H_
-void typeinfo_init_structsequences(void);
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
+#include "npy_config.h"
-extern PyTypeObject PyArray_typeinfoType;
-extern PyTypeObject PyArray_typeinforangedType;
+NPY_VISIBILITY_HIDDEN int
+typeinfo_init_structsequences(PyObject *multiarray_dict);
-PyObject *
+NPY_VISIBILITY_HIDDEN PyObject *
PyArray_typeinfo(
char typechar, int typenum, int nbits, int align,
PyTypeObject *type_obj);
-PyObject *
+NPY_VISIBILITY_HIDDEN PyObject *
PyArray_typeinforanged(
char typechar, int typenum, int nbits, int align,
PyObject *max, PyObject *min, PyTypeObject *type_obj);
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index 1ab48bb90..d1b029c18 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -5501,7 +5501,7 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args)
PyUFuncGenericFunction innerloop;
void *innerloopdata;
- int i;
+ npy_intp i;
int nop;
/* override vars */
diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index 9832b4275..170c52e9e 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -1543,6 +1543,12 @@ class TestDateTime(object):
assert_equal(x[0].astype(np.int64), 322689600000000000)
+ # gh-13062
+ with pytest.raises(OverflowError):
+ np.datetime64(2**64, 'D')
+ with pytest.raises(OverflowError):
+ np.timedelta64(2**64, 'D')
+
def test_datetime_as_string(self):
# Check all the units with default string conversion
date = '1959-10-13'
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index 8f371197c..a33361218 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -215,7 +215,6 @@ class TestRecord(object):
assert_equal(dt1.descr, [('a', '|i1'), ('', '|V3'),
('b', [('f0', '<i2'), ('', '|V2'),
('f1', '<f4')], (2,))])
-
def test_union_struct(self):
# Should be able to create union dtypes
@@ -321,6 +320,11 @@ class TestRecord(object):
assert_equal(dt[1], dt[np.int8(1)])
+ def test_partial_dict(self):
+ # 'names' is missing
+ assert_raises(ValueError, np.dtype,
+ {'formats': ['i4', 'i4'], 'f0': ('i4', 0), 'f1':('i4', 4)})
+
class TestSubarray(object):
def test_single_subarray(self):
diff --git a/numpy/core/tests/test_function_base.py b/numpy/core/tests/test_function_base.py
index 459bacab0..8b820bd75 100644
--- a/numpy/core/tests/test_function_base.py
+++ b/numpy/core/tests/test_function_base.py
@@ -362,3 +362,9 @@ class TestLinspace(object):
assert_(isinstance(y, tuple) and len(y) == 2 and
len(y[0]) == num and isnan(y[1]),
'num={0}, endpoint={1}'.format(num, ept))
+
+ def test_object(self):
+ start = array(1, dtype='O')
+ stop = array(2, dtype='O')
+ y = linspace(start, stop, 3)
+ assert_array_equal(y, array([1., 1.5, 2.]))
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 7dd35c736..3005a65cd 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -4870,6 +4870,22 @@ class TestFlat(object):
assert_(e.flags.writebackifcopy is False)
assert_(f.flags.writebackifcopy is False)
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+ def test_refcount(self):
+ # includes regression test for reference count error gh-13165
+ inds = [np.intp(0), np.array([True]*self.a.size), np.array([0]), None]
+ indtype = np.dtype(np.intp)
+ rc_indtype = sys.getrefcount(indtype)
+ for ind in inds:
+ rc_ind = sys.getrefcount(ind)
+ for _ in range(100):
+ try:
+ self.a.flat[ind]
+ except IndexError:
+ pass
+ assert_(abs(sys.getrefcount(ind) - rc_ind) < 50)
+ assert_(abs(sys.getrefcount(indtype) - rc_indtype) < 50)
+
class TestResize(object):
def test_basic(self):
@@ -7102,6 +7118,22 @@ def test_array_interface_empty_shape():
assert_equal(arr1, arr2)
assert_equal(arr1, arr3)
+def test_array_interface_offset():
+ arr = np.array([1, 2, 3], dtype='int32')
+ interface = dict(arr.__array_interface__)
+ if sys.version_info[0] < 3:
+ interface['data'] = buffer(arr)
+ else:
+ interface['data'] = memoryview(arr)
+ interface['shape'] = (2,)
+ interface['offset'] = 4
+
+
+ class DummyArray(object):
+ __array_interface__ = interface
+
+ arr1 = np.asarray(DummyArray())
+ assert_equal(arr1, arr[1:])
def test_flat_element_deletion():
it = np.ones(3).flat
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index 472a83696..3b9ca7246 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -99,7 +99,7 @@ class TestRegression(object):
f = BytesIO()
pickle.dump(ca, f, protocol=proto)
f.seek(0)
- ca = np.load(f)
+ ca = np.load(f, allow_pickle=True)
f.close()
def test_noncontiguous_fill(self):
@@ -2449,3 +2449,9 @@ class TestRegression(object):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
dumped = pickle.dumps(arr, protocol=proto)
assert_equal(pickle.loads(dumped), arr)
+
+ def test_bad_array_interface(self):
+ class T(object):
+ __array_interface__ = {}
+
+ np.array([T()])
diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py
index 51bcf2b8d..ebba457e3 100644
--- a/numpy/core/tests/test_scalarmath.py
+++ b/numpy/core/tests/test_scalarmath.py
@@ -422,7 +422,7 @@ class TestConversion(object):
@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble),
reason="long double is same as double")
- @pytest.mark.skipif(platform.machine().startswith("ppc64"),
+ @pytest.mark.skipif(platform.machine().startswith("ppc"),
reason="IBM double double")
def test_int_from_huge_longdouble(self):
# Produce a longdouble that would overflow a double,
diff --git a/numpy/ctypeslib.py b/numpy/ctypeslib.py
index 78aa59ddc..535ea768b 100644
--- a/numpy/ctypeslib.py
+++ b/numpy/ctypeslib.py
@@ -462,7 +462,7 @@ if ctypes is not None:
def as_ctypes_type(dtype):
- """
+ r"""
Convert a dtype into a ctypes type.
Parameters
@@ -472,7 +472,7 @@ if ctypes is not None:
Returns
-------
- ctypes
+ ctype
A ctype scalar, union, array, or struct
Raises
@@ -485,13 +485,17 @@ if ctypes is not None:
This function does not losslessly round-trip in either direction.
``np.dtype(as_ctypes_type(dt))`` will:
+
- insert padding fields
- reorder fields to be sorted by offset
- discard field titles
``as_ctypes_type(np.dtype(ctype))`` will:
- - discard the class names of ``Structure``s and ``Union``s
- - convert single-element ``Union``s into single-element ``Structure``s
+
+ - discard the class names of `ctypes.Structure`\ s and
+ `ctypes.Union`\ s
+ - convert single-element `ctypes.Union`\ s into single-element
+ `ctypes.Structure`\ s
- insert padding fields
"""
diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py
index 100d0d069..a60f8ebd4 100644
--- a/numpy/distutils/ccompiler.py
+++ b/numpy/distutils/ccompiler.py
@@ -796,63 +796,3 @@ for _cc in ['msvc9', 'msvc', '_msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']:
if _m is not None:
setattr(_m, 'gen_lib_options', gen_lib_options)
-
-##Fix distutils.util.split_quoted:
-# NOTE: I removed this fix in revision 4481 (see ticket #619), but it appears
-# that removing this fix causes f2py problems on Windows XP (see ticket #723).
-# Specifically, on WinXP when gfortran is installed in a directory path, which
-# contains spaces, then f2py is unable to find it.
-import string
-_wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace)
-_squote_re = re.compile(r"'(?:[^'\\]|\\.)*'")
-_dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"')
-_has_white_re = re.compile(r'\s')
-def split_quoted(s):
- s = s.strip()
- words = []
- pos = 0
-
- while s:
- m = _wordchars_re.match(s, pos)
- end = m.end()
- if end == len(s):
- words.append(s[:end])
- break
-
- if s[end] in string.whitespace: # unescaped, unquoted whitespace: now
- words.append(s[:end]) # we definitely have a word delimiter
- s = s[end:].lstrip()
- pos = 0
-
- elif s[end] == '\\': # preserve whatever is being escaped;
- # will become part of the current word
- s = s[:end] + s[end+1:]
- pos = end+1
-
- else:
- if s[end] == "'": # slurp singly-quoted string
- m = _squote_re.match(s, end)
- elif s[end] == '"': # slurp doubly-quoted string
- m = _dquote_re.match(s, end)
- else:
- raise RuntimeError("this can't happen (bad char '%c')" % s[end])
-
- if m is None:
- raise ValueError("bad string (mismatched %s quotes?)" % s[end])
-
- (beg, end) = m.span()
- if _has_white_re.search(s[beg+1:end-1]):
- s = s[:beg] + s[beg+1:end-1] + s[end:]
- pos = m.end() - 2
- else:
- # Keeping quotes when a quoted word does not contain
- # white-space. XXX: send a patch to distutils
- pos = m.end()
-
- if pos >= len(s):
- words.append(s)
- break
-
- return words
-ccompiler.split_quoted = split_quoted
-##Fix distutils.util.split_quoted:
diff --git a/numpy/distutils/fcompiler/environment.py b/numpy/distutils/fcompiler/environment.py
index 4238f35cb..73a5e98e1 100644
--- a/numpy/distutils/fcompiler/environment.py
+++ b/numpy/distutils/fcompiler/environment.py
@@ -51,13 +51,16 @@ class EnvironmentConfig(object):
def _get_var(self, name, conf_desc):
hook, envvar, confvar, convert, append = conf_desc
+ if convert is None:
+ convert = lambda x: x
var = self._hook_handler(name, hook)
if envvar is not None:
envvar_contents = os.environ.get(envvar)
if envvar_contents is not None:
+ envvar_contents = convert(envvar_contents)
if var and append:
if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '0') == '1':
- var = var + [envvar_contents]
+ var.extend(envvar_contents)
else:
var = envvar_contents
if 'NPY_DISTUTILS_APPEND_FLAGS' not in os.environ.keys():
@@ -70,11 +73,12 @@ class EnvironmentConfig(object):
else:
var = envvar_contents
if confvar is not None and self._conf:
- var = self._conf.get(confvar, (None, var))[1]
- if convert is not None:
- var = convert(var)
+ if confvar in self._conf:
+ source, confvar_contents = self._conf[confvar]
+ var = convert(confvar_contents)
return var
+
def clone(self, hook_handler):
ec = self.__class__(distutils_section=self._distutils_section,
**self._conf_keys)
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index 67a5f7234..42374ac4f 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -1833,22 +1833,16 @@ class Configuration(object):
def _get_svn_revision(self, path):
"""Return path's SVN revision number.
"""
- revision = None
- m = None
- cwd = os.getcwd()
try:
- os.chdir(path or '.')
- p = subprocess.Popen(['svnversion'], shell=True,
- stdout=subprocess.PIPE, stderr=None,
- close_fds=True)
- sout = p.stdout
- m = re.match(r'(?P<revision>\d+)', sout.read())
- except Exception:
+ output = subprocess.check_output(
+ ['svnversion'], shell=True, cwd=path)
+ except (subprocess.CalledProcessError, OSError):
pass
- os.chdir(cwd)
- if m:
- revision = int(m.group('revision'))
- return revision
+ else:
+ m = re.match(br'(?P<revision>\d+)', output)
+ if m:
+ return int(m.group('revision'))
+
if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None):
entries = njoin(path, '_svn', 'entries')
else:
@@ -1860,32 +1854,26 @@ class Configuration(object):
if fstr[:5] == '<?xml': # pre 1.4
m = re.search(r'revision="(?P<revision>\d+)"', fstr)
if m:
- revision = int(m.group('revision'))
+ return int(m.group('revision'))
else: # non-xml entries file --- check to be sure that
m = re.search(r'dir[\n\r]+(?P<revision>\d+)', fstr)
if m:
- revision = int(m.group('revision'))
- return revision
+ return int(m.group('revision'))
+ return None
def _get_hg_revision(self, path):
"""Return path's Mercurial revision number.
"""
- revision = None
- m = None
- cwd = os.getcwd()
try:
- os.chdir(path or '.')
- p = subprocess.Popen(['hg identify --num'], shell=True,
- stdout=subprocess.PIPE, stderr=None,
- close_fds=True)
- sout = p.stdout
- m = re.match(r'(?P<revision>\d+)', sout.read())
- except Exception:
+ output = subprocess.check_output(
+ ['hg identify --num'], shell=True, cwd=path)
+ except (subprocess.CalledProcessError, OSError):
pass
- os.chdir(cwd)
- if m:
- revision = int(m.group('revision'))
- return revision
+ else:
+ m = re.match(br'(?P<revision>\d+)', output)
+ if m:
+ return int(m.group('revision'))
+
branch_fn = njoin(path, '.hg', 'branch')
branch_cache_fn = njoin(path, '.hg', 'branch.cache')
@@ -1906,8 +1894,9 @@ class Configuration(object):
continue
branch_map[branch1] = revision1
- revision = branch_map.get(branch0)
- return revision
+ return branch_map.get(branch0)
+
+ return None
def get_version(self, version_file=None, version_variable=None):
diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py
index c4a650585..0e9cba1eb 100755
--- a/numpy/f2py/crackfortran.py
+++ b/numpy/f2py/crackfortran.py
@@ -2399,7 +2399,7 @@ def _selected_real_kind_func(p, r=0, radix=0):
if p < 16:
return 8
machine = platform.machine().lower()
- if machine.startswith(('aarch64', 'power', 'ppc64', 's390x', 'sparc')):
+ if machine.startswith(('aarch64', 'power', 'ppc', 'riscv', 's390x', 'sparc')):
if p <= 20:
return 16
else:
diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c
index 78b06f066..4a981bf55 100644
--- a/numpy/f2py/src/fortranobject.c
+++ b/numpy/f2py/src/fortranobject.c
@@ -135,7 +135,7 @@ format_def(char *buf, Py_ssize_t size, FortranDataDef def)
if (def.data == NULL) {
static const char notalloc[] = ", not allocated";
- if (size < sizeof(notalloc)) {
+ if ((size_t) size < sizeof(notalloc)) {
return -1;
}
memcpy(p, notalloc, sizeof(notalloc));
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index 10945e5e8..0a8e9b274 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -149,7 +149,7 @@ data HEADER_LEN."
Notes
-----
The ``.npy`` format, including motivation for creating it and a comparison of
-alternatives, is described in the `"npy-format" NEP
+alternatives, is described in the `"npy-format" NEP
<https://www.numpy.org/neps/nep-0001-npy-format.html>`_, however details have
evolved with time and this document is more current.
@@ -525,7 +525,7 @@ def _read_array_header(fp, version):
elif version == (2, 0):
hlength_type = '<I'
else:
- raise ValueError("Invalid version %r" % version)
+ raise ValueError("Invalid version {!r}".format(version))
hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length")
header_length = struct.unpack(hlength_type, hlength_str)[0]
@@ -541,29 +541,29 @@ def _read_array_header(fp, version):
try:
d = safe_eval(header)
except SyntaxError as e:
- msg = "Cannot parse header: %r\nException: %r"
- raise ValueError(msg % (header, e))
+ msg = "Cannot parse header: {!r}\nException: {!r}"
+ raise ValueError(msg.format(header, e))
if not isinstance(d, dict):
- msg = "Header is not a dictionary: %r"
- raise ValueError(msg % d)
+ msg = "Header is not a dictionary: {!r}"
+ raise ValueError(msg.format(d))
keys = sorted(d.keys())
if keys != ['descr', 'fortran_order', 'shape']:
- msg = "Header does not contain the correct keys: %r"
- raise ValueError(msg % (keys,))
+ msg = "Header does not contain the correct keys: {!r}"
+ raise ValueError(msg.format(keys))
# Sanity-check the values.
if (not isinstance(d['shape'], tuple) or
not numpy.all([isinstance(x, (int, long)) for x in d['shape']])):
- msg = "shape is not valid: %r"
- raise ValueError(msg % (d['shape'],))
+ msg = "shape is not valid: {!r}"
+ raise ValueError(msg.format(d['shape']))
if not isinstance(d['fortran_order'], bool):
- msg = "fortran_order is not a valid bool: %r"
- raise ValueError(msg % (d['fortran_order'],))
+ msg = "fortran_order is not a valid bool: {!r}"
+ raise ValueError(msg.format(d['fortran_order']))
try:
dtype = descr_to_dtype(d['descr'])
except TypeError as e:
- msg = "descr is not a valid dtype descriptor: %r"
- raise ValueError(msg % (d['descr'],))
+ msg = "descr is not a valid dtype descriptor: {!r}"
+ raise ValueError(msg.format(d['descr']))
return d['shape'], d['fortran_order'], dtype
@@ -645,7 +645,7 @@ def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None):
fp.write(chunk.tobytes('C'))
-def read_array(fp, allow_pickle=True, pickle_kwargs=None):
+def read_array(fp, allow_pickle=False, pickle_kwargs=None):
"""
Read an array from an NPY file.
@@ -655,7 +655,11 @@ def read_array(fp, allow_pickle=True, pickle_kwargs=None):
If this is not a real file object, then this may take extra memory
and time.
allow_pickle : bool, optional
- Whether to allow reading pickled data. Default: True
+        Whether to allow reading pickled data. Default: False
+
+ .. versionchanged:: 1.16.3
+ Made default False in response to CVE-2019-6446.
+
pickle_kwargs : dict
Additional keyword arguments to pass to pickle.load. These are only
useful when loading object arrays saved on Python 2 when using
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index db6a8e5eb..038d6a496 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -146,7 +146,11 @@ class NpzFile(Mapping):
An object on which attribute can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
allow_pickle : bool, optional
- Allow loading pickled data. Default: True
+ Allow loading pickled data. Default: False
+
+ .. versionchanged:: 1.16.3
+ Made default False in response to CVE-2019-6446.
+
pickle_kwargs : dict, optional
Additional keyword arguments to pass on to pickle.load.
These are only useful when loading object arrays saved on
@@ -182,7 +186,7 @@ class NpzFile(Mapping):
"""
- def __init__(self, fid, own_fid=False, allow_pickle=True,
+ def __init__(self, fid, own_fid=False, allow_pickle=False,
pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
@@ -285,7 +289,7 @@ class NpzFile(Mapping):
@set_module('numpy')
-def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
+def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
encoding='ASCII'):
"""
Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
@@ -307,8 +311,11 @@ def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
Allow loading pickled object arrays stored in npy files. Reasons for
disallowing pickles include security, as loading pickled data can
execute arbitrary code. If pickles are disallowed, loading object
- arrays will fail.
- Default: True
+ arrays will fail. Default: False
+
+ .. versionchanged:: 1.16.3
+ Made default False in response to CVE-2019-6446.
+
fix_imports : bool, optional
Only useful when loading Python 2 generated pickled files on Python 3,
which includes npy/npz files containing object arrays. If `fix_imports`
@@ -1376,7 +1383,7 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
# Complex dtype -- each field indicates a separate column
else:
- ncol = len(X.dtype.descr)
+ ncol = len(X.dtype.names)
else:
ncol = X.shape[1]
diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py
index e3defdca2..7c858475a 100644
--- a/numpy/lib/polynomial.py
+++ b/numpy/lib/polynomial.py
@@ -1107,8 +1107,14 @@ class poly1d(object):
@property
def coeffs(self):
- """ A copy of the polynomial coefficients """
- return self._coeffs.copy()
+ """ The polynomial coefficients """
+ return self._coeffs
+
+ @coeffs.setter
+ def coeffs(self, value):
+ # allowing this makes p.coeffs *= 2 legal
+ if value is not self._coeffs:
+ raise AttributeError("Cannot set attribute")
@property
def variable(self):
diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py
index 077507082..2ebd483d5 100644
--- a/numpy/lib/tests/test_format.py
+++ b/numpy/lib/tests/test_format.py
@@ -426,7 +426,7 @@ def roundtrip(arr):
f = BytesIO()
format.write_array(f, arr)
f2 = BytesIO(f.getvalue())
- arr2 = format.read_array(f2)
+ arr2 = format.read_array(f2, allow_pickle=True)
return arr2
@@ -576,7 +576,7 @@ def test_pickle_python2_python3():
path = os.path.join(data_dir, fname)
for encoding in ['bytes', 'latin1']:
- data_f = np.load(path, encoding=encoding)
+ data_f = np.load(path, allow_pickle=True, encoding=encoding)
if fname.endswith('.npz'):
data = data_f['x']
data_f.close()
@@ -598,16 +598,19 @@ def test_pickle_python2_python3():
if sys.version_info[0] >= 3:
if fname.startswith('py2'):
if fname.endswith('.npz'):
- data = np.load(path)
+ data = np.load(path, allow_pickle=True)
assert_raises(UnicodeError, data.__getitem__, 'x')
data.close()
- data = np.load(path, fix_imports=False, encoding='latin1')
+ data = np.load(path, allow_pickle=True, fix_imports=False,
+ encoding='latin1')
assert_raises(ImportError, data.__getitem__, 'x')
data.close()
else:
- assert_raises(UnicodeError, np.load, path)
+ assert_raises(UnicodeError, np.load, path,
+ allow_pickle=True)
assert_raises(ImportError, np.load, path,
- encoding='latin1', fix_imports=False)
+ allow_pickle=True, fix_imports=False,
+ encoding='latin1')
def test_pickle_disallow():
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index 7ef25538b..b8b786816 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -87,7 +87,7 @@ class RoundtripTest(object):
"""
save_kwds = kwargs.get('save_kwds', {})
- load_kwds = kwargs.get('load_kwds', {})
+ load_kwds = kwargs.get('load_kwds', {"allow_pickle": True})
file_on_disk = kwargs.get('file_on_disk', False)
if file_on_disk:
@@ -347,13 +347,23 @@ class TestSaveTxt(object):
assert_raises(ValueError, np.savetxt, c, np.array(1))
assert_raises(ValueError, np.savetxt, c, np.array([[[1], [2]]]))
- def test_record(self):
+ def test_structured(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
+ def test_structured_padded(self):
+ # gh-13297
+ a = np.array([(1, 2, 3),(4, 5, 6)], dtype=[
+ ('foo', 'i4'), ('bar', 'i4'), ('baz', 'i4')
+ ])
+ c = BytesIO()
+ np.savetxt(c, a[['foo', 'baz']], fmt='%d')
+ c.seek(0)
+ assert_equal(c.readlines(), [b'1 3\n', b'4 6\n'])
+
@pytest.mark.skipif(Path is None, reason="No pathlib.Path")
def test_multifield_view(self):
a = np.ones(1, dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'f4')])
diff --git a/numpy/lib/tests/test_polynomial.py b/numpy/lib/tests/test_polynomial.py
index 77414ba7c..89759bd83 100644
--- a/numpy/lib/tests/test_polynomial.py
+++ b/numpy/lib/tests/test_polynomial.py
@@ -246,16 +246,16 @@ class TestPolynomial(object):
assert_equal(r.coeffs.dtype, np.complex128)
assert_equal(q*a + r, b)
- def test_poly_coeffs_immutable(self):
- """ Coefficients should not be modifiable """
+ def test_poly_coeffs_mutable(self):
+ """ Coefficients should be modifiable """
p = np.poly1d([1, 2, 3])
- try:
- # despite throwing an exception, this used to change state
- p.coeffs += 1
- except Exception:
- pass
- assert_equal(p.coeffs, [1, 2, 3])
+ p.coeffs += 1
+ assert_equal(p.coeffs, [2, 3, 4])
p.coeffs[2] += 10
- assert_equal(p.coeffs, [1, 2, 3])
+ assert_equal(p.coeffs, [2, 3, 14])
+
+ # this never used to be allowed - let's not add features to deprecated
+ # APIs
+ assert_raises(AttributeError, setattr, p, 'coeffs', np.array(1))
diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx
index c0561137b..08312da55 100644
--- a/numpy/random/mtrand/mtrand.pyx
+++ b/numpy/random/mtrand/mtrand.pyx
@@ -1168,6 +1168,9 @@ cdef class RandomState:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
+ if size < 0:
+ raise ValueError("negative dimensions are not allowed")
+
if p is not None:
if np.count_nonzero(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
@@ -4415,6 +4418,7 @@ cdef class RandomState:
Behavior when the covariance matrix is not positive semidefinite.
tol : float, optional
Tolerance when checking the singular values in covariance matrix.
+ cov is cast to double before the check.
Returns
-------
@@ -4526,6 +4530,8 @@ cdef class RandomState:
# not zero. We continue to use the SVD rather than Cholesky in
# order to preserve current outputs.
+ # GH10839, ensure double to make tol meaningful
+ cov = cov.astype(np.double)
(u, s, v) = svd(cov)
if check_valid != 'ignore':
diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py
index d4721bc62..4529b4fbd 100644
--- a/numpy/random/tests/test_random.py
+++ b/numpy/random/tests/test_random.py
@@ -400,6 +400,10 @@ class TestRandomDist(object):
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
+ # gh-13087
+ assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
+ assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
+ assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2,
replace=False, p=[1, 0, 0])
@@ -717,6 +721,12 @@ class TestRandomDist(object):
assert_raises(ValueError, np.random.multivariate_normal, mean, cov,
check_valid='raise')
+ cov = np.array([[1, 0.1],[0.1, 1]], dtype=np.float32)
+ with suppress_warnings() as sup:
+ np.random.multivariate_normal(mean, cov)
+ w = sup.record(RuntimeWarning)
+ assert len(w) == 0
+
def test_negative_binomial(self):
np.random.seed(self.seed)
actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2))
diff --git a/pavement.py b/pavement.py
index e9f58a333..ecb42cd5e 100644
--- a/pavement.py
+++ b/pavement.py
@@ -42,7 +42,7 @@ from paver.easy import Bunch, options, task, sh
#-----------------------------------
# Path to the release notes
-RELEASE_NOTES = 'doc/release/1.16.2-notes.rst'
+RELEASE_NOTES = 'doc/release/1.16.3-notes.rst'
#-------------------------------------------------------
diff --git a/setup.py b/setup.py
index 77db25b65..fbfbfe68b 100755
--- a/setup.py
+++ b/setup.py
@@ -61,7 +61,7 @@ Operating System :: MacOS
MAJOR = 1
MINOR = 16
-MICRO = 2
+MICRO = 3
ISRELEASED = True
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)