summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--azure-pipelines.yml7
-rw-r--r--doc/changelog/1.18.2-changelog.rst25
-rw-r--r--doc/source/release.rst1
-rw-r--r--doc/source/release/1.18.2-notes.rst39
-rw-r--r--numpy/core/src/multiarray/sequence.c5
-rw-r--r--numpy/core/tests/test_multiarray.py21
-rw-r--r--numpy/core/tests/test_regression.py4
-rw-r--r--numpy/distutils/lib2def.py19
-rw-r--r--numpy/distutils/mingw32ccompiler.py50
-rw-r--r--numpy/distutils/misc_util.py5
-rw-r--r--numpy/distutils/tests/test_mingw32ccompiler.py42
-rw-r--r--numpy/random/_bounded_integers.pyx.in10
-rw-r--r--numpy/random/_generator.pyx116
-rw-r--r--numpy/random/mtrand.pyx38
-rw-r--r--numpy/testing/_private/utils.py21
-rw-r--r--pavement.py2
-rw-r--r--pytest.ini1
-rwxr-xr-xsetup.py2
-rw-r--r--test_requirements.txt5
-rwxr-xr-xtools/pypy-test.sh2
-rwxr-xr-xtools/travis-before-install.sh40
-rwxr-xr-xtools/travis-test.sh21
22 files changed, 297 insertions, 179 deletions
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 0ac3b6471..5c8930770 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -97,9 +97,10 @@ stages:
# manually link critical gfortran libraries
ln -s /usr/local/Cellar/gcc@4.9/4.9.4_1/lib/gcc/4.9/libgfortran.3.dylib /usr/local/lib/libgfortran.3.dylib
ln -s /usr/local/Cellar/gcc@4.9/4.9.4_1/lib/gcc/4.9/libquadmath.0.dylib /usr/local/lib/libquadmath.0.dylib
- # manually symlink gfortran-4.9 to plain gfortran
- # for f2py
- ln -s /usr/local/bin/gfortran-4.9 /usr/local/bin/gfortran
+ # Manually symlink gfortran-4.9 to plain gfortran for f2py.
+ # No longer needed after Feb 13 2020 as gfortran is already present
+ # and the attempted link errors. Keep this for future reference.
+ # ln -s /usr/local/bin/gfortran-4.9 /usr/local/bin/gfortran
displayName: 'make gfortran available on mac os vm'
# use the pre-built openblas binary that most closely
# matches our MacOS wheel builds -- currently based
diff --git a/doc/changelog/1.18.2-changelog.rst b/doc/changelog/1.18.2-changelog.rst
new file mode 100644
index 000000000..95008b897
--- /dev/null
+++ b/doc/changelog/1.18.2-changelog.rst
@@ -0,0 +1,25 @@
+
+Contributors
+============
+
+A total of 5 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Charles Harris
+* Ganesh Kathiresan +
+* Matti Picus
+* Sebastian Berg
+* przemb +
+
+Pull requests merged
+====================
+
+A total of 7 pull requests were merged for this release.
+
+* `#15675 <https://github.com/numpy/numpy/pull/15675>`__: TST: move _no_tracing to testing._private
+* `#15676 <https://github.com/numpy/numpy/pull/15676>`__: MAINT: Large overhead in some random functions
+* `#15677 <https://github.com/numpy/numpy/pull/15677>`__: TST: Do not create gfortran link in azure Mac testing.
+* `#15679 <https://github.com/numpy/numpy/pull/15679>`__: BUG: Added missing error check in `ndarray.__contains__`
+* `#15722 <https://github.com/numpy/numpy/pull/15722>`__: MAINT: use list-based APIs to call subprocesses
+* `#15729 <https://github.com/numpy/numpy/pull/15729>`__: REL: Prepare for 1.18.2 release.
+* `#15734 <https://github.com/numpy/numpy/pull/15734>`__: BUG: fix logic error when nm fails on 32-bit
diff --git a/doc/source/release.rst b/doc/source/release.rst
index cbeca3660..d8726d5b8 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -5,6 +5,7 @@ Release Notes
.. toctree::
:maxdepth: 3
+ 1.18.2 <release/1.18.2-notes>
1.18.1 <release/1.18.1-notes>
1.18.0 <release/1.18.0-notes>
1.17.5 <release/1.17.5-notes>
diff --git a/doc/source/release/1.18.2-notes.rst b/doc/source/release/1.18.2-notes.rst
new file mode 100644
index 000000000..629449b19
--- /dev/null
+++ b/doc/source/release/1.18.2-notes.rst
@@ -0,0 +1,39 @@
+.. currentmodule:: numpy
+
+==========================
+NumPy 1.18.2 Release Notes
+==========================
+
+This small release contains a fix for a performance regression in numpy/random
+and several bug/maintenance updates.
+
+The Python versions supported in this release are 3.5-3.8. Downstream
+developers should use Cython >= 0.29.15 for Python 3.8 support and OpenBLAS >=
+3.7 to avoid errors on the Skylake architecture.
+
+
+Contributors
+============
+
+A total of 5 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Charles Harris
+* Ganesh Kathiresan +
+* Matti Picus
+* Sebastian Berg
+* przemb +
+
+
+Pull requests merged
+====================
+
+A total of 7 pull requests were merged for this release.
+
+* `#15675 <https://github.com/numpy/numpy/pull/15675>`__: TST: move _no_tracing to testing._private
+* `#15676 <https://github.com/numpy/numpy/pull/15676>`__: MAINT: Large overhead in some random functions
+* `#15677 <https://github.com/numpy/numpy/pull/15677>`__: TST: Do not create gfortran link in azure Mac testing.
+* `#15679 <https://github.com/numpy/numpy/pull/15679>`__: BUG: Added missing error check in `ndarray.__contains__`
+* `#15722 <https://github.com/numpy/numpy/pull/15722>`__: MAINT: use list-based APIs to call subprocesses
+* `#15729 <https://github.com/numpy/numpy/pull/15729>`__: REL: Prepare for 1.18.2 release.
+* `#15734 <https://github.com/numpy/numpy/pull/15734>`__: BUG: fix logic error when nm fails on 32-bit
diff --git a/numpy/core/src/multiarray/sequence.c b/numpy/core/src/multiarray/sequence.c
index 4769bdad9..1efdd204f 100644
--- a/numpy/core/src/multiarray/sequence.c
+++ b/numpy/core/src/multiarray/sequence.c
@@ -38,8 +38,13 @@ array_contains(PyArrayObject *self, PyObject *el)
if (res == NULL) {
return -1;
}
+
any = PyArray_Any((PyArrayObject *)res, NPY_MAXDIMS, NULL);
Py_DECREF(res);
+ if (any == NULL) {
+ return -1;
+ }
+
ret = PyObject_IsTrue(any);
Py_DECREF(any);
return ret;
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 2464dee8b..958b265ca 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -46,6 +46,7 @@ from numpy.testing import (
assert_allclose, IS_PYPY, HAS_REFCOUNT, assert_array_less, runstring,
temppath, suppress_warnings, break_cycles,
)
+from numpy.testing._private.utils import _no_tracing
from numpy.core.tests._locales import CommaDecimalPointLocale
# Need to test an object that does not fully implement math interface
@@ -96,26 +97,6 @@ def _aligned_zeros(shape, dtype=float, order="C", align=None):
data.fill(0)
return data
-def _no_tracing(func):
- """
- Decorator to temporarily turn off tracing for the duration of a test.
- Needed in tests that check refcounting, otherwise the tracing itself
- influences the refcounts
- """
- if not hasattr(sys, 'gettrace'):
- return func
- else:
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- original_trace = sys.gettrace()
- try:
- sys.settrace(None)
- return func(*args, **kwargs)
- finally:
- sys.settrace(original_trace)
- return wrapper
-
-
class TestFlags(object):
def setup(self):
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index 3880b1394..f2d3d788d 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -16,8 +16,8 @@ from numpy.testing import (
assert_raises_regex, assert_warns, suppress_warnings,
_assert_valid_refcount, HAS_REFCOUNT,
)
+from numpy.testing._private.utils import _no_tracing
from numpy.compat import asbytes, asunicode, long, pickle
-from test.support import no_tracing
try:
RecursionError
@@ -1317,7 +1317,7 @@ class TestRegression(object):
assert_(pickle.loads(
pickle.dumps(test_record, protocol=proto)) == test_record)
- @no_tracing
+ @_no_tracing
def test_blasdot_uninitialized_memory(self):
# Ticket #950
for m in [0, 1, 2]:
diff --git a/numpy/distutils/lib2def.py b/numpy/distutils/lib2def.py
index 2d013a1e3..34b1eceaf 100644
--- a/numpy/distutils/lib2def.py
+++ b/numpy/distutils/lib2def.py
@@ -24,7 +24,7 @@ __version__ = '0.1a'
py_ver = "%d%d" % tuple(sys.version_info[:2])
-DEFAULT_NM = 'nm -Cs'
+DEFAULT_NM = ['nm', '-Cs']
DEF_HEADER = """LIBRARY python%s.dll
;CODE PRELOAD MOVEABLE DISCARDABLE
@@ -61,13 +61,16 @@ libfile, deffile = parse_cmd()"""
deffile = None
return libfile, deffile
-def getnm(nm_cmd = ['nm', '-Cs', 'python%s.lib' % py_ver]):
+def getnm(nm_cmd=['nm', '-Cs', 'python%s.lib' % py_ver], shell=True):
"""Returns the output of nm_cmd via a pipe.
-nm_output = getnam(nm_cmd = 'nm -Cs py_lib')"""
- f = subprocess.Popen(nm_cmd, shell=True, stdout=subprocess.PIPE, universal_newlines=True)
- nm_output = f.stdout.read()
- f.stdout.close()
+nm_output = getnm(nm_cmd = 'nm -Cs py_lib')"""
+ p = subprocess.Popen(nm_cmd, shell=shell, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE, universal_newlines=True)
+ nm_output, nm_err = p.communicate()
+ if p.returncode != 0:
+ raise RuntimeError('failed to run "%s": "%s"' % (
+ ' '.join(nm_cmd), nm_err))
return nm_output
def parse_nm(nm_output):
@@ -109,7 +112,7 @@ if __name__ == '__main__':
deffile = sys.stdout
else:
deffile = open(deffile, 'w')
- nm_cmd = [str(DEFAULT_NM), str(libfile)]
- nm_output = getnm(nm_cmd)
+ nm_cmd = DEFAULT_NM + [str(libfile)]
+ nm_output = getnm(nm_cmd, shell=False)
dlist, flist = parse_nm(nm_output)
output_def(dlist, flist, DEF_HEADER, deffile)
diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py
index 075858cfe..a56cc8f8c 100644
--- a/numpy/distutils/mingw32ccompiler.py
+++ b/numpy/distutils/mingw32ccompiler.py
@@ -71,10 +71,10 @@ class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler):
# we need to support 3.2 which doesn't match the standard
# get_versions methods regex
if self.gcc_version is None:
- p = subprocess.Popen(['gcc', '-dumpversion'], shell=True,
- stdout=subprocess.PIPE)
- out_string = p.stdout.read()
- p.stdout.close()
+ try:
+ out_string = subprocess.check_output(['gcc', '-dumpversion'])
+ except (OSError, CalledProcessError):
+ out_string = "" # ignore failures to match old behavior
result = re.search(r'(\d+\.\d+)', out_string)
if result:
self.gcc_version = StrictVersion(result.group(1))
@@ -285,8 +285,8 @@ def find_python_dll():
raise ValueError("%s not found in %s" % (dllname, lib_dirs))
def dump_table(dll):
- st = subprocess.Popen(["objdump.exe", "-p", dll], stdout=subprocess.PIPE)
- return st.stdout.readlines()
+ st = subprocess.check_output(["objdump.exe", "-p", dll])
+ return st.split(b'\n')
def generate_def(dll, dfile):
"""Given a dll file location, get all its exported symbols and dump them
@@ -311,15 +311,14 @@ def generate_def(dll, dfile):
if len(syms) == 0:
log.warn('No symbols found in %s' % dll)
- d = open(dfile, 'w')
- d.write('LIBRARY %s\n' % os.path.basename(dll))
- d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n')
- d.write(';DATA PRELOAD SINGLE\n')
- d.write('\nEXPORTS\n')
- for s in syms:
- #d.write('@%d %s\n' % (s[0], s[1]))
- d.write('%s\n' % s[1])
- d.close()
+ with open(dfile, 'w') as d:
+ d.write('LIBRARY %s\n' % os.path.basename(dll))
+ d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n')
+ d.write(';DATA PRELOAD SINGLE\n')
+ d.write('\nEXPORTS\n')
+ for s in syms:
+ #d.write('@%d %s\n' % (s[0], s[1]))
+ d.write('%s\n' % s[1])
def find_dll(dll_name):
@@ -472,7 +471,7 @@ def _build_import_library_amd64():
# generate import library from this symbol list
cmd = ['dlltool', '-d', def_file, '-l', out_file]
- subprocess.Popen(cmd)
+ subprocess.check_call(cmd)
def _build_import_library_x86():
""" Build the import libraries for Mingw32-gcc on Windows
@@ -506,16 +505,19 @@ def _build_import_library_x86():
def_name = "python%d%d.def" % tuple(sys.version_info[:2])
def_file = os.path.join(sys.prefix, 'libs', def_name)
- nm_cmd = '%s %s' % (lib2def.DEFAULT_NM, lib_file)
- nm_output = lib2def.getnm(nm_cmd)
+ nm_output = lib2def.getnm(
+ lib2def.DEFAULT_NM + [lib_file], shell=False)
dlist, flist = lib2def.parse_nm(nm_output)
- lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, open(def_file, 'w'))
+ with open(def_file, 'w') as fid:
+ lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, fid)
dll_name = find_python_dll ()
- args = (dll_name, def_file, out_file)
- cmd = 'dlltool --dllname "%s" --def "%s" --output-lib "%s"' % args
- status = os.system(cmd)
- # for now, fail silently
+
+ cmd = ["dlltool",
+ "--dllname", dll_name,
+ "--def", def_file,
+ "--output-lib", out_file]
+ status = subprocess.check_output(cmd)
if status:
log.warn('Failed to build import library for gcc. Linking will fail.')
return
@@ -548,6 +550,8 @@ if sys.platform == 'win32':
# Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0
# on Windows XP:
_MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460"
+ # Python 3.7 uses 1415, but get_build_version returns 140 ??
+ _MSVCRVER_TO_FULLVER['140'] = "14.15.26726.0"
if hasattr(msvcrt, "CRT_ASSEMBLY_VERSION"):
major, minor, rest = msvcrt.CRT_ASSEMBLY_VERSION.split(".", 2)
_MSVCRVER_TO_FULLVER[major + minor] = msvcrt.CRT_ASSEMBLY_VERSION
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index d46ff8981..bb1699ead 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -1868,8 +1868,7 @@ class Configuration(object):
"""Return path's SVN revision number.
"""
try:
- output = subprocess.check_output(
- ['svnversion'], shell=True, cwd=path)
+ output = subprocess.check_output(['svnversion'], cwd=path)
except (subprocess.CalledProcessError, OSError):
pass
else:
@@ -1899,7 +1898,7 @@ class Configuration(object):
"""
try:
output = subprocess.check_output(
- ['hg identify --num'], shell=True, cwd=path)
+ ['hg', 'identify', '--num'], cwd=path)
except (subprocess.CalledProcessError, OSError):
pass
else:
diff --git a/numpy/distutils/tests/test_mingw32ccompiler.py b/numpy/distutils/tests/test_mingw32ccompiler.py
new file mode 100644
index 000000000..ebedacb32
--- /dev/null
+++ b/numpy/distutils/tests/test_mingw32ccompiler.py
@@ -0,0 +1,42 @@
+import shutil
+import subprocess
+import sys
+import pytest
+
+from numpy.distutils import mingw32ccompiler
+
+
+@pytest.mark.skipif(sys.platform != 'win32', reason='win32 only test')
+def test_build_import():
+ '''Test the mingw32ccompiler.build_import_library, which builds a
+ `python.a` from the MSVC `python.lib`
+ '''
+
+ # make sure `nm.exe` exists and supports the current python version. This
+ # can get mixed up when the PATH has a 64-bit nm but the python is 32-bit
+ try:
+ out = subprocess.check_output(['nm.exe', '--help'])
+ except FileNotFoundError:
+ pytest.skip("'nm.exe' not on path, is mingw installed?")
+ supported = out[out.find(b'supported targets:'):]
+ if sys.maxsize < 2**32:
+ if b'pe-i386' not in supported:
+ raise ValueError("'nm.exe' found but it does not support 32-bit "
+ "dlls when using 32-bit python. Supported "
+ "formats: '%s'" % supported)
+ elif b'pe-x86-64' not in supported:
+ raise ValueError("'nm.exe' found but it does not support 64-bit "
+ "dlls when using 64-bit python. Supported "
+ "formats: '%s'" % supported)
+ # Hide the import library to force a build
+ has_import_lib, fullpath = mingw32ccompiler._check_for_import_lib()
+ if has_import_lib:
+ shutil.move(fullpath, fullpath + '.bak')
+
+ try:
+ # Whew, now we can actually test the function
+ mingw32ccompiler.build_import_library()
+
+ finally:
+ if has_import_lib:
+ shutil.move(fullpath + '.bak', fullpath)
diff --git a/numpy/random/_bounded_integers.pyx.in b/numpy/random/_bounded_integers.pyx.in
index 7e19471e4..accf571d1 100644
--- a/numpy/random/_bounded_integers.pyx.in
+++ b/numpy/random/_bounded_integers.pyx.in
@@ -51,16 +51,6 @@ cdef extern from "numpy/random/distributions.h":
np.npy_bool *out) nogil
-
-_integers_types = {'bool': (0, 2),
- 'int8': (-2**7, 2**7),
- 'int16': (-2**15, 2**15),
- 'int32': (-2**31, 2**31),
- 'int64': (-2**63, 2**63),
- 'uint8': (0, 2**8),
- 'uint16': (0, 2**16),
- 'uint32': (0, 2**32),
- 'uint64': (0, 2**64)}
{{
py:
type_info = (('uint32', 'uint32', 'uint64', 'NPY_UINT64', 0, 0, 0, '0X100000000ULL'),
diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx
index d76cde44c..6a217e954 100644
--- a/numpy/random/_generator.pyx
+++ b/numpy/random/_generator.pyx
@@ -17,7 +17,6 @@ from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t,
from ._bounded_integers cimport (_rand_bool, _rand_int32, _rand_int64,
_rand_int16, _rand_int8, _rand_uint64, _rand_uint32, _rand_uint16,
_rand_uint8, _gen_mask)
-from ._bounded_integers import _integers_types
from ._pcg64 import PCG64
from numpy.random cimport bitgen_t
from ._common cimport (POISSON_LAM_MAX, CONS_POSITIVE, CONS_NONE,
@@ -262,7 +261,7 @@ cdef class Generator:
def random(self, size=None, dtype=np.float64, out=None):
"""
- random(size=None, dtype='d', out=None)
+ random(size=None, dtype=np.float64, out=None)
Return random floats in the half-open interval [0.0, 1.0).
@@ -278,10 +277,9 @@ cdef class Generator:
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
- dtype : {str, dtype}, optional
- Desired dtype of the result, either 'd' (or 'float64') or 'f'
- (or 'float32'). All dtypes are determined by their name. The
- default value is 'd'.
+ dtype : dtype, optional
+ Desired dtype of the result, only `float64` and `float32` are supported.
+ Byteorder must be native. The default value is np.float64.
out : ndarray, optional
Alternative output array in which to place the result. If size is not None,
it must have the same shape as the provided size and must match the type of
@@ -312,13 +310,13 @@ cdef class Generator:
"""
cdef double temp
- key = np.dtype(dtype).name
- if key == 'float64':
+ _dtype = np.dtype(dtype)
+ if _dtype == np.float64:
return double_fill(&random_standard_uniform_fill, &self._bitgen, size, self.lock, out)
- elif key == 'float32':
+ elif _dtype == np.float32:
return float_fill(&random_standard_uniform_fill_f, &self._bitgen, size, self.lock, out)
else:
- raise TypeError('Unsupported dtype "%s" for random' % key)
+ raise TypeError('Unsupported dtype %r for random' % _dtype)
def beta(self, a, b, size=None):
"""
@@ -417,7 +415,7 @@ cdef class Generator:
def standard_exponential(self, size=None, dtype=np.float64, method=u'zig', out=None):
"""
- standard_exponential(size=None, dtype='d', method='zig', out=None)
+ standard_exponential(size=None, dtype=np.float64, method='zig', out=None)
Draw samples from the standard exponential distribution.
@@ -431,9 +429,8 @@ cdef class Generator:
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
dtype : dtype, optional
- Desired dtype of the result, either 'd' (or 'float64') or 'f'
- (or 'float32'). All dtypes are determined by their name. The
- default value is 'd'.
+ Desired dtype of the result, only `float64` and `float32` are supported.
+ Byteorder must be native. The default value is np.float64.
method : str, optional
Either 'inv' or 'zig'. 'inv' uses the default inverse CDF method.
'zig' uses the much faster Ziggurat method of Marsaglia and Tsang.
@@ -454,24 +451,24 @@ cdef class Generator:
>>> n = np.random.default_rng().standard_exponential((3, 8000))
"""
- key = np.dtype(dtype).name
- if key == 'float64':
+ _dtype = np.dtype(dtype)
+ if _dtype == np.float64:
if method == u'zig':
return double_fill(&random_standard_exponential_fill, &self._bitgen, size, self.lock, out)
else:
return double_fill(&random_standard_exponential_inv_fill, &self._bitgen, size, self.lock, out)
- elif key == 'float32':
+ elif _dtype == np.float32:
if method == u'zig':
return float_fill(&random_standard_exponential_fill_f, &self._bitgen, size, self.lock, out)
else:
return float_fill(&random_standard_exponential_inv_fill_f, &self._bitgen, size, self.lock, out)
else:
- raise TypeError('Unsupported dtype "%s" for standard_exponential'
- % key)
+ raise TypeError('Unsupported dtype %r for standard_exponential'
+ % _dtype)
def integers(self, low, high=None, size=None, dtype=np.int64, endpoint=False):
"""
- integers(low, high=None, size=None, dtype='int64', endpoint=False)
+ integers(low, high=None, size=None, dtype=np.int64, endpoint=False)
Return random integers from `low` (inclusive) to `high` (exclusive), or
if endpoint=True, `low` (inclusive) to `high` (inclusive). Replaces
@@ -496,11 +493,9 @@ cdef class Generator:
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
- dtype : {str, dtype}, optional
- Desired dtype of the result. All dtypes are determined by their
- name, i.e., 'int64', 'int', etc, so byteorder is not available
- and a specific precision may have different C types depending
- on the platform. The default value is `np.int_`.
+ dtype : dtype, optional
+ Desired dtype of the result. Byteorder must be native.
+ The default value is np.int64.
endpoint : bool, optional
If true, sample from the interval [low, high] instead of the
default [low, high)
@@ -559,39 +554,39 @@ cdef class Generator:
high = low
low = 0
- dt = np.dtype(dtype)
- key = dt.name
- if key not in _integers_types:
- raise TypeError('Unsupported dtype "%s" for integers' % key)
- if not dt.isnative:
- raise ValueError('Providing a dtype with a non-native byteorder '
- 'is not supported. If you require '
- 'platform-independent byteorder, call byteswap '
- 'when required.')
+ _dtype = np.dtype(dtype)
# Implementation detail: the old API used a masked method to generate
# bounded uniform integers. Lemire's method is preferable since it is
# faster. randomgen allows a choice, we will always use the faster one.
cdef bint _masked = False
- if key == 'int32':
+ if _dtype == np.int32:
ret = _rand_int32(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- elif key == 'int64':
+ elif _dtype == np.int64:
ret = _rand_int64(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- elif key == 'int16':
+ elif _dtype == np.int16:
ret = _rand_int16(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- elif key == 'int8':
+ elif _dtype == np.int8:
ret = _rand_int8(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- elif key == 'uint64':
+ elif _dtype == np.uint64:
ret = _rand_uint64(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- elif key == 'uint32':
+ elif _dtype == np.uint32:
ret = _rand_uint32(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- elif key == 'uint16':
+ elif _dtype == np.uint16:
ret = _rand_uint16(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- elif key == 'uint8':
+ elif _dtype == np.uint8:
ret = _rand_uint8(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- elif key == 'bool':
+ elif _dtype == np.bool_:
ret = _rand_bool(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
+ elif not _dtype.isnative:
+ raise ValueError('Providing a dtype with a non-native byteorder '
+ 'is not supported. If you require '
+ 'platform-independent byteorder, call byteswap '
+ 'when required.')
+ else:
+ raise TypeError('Unsupported dtype %r for integers' % _dtype)
+
if size is None and dtype in (bool, int, np.compat.long):
if np.array(ret).shape == ():
@@ -977,7 +972,7 @@ cdef class Generator:
# Complicated, continuous distributions:
def standard_normal(self, size=None, dtype=np.float64, out=None):
"""
- standard_normal(size=None, dtype='d', out=None)
+ standard_normal(size=None, dtype=np.float64, out=None)
Draw samples from a standard Normal distribution (mean=0, stdev=1).
@@ -987,10 +982,9 @@ cdef class Generator:
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
- dtype : {str, dtype}, optional
- Desired dtype of the result, either 'd' (or 'float64') or 'f'
- (or 'float32'). All dtypes are determined by their name. The
- default value is 'd'.
+ dtype : dtype, optional
+ Desired dtype of the result, only `float64` and `float32` are supported.
+ Byteorder must be native. The default value is np.float64.
out : ndarray, optional
Alternative output array in which to place the result. If size is not None,
it must have the same shape as the provided size and must match the type of
@@ -1038,14 +1032,13 @@ cdef class Generator:
[ 0.39924804, 4.68456316, 4.99394529, 4.84057254]]) # random
"""
- key = np.dtype(dtype).name
- if key == 'float64':
+ _dtype = np.dtype(dtype)
+ if _dtype == np.float64:
return double_fill(&random_standard_normal_fill, &self._bitgen, size, self.lock, out)
- elif key == 'float32':
+ elif _dtype == np.float32:
return float_fill(&random_standard_normal_fill_f, &self._bitgen, size, self.lock, out)
-
else:
- raise TypeError('Unsupported dtype "%s" for standard_normal' % key)
+ raise TypeError('Unsupported dtype %r for standard_normal' % _dtype)
def normal(self, loc=0.0, scale=1.0, size=None):
"""
@@ -1151,7 +1144,7 @@ cdef class Generator:
def standard_gamma(self, shape, size=None, dtype=np.float64, out=None):
"""
- standard_gamma(shape, size=None, dtype='d', out=None)
+ standard_gamma(shape, size=None, dtype=np.float64, out=None)
Draw samples from a standard Gamma distribution.
@@ -1167,10 +1160,9 @@ cdef class Generator:
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``shape`` is a scalar. Otherwise,
``np.array(shape).size`` samples are drawn.
- dtype : {str, dtype}, optional
- Desired dtype of the result, either 'd' (or 'float64') or 'f'
- (or 'float32'). All dtypes are determined by their name. The
- default value is 'd'.
+ dtype : dtype, optional
+ Desired dtype of the result, only `float64` and `float32` are supported.
+ Byteorder must be native. The default value is np.float64.
out : ndarray, optional
Alternative output array in which to place the result. If size is
not None, it must have the same shape as the provided size and
@@ -1227,19 +1219,19 @@ cdef class Generator:
"""
cdef void *func
- key = np.dtype(dtype).name
- if key == 'float64':
+ _dtype = np.dtype(dtype)
+ if _dtype == np.float64:
return cont(&random_standard_gamma, &self._bitgen, size, self.lock, 1,
shape, 'shape', CONS_NON_NEGATIVE,
0.0, '', CONS_NONE,
0.0, '', CONS_NONE,
out)
- if key == 'float32':
+ if _dtype == np.float32:
return cont_f(&random_standard_gamma_f, &self._bitgen, size, self.lock,
shape, 'shape', CONS_NON_NEGATIVE,
out)
else:
- raise TypeError('Unsupported dtype "%s" for standard_gamma' % key)
+ raise TypeError('Unsupported dtype %r for standard_gamma' % _dtype)
def gamma(self, shape, scale=1.0, size=None):
"""
diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx
index a4d409f37..d4f7d2d96 100644
--- a/numpy/random/mtrand.pyx
+++ b/numpy/random/mtrand.pyx
@@ -15,7 +15,6 @@ from libc.stdint cimport int64_t, uint64_t
from ._bounded_integers cimport (_rand_bool, _rand_int32, _rand_int64,
_rand_int16, _rand_int8, _rand_uint64, _rand_uint32, _rand_uint16,
_rand_uint8,)
-from ._bounded_integers import _integers_types
from ._mt19937 import MT19937 as _MT19937
from numpy.random cimport bitgen_t
from ._common cimport (POISSON_LAM_MAX, CONS_POSITIVE, CONS_NONE,
@@ -641,7 +640,7 @@ cdef class RandomState:
def randint(self, low, high=None, size=None, dtype=int):
"""
- randint(low, high=None, size=None, dtype='l')
+ randint(low, high=None, size=None, dtype=int)
Return random integers from `low` (inclusive) to `high` (exclusive).
@@ -668,10 +667,8 @@ cdef class RandomState:
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
dtype : dtype, optional
- Desired dtype of the result. All dtypes are determined by their
- name, i.e., 'int64', 'int', etc, so byteorder is not available
- and a specific precision may have different C types depending
- on the platform. The default value is `np.int_`.
+ Desired dtype of the result. Byteorder must be native.
+ The default value is int.
.. versionadded:: 1.11.0
@@ -722,17 +719,16 @@ cdef class RandomState:
high = low
low = 0
- dt = np.dtype(dtype)
- key = dt.name
- if key not in _integers_types:
- raise TypeError('Unsupported dtype "%s" for randint' % key)
- if not dt.isnative:
+ _dtype = np.dtype(dtype)
+
+ if not _dtype.isnative:
# numpy 1.17.0, 2019-05-28
warnings.warn('Providing a dtype with a non-native byteorder is '
'not supported. If you require platform-independent '
'byteorder, call byteswap when required.\nIn future '
'version, providing byteorder will raise a '
'ValueError', DeprecationWarning)
+ _dtype = _dtype.newbyteorder()
# Implementation detail: the use a masked method to generate
# bounded uniform integers. Lemire's method is preferable since it is
@@ -741,24 +737,26 @@ cdef class RandomState:
cdef bint _masked = True
cdef bint _endpoint = False
- if key == 'int32':
+ if _dtype == np.int32:
ret = _rand_int32(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
- elif key == 'int64':
+ elif _dtype == np.int64:
ret = _rand_int64(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
- elif key == 'int16':
+ elif _dtype == np.int16:
ret = _rand_int16(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
- elif key == 'int8':
+ elif _dtype == np.int8:
ret = _rand_int8(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
- elif key == 'uint64':
+ elif _dtype == np.uint64:
ret = _rand_uint64(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
- elif key == 'uint32':
+ elif _dtype == np.uint32:
ret = _rand_uint32(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
- elif key == 'uint16':
+ elif _dtype == np.uint16:
ret = _rand_uint16(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
- elif key == 'uint8':
+ elif _dtype == np.uint8:
ret = _rand_uint8(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
- elif key == 'bool':
+ elif _dtype == np.bool_:
ret = _rand_bool(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
+ else:
+ raise TypeError('Unsupported dtype %r for randint' % _dtype)
if size is None and dtype in (bool, int, np.compat.long):
if np.array(ret).shape == ():
diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py
index 23267a9e1..d00538c19 100644
--- a/numpy/testing/_private/utils.py
+++ b/numpy/testing/_private/utils.py
@@ -2476,3 +2476,24 @@ def _get_mem_available():
return info['memfree'] + info['cached']
return None
+
+
+def _no_tracing(func):
+ """
+ Decorator to temporarily turn off tracing for the duration of a test.
+ Needed in tests that check refcounting, otherwise the tracing itself
+ influences the refcounts
+ """
+ if not hasattr(sys, 'gettrace'):
+ return func
+ else:
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ original_trace = sys.gettrace()
+ try:
+ sys.settrace(None)
+ return func(*args, **kwargs)
+ finally:
+ sys.settrace(original_trace)
+ return wrapper
+
diff --git a/pavement.py b/pavement.py
index 8a958fff3..09c7dbba3 100644
--- a/pavement.py
+++ b/pavement.py
@@ -41,7 +41,7 @@ from paver.easy import Bunch, options, task, sh
#-----------------------------------
# Path to the release notes
-RELEASE_NOTES = 'doc/source/release/1.18.1-notes.rst'
+RELEASE_NOTES = 'doc/source/release/1.18.2-notes.rst'
#-------------------------------------------------------
diff --git a/pytest.ini b/pytest.ini
index 4748e3575..74faefd6e 100644
--- a/pytest.ini
+++ b/pytest.ini
@@ -2,6 +2,7 @@
addopts = -l
norecursedirs = doc tools numpy/linalg/lapack_lite numpy/core/code_generators
doctest_optionflags = NORMALIZE_WHITESPACE ELLIPSIS ALLOW_UNICODE ALLOW_BYTES
+junit_family=xunit2
filterwarnings =
error
diff --git a/setup.py b/setup.py
index e6e696c11..d7f807b71 100755
--- a/setup.py
+++ b/setup.py
@@ -58,7 +58,7 @@ Operating System :: MacOS
MAJOR = 1
MINOR = 18
-MICRO = 1
+MICRO = 2
ISRELEASED = True
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
diff --git a/test_requirements.txt b/test_requirements.txt
index 627150673..7edd4c3ef 100644
--- a/test_requirements.txt
+++ b/test_requirements.txt
@@ -1,9 +1,8 @@
-cython==0.29.14
-pytest==5.3.1
+cython==0.29.15
+pytest==5.3.5
pytz==2019.3
pytest-cov==2.8.1
pickle5; python_version == '3.7'
pickle5; python_version == '3.6' and platform_python_implementation != 'PyPy'
-nose
# for numpy.random.test.test_extending
cffi
diff --git a/tools/pypy-test.sh b/tools/pypy-test.sh
index 33a97ad17..867c26d76 100755
--- a/tools/pypy-test.sh
+++ b/tools/pypy-test.sh
@@ -45,5 +45,5 @@ pypy3/bin/pypy3 runtests.py --debug-info --show-build-log -v -- -rsx \
echo Make sure the correct openblas has been linked in
-pypy3/bin/pip install .
+pypy3/bin/pypy3 -m pip install .
pypy3/bin/pypy3 tools/openblas_support.py --check_version "$OpenBLAS_version"
diff --git a/tools/travis-before-install.sh b/tools/travis-before-install.sh
index 9f8b66a47..dbe2f6ea2 100755
--- a/tools/travis-before-install.sh
+++ b/tools/travis-before-install.sh
@@ -1,18 +1,14 @@
#!/bin/bash
+# Exit the script immediately if a command exits with a non-zero status,
+# and print commands and their arguments as they are executed.
+set -ex
+
uname -a
free -m
df -h
ulimit -a
-if [ -n "$DOWNLOAD_OPENBLAS" ]; then
- pwd
- ls -ltrh
- target=$(python tools/openblas_support.py)
- sudo cp -r $target/lib/* /usr/lib
- sudo cp $target/include/* /usr/include
-fi
-
mkdir builds
pushd builds
@@ -22,16 +18,38 @@ pip install -U virtualenv
if [ -n "$USE_DEBUG" ]
then
- virtualenv --python=python3-dbg venv
+ virtualenv --python=$(which python3-dbg) venv
else
virtualenv --python=python venv
fi
source venv/bin/activate
python -V
+gcc --version
popd
-pip install --upgrade pip setuptools
-pip install -r test_requirements.txt
+pip install --upgrade pip
+
+# 'setuptools', 'wheel' and 'cython' are build dependencies. This information
+# is stored in pyproject.toml, but there is not yet a standard way to install
+# those dependencies with, say, a pip command, so we'll just hard-code their
+# installation here. We only need to install them separately for the cases
+# where numpy is installed with setup.py, which is the case for the Travis jobs
+# where the environment variables USE_DEBUG or USE_WHEEL are set. When pip is
+# used to install numpy, pip gets the build dependencies from pyproject.toml.
+# A specific version of cython is required, so we read the cython package
+# requirement using `grep cython test_requirements.txt` instead of simply
+# writing 'pip install setuptools wheel cython'.
+# urllib3 is needed for openblas_support
+pip install setuptools wheel urllib3 `grep cython test_requirements.txt`
+
+if [ -n "$DOWNLOAD_OPENBLAS" ]; then
+ pwd
+ target=$(python tools/openblas_support.py)
+ sudo cp -r $target/lib/* /usr/lib
+ sudo cp $target/include/* /usr/include
+fi
+
+
if [ -n "$USE_ASV" ]; then pip install asv; fi
diff --git a/tools/travis-test.sh b/tools/travis-test.sh
index 241b9d913..cd3ffe29a 100755
--- a/tools/travis-test.sh
+++ b/tools/travis-test.sh
@@ -48,7 +48,7 @@ setup_base()
if [ -z "$USE_DEBUG" ]; then
$PIP install -v . 2>&1 | tee log
else
- # Python3.5-dbg on travis seems to need this
+ # The job run with USE_DEBUG=1 on travis needs this.
export CFLAGS=$CFLAGS" -Wno-maybe-uninitialized"
$PYTHON setup.py build build_src --verbose-cfg build_ext --inplace 2>&1 | tee log
fi
@@ -65,7 +65,13 @@ setup_base()
run_test()
{
- $PIP install -r test_requirements.txt
+ # Install the test dependencies.
+ # Clear PYTHONOPTIMIZE when running `pip install -r test_requirements.txt`
+ # because version 2.19 of pycparser (a dependency of one of the packages
+ # in test_requirements.txt) does not provide a wheel, and the source tar
+ # file does not install correctly when Python's optimization level is set
+ # to strip docstrings (see https://github.com/eliben/pycparser/issues/291).
+ PYTHONOPTIMIZE="" $PIP install -r test_requirements.txt
if [ -n "$USE_DEBUG" ]; then
export PYTHONPATH=$PWD
@@ -135,16 +141,11 @@ run_test()
fi
}
+
export PYTHON
export PIP
-$PIP install setuptools
if [ -n "$USE_WHEEL" ] && [ $# -eq 0 ]; then
- # Build wheel
- $PIP install wheel
- # ensure that the pip / setuptools versions deployed inside
- # the venv are recent enough
- $PIP install -U virtualenv
# ensure some warnings are not issued
export CFLAGS=$CFLAGS" -Wno-sign-compare -Wno-unused-result"
# adjust gcc flags if C coverage requested
@@ -155,7 +156,7 @@ if [ -n "$USE_WHEEL" ] && [ $# -eq 0 ]; then
export F90='gfortran --coverage'
export LDFLAGS='--coverage'
fi
- $PYTHON setup.py build build_src --verbose-cfg bdist_wheel
+ $PYTHON setup.py build --warn-error build_src --verbose-cfg bdist_wheel
# Make another virtualenv to install into
virtualenv --python=`which $PYTHON` venv-for-wheel
. venv-for-wheel/bin/activate
@@ -167,8 +168,6 @@ if [ -n "$USE_WHEEL" ] && [ $# -eq 0 ]; then
run_test
elif [ -n "$USE_SDIST" ] && [ $# -eq 0 ]; then
- # use an up-to-date pip / setuptools inside the venv
- $PIP install -U virtualenv
# temporary workaround for sdist failures.
$PYTHON -c "import fcntl; fcntl.fcntl(1, fcntl.F_SETFL, 0)"
# ensure some warnings are not issued