author    DongHun Kwak <dh0128.kwak@samsung.com>  2020-12-31 09:41:19 +0900
committer DongHun Kwak <dh0128.kwak@samsung.com>  2020-12-31 09:41:19 +0900
commit    0c12ca49cde19388bfef915d7652c7f808d96bb8 (patch)
tree      d673bcf72751a4960eaebdf23aec1802af7c190c
parent    79bf063fb15c1b8bede19f336b67636cc6e0e867 (diff)
download  python-numpy-0c12ca49cde19388bfef915d7652c7f808d96bb8.tar.gz
          python-numpy-0c12ca49cde19388bfef915d7652c7f808d96bb8.tar.bz2
          python-numpy-0c12ca49cde19388bfef915d7652c7f808d96bb8.zip

Imported Upstream version 1.19.1 (upstream/1.19.1)
-rw-r--r--  .mailmap | 2
-rw-r--r--  INSTALL.rst.txt | 2
-rw-r--r--  azure-pipelines.yml | 3
-rw-r--r--  azure-steps-windows.yml | 2
-rw-r--r--  doc/TESTS.rst.txt | 121
-rw-r--r--  doc/changelog/1.19.0-changelog.rst | 3
-rw-r--r--  doc/changelog/1.19.1-changelog.rst | 53
-rw-r--r--  doc/source/release.rst | 1
-rw-r--r--  doc/source/release/1.19.1-notes.rst | 68
-rw-r--r--  numpy/__init__.pxd | 2
-rw-r--r--  numpy/__init__.py | 17
-rw-r--r--  numpy/core/include/numpy/npy_cpu.h | 4
-rw-r--r--  numpy/core/include/numpy/npy_endian.h | 3
-rw-r--r--  numpy/core/src/multiarray/arraytypes.c.src | 37
-rw-r--r--  numpy/core/src/multiarray/ctors.c | 2
-rw-r--r--  numpy/core/src/multiarray/descriptor.c | 4
-rw-r--r--  numpy/core/src/multiarray/methods.c | 1
-rw-r--r--  numpy/core/src/multiarray/scalarapi.c | 15
-rw-r--r--  numpy/core/src/multiarray/scalartypes.c.src | 78
-rw-r--r--  numpy/core/src/npymath/ieee754.c.src | 2
-rw-r--r--  numpy/core/src/umath/simd.inc.src | 4
-rw-r--r--  numpy/core/tests/test_api.py | 23
-rw-r--r--  numpy/core/tests/test_cpu_features.py | 54
-rw-r--r--  numpy/core/tests/test_multiarray.py | 5
-rw-r--r--  numpy/core/tests/test_regression.py | 5
-rw-r--r--  numpy/core/tests/test_umath_complex.py | 31
-rw-r--r--  numpy/distutils/fcompiler/gnu.py | 13
-rw-r--r--  numpy/lib/_iotools.py | 9
-rw-r--r--  numpy/lib/tests/test__iotools.py | 4
-rw-r--r--  numpy/lib/tests/test_io.py | 37
-rw-r--r--  numpy/lib/twodim_base.py | 2
-rw-r--r--  numpy/random/_common.pxd | 2
-rw-r--r--  numpy/random/_generator.pyx | 5
-rw-r--r--  numpy/random/mtrand.pyx | 2
-rw-r--r--  numpy/random/tests/test_extending.py | 4
-rw-r--r--  numpy/random/tests/test_generator_mt19937.py | 10
-rw-r--r--  numpy/random/tests/test_randomstate.py | 10
-rw-r--r--  pavement.py | 2
-rw-r--r--  pyproject.toml | 4
-rwxr-xr-x  setup.py | 16
-rw-r--r--  shippable.yml | 14
-rw-r--r--  test_requirements.txt | 8
-rwxr-xr-x  tools/cythonize.py | 4
-rwxr-xr-x  tools/pypy-test.sh | 2
-rwxr-xr-x  tools/travis-before-install.sh | 4
45 files changed, 542 insertions(+), 152 deletions(-)
diff --git a/.mailmap b/.mailmap
index 6a700941b..ae221c020 100644
--- a/.mailmap
+++ b/.mailmap
@@ -66,6 +66,7 @@ Christoph Gohlke <cgohlke@uci.edu> cgholke <?@?>
Christoph Gohlke <cgohlke@uci.edu> cgohlke <cgohlke@uci.edu>
Christoph Gohlke <cgohlke@uci.edu> Christolph Gohlke <cgohlke@uci.edu>
Chunlin Fang <fangchunlin@huawei.com> Qiyu8 <fangchunlin@huawei.com>
+Chunlin Fang <fangchunlin@huawei.com> Chunlin <fangchunlin@huawei.com>
Colin Snyder <47012605+colinsnyder@users.noreply.github.com> colinsnyder <47012605+colinsnyder@users.noreply.github.com>
Daniel B Allan <daniel.b.allan@gmail.com> danielballan <daniel.b.allan@gmail.com>
Daniel da Silva <mail@danieldasilva.org> Daniel da Silva <daniel@meltingwax.net>
@@ -95,6 +96,7 @@ Erik M. Bray <erik.bray@lri.fr> Erik Bray <erik.m.bray@gmail.com>
Eric Fode <ericfode@gmail.com> Eric Fode <ericfode@linuxlaptop.(none)>
Eric Quintero <eric.antonio.quintero@gmail.com> e-q <eric.antonio.quintero@gmail.com>
Ernest N. Mamikonyan <ernest.mamikonyan@gmail.com> mamikony <ernest.mamikonyan@sig.com>
+Etienne Guesnet <etienne.guesnet.external@atos.net> EGuesnet <51407514+EGuesnet@users.noreply.github.com>
Evgeni Burovski <evgeny.burovskiy@gmail.com> Evgeni Burovski <evgeni@burovski.me>
Evgeny Toder <evgeny.toder@jpmorgan.com> eltjpm <evgeny.toder@jpmorgan.com>
Fernando Perez <Fernando.Perez@berkeley.edu> Fernando Perez <fperez@fperez.org>
diff --git a/INSTALL.rst.txt b/INSTALL.rst.txt
index 2b9226751..1c33060a6 100644
--- a/INSTALL.rst.txt
+++ b/INSTALL.rst.txt
@@ -20,7 +20,7 @@ Building NumPy requires the following installed software:
e.g., on Debian/Ubuntu one needs to install both `python3` and
`python3-dev`. On Windows and macOS this is normally not an issue.
-2) Cython >= 0.29.14
+2) Cython >= 0.29.21
3) pytest__ (optional) 1.15 or later
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 564f5d8e8..f4d8ca142 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -249,7 +249,8 @@ stages:
fi
displayName: 'add gcc 4.8'
- script: |
- python3 -m pip install --user --upgrade pip setuptools
+ # python3 has no setuptools, so install one to get us going
+ python3 -m pip install --user --upgrade pip setuptools!=49.2.0
python3 -m pip install --user -r test_requirements.txt
CPPFLAGS='' CC=gcc-4.8 F77=gfortran-5 F90=gfortran-5 \
python3 runtests.py --debug-info --mode=full -- -rsx --junitxml=junit/test-results.xml
diff --git a/azure-steps-windows.yml b/azure-steps-windows.yml
index c39faddf9..04c578f7c 100644
--- a/azure-steps-windows.yml
+++ b/azure-steps-windows.yml
@@ -4,7 +4,7 @@ steps:
versionSpec: $(PYTHON_VERSION)
addToPath: true
architecture: $(PYTHON_ARCH)
-- script: python -m pip install --upgrade pip setuptools wheel
+- script: python -m pip install --upgrade pip
displayName: 'Install tools'
- script: python -m pip install -r test_requirements.txt
displayName: 'Install dependencies; some are optional to avoid test skips'
diff --git a/doc/TESTS.rst.txt b/doc/TESTS.rst.txt
index af47fe99c..4ab39c586 100644
--- a/doc/TESTS.rst.txt
+++ b/doc/TESTS.rst.txt
@@ -12,7 +12,7 @@ the `pytest`_ framework. The older framework is still maintained in order to
support downstream projects that use the old numpy framework, but all tests
for NumPy should use pytest.
-Our goal is that every module and package in SciPy and NumPy
+Our goal is that every module and package in NumPy
should have a thorough set of unit
tests. These tests should exercise the full functionality of a given
routine as well as its robustness to erroneous or unexpected input
@@ -28,26 +28,30 @@ is found in a routine, you should write a new test for that specific
case and add it to the test suite to prevent that bug from creeping
back in unnoticed.
-To run SciPy's full test suite, use the following::
+.. note::
- >>> import scipy
- >>> scipy.test()
+ SciPy uses the testing framework from :mod:`numpy.testing`,
+ so all of the NumPy examples shown below are also applicable to SciPy
-or from the command line::
+Testing NumPy
+'''''''''''''
- $ python runtests.py
+NumPy can be tested in a number of ways, choose any way you feel comfortable.
+
+Running tests from inside Python
+--------------------------------
-SciPy uses the testing framework from :mod:`numpy.testing`, so all
-the SciPy examples shown here are also applicable to NumPy. NumPy's full test
-suite can be run as follows::
+You can test an installed NumPy by `numpy.test`, for example,
+To run NumPy's full test suite, use the following::
>>> import numpy
- >>> numpy.test()
+ >>> numpy.test(label='slow')
-The test method may take two or more arguments; the first, ``label`` is a
-string specifying what should be tested and the second, ``verbose`` is an
-integer giving the level of output verbosity. See the docstring for
-numpy.test for details. The default value for ``label`` is 'fast' - which
+The test method may take two or more arguments; the first ``label`` is a
+string specifying what should be tested and the second ``verbose`` is an
+integer giving the level of output verbosity. See the docstring
+`numpy.test`
+for details. The default value for ``label`` is 'fast' - which
will run the standard tests. The string 'full' will run the full battery
of tests, including those identified as being slow to run. If ``verbose``
is 1 or less, the tests will just show information messages about the tests
@@ -55,38 +59,43 @@ that are run; but if it is greater than 1, then the tests will also provide
warnings on missing tests. So if you want to run every test and get
messages about which modules don't have tests::
- >>> scipy.test(label='full', verbose=2) # or scipy.test('full', 2)
+ >>> numpy.test(label='full', verbose=2) # or numpy.test('full', 2)
-Finally, if you are only interested in testing a subset of SciPy, for
-example, the ``integrate`` module, use the following::
+Finally, if you are only interested in testing a subset of NumPy, for
+example, the ``core`` module, use the following::
- >>> scipy.integrate.test()
+ >>> numpy.core.test()
-or from the command line::
+Running tests from the command line
+-----------------------------------
- $python runtests.py -t scipy/integrate/tests
+If you want to build NumPy in order to work on NumPy itself, use
+``runtests.py``.To run NumPy's full test suite::
-The rest of this page will give you a basic idea of how to add unit
-tests to modules in SciPy. It is extremely important for us to have
-extensive unit testing since this code is going to be used by
-scientists and researchers and is being developed by a large number of
-people spread across the world. So, if you are writing a package that
-you'd like to become part of SciPy, please write the tests as you
-develop the package. Also since much of SciPy is legacy code that was
-originally written without unit tests, there are still several modules
-that don't have tests yet. Please feel free to choose one of these
-modules and develop tests for it as you read through
-this introduction.
+ $ python runtests.py
+
+Testing a subset of NumPy::
+
+ $python runtests.py -t numpy/core/tests
+
+For detailed info on testing, see :ref:`testing-builds`
+
+Other methods of running tests
+------------------------------
+
+Run tests using your favourite IDE such as `vscode`_ or `pycharm`_
Writing your own tests
''''''''''''''''''''''
-Every Python module, extension module, or subpackage in the SciPy
+If you are writing a package that you'd like to become part of NumPy,
+please write the tests as you develop the package.
+Every Python module, extension module, or subpackage in the NumPy
package directory should have a corresponding ``test_<name>.py`` file.
-Pytest examines these files for test methods (named test*) and test
-classes (named Test*).
+Pytest examines these files for test methods (named ``test*``) and test
+classes (named ``Test*``).
-Suppose you have a SciPy module ``scipy/xxx/yyy.py`` containing a
+Suppose you have a NumPy module ``numpy/xxx/yyy.py`` containing a
function ``zzz()``. To test this function you would create a test
module called ``test_yyy.py``. If you only need to test one aspect of
``zzz``, you can simply add a test function::
@@ -100,7 +109,7 @@ a test class::
from numpy.testing import assert_, assert_raises
# import xxx symbols
- from scipy.xxx.yyy import zzz
+ from numpy.xxx.yyy import zzz
class TestZzz:
def test_simple(self):
@@ -119,6 +128,11 @@ that makes it hard to identify the test from the output of running the test
suite with ``verbose=2`` (or similar verbosity setting). Use plain comments
(``#``) if necessary.
+Also since much of NumPy is legacy code that was
+originally written without unit tests, there are still several modules
+that don't have tests yet. Please feel free to choose one of these
+modules and develop tests for it.
+
Labeling tests
--------------
@@ -126,8 +140,8 @@ As an alternative to ``pytest.mark.<label>``, there are a number of labels you
can use.
Unlabeled tests like the ones above are run in the default
-``scipy.test()`` run. If you want to label your test as slow - and
-therefore reserved for a full ``scipy.test(label='full')`` run, you
+``numpy.test()`` run. If you want to label your test as slow - and
+therefore reserved for a full ``numpy.test(label='full')`` run, you
can label it with a decorator::
# numpy.testing module includes 'import decorators as dec'
@@ -211,10 +225,10 @@ for numpy.lib::
>>> np.lib.test(doctests=True)
The doctests are run as if they are in a fresh Python instance which
-has executed ``import numpy as np``. Tests that are part of a SciPy
+has executed ``import numpy as np``. Tests that are part of a NumPy
subpackage will have that subpackage already imported. E.g. for a test
-in ``scipy/linalg/tests/``, the namespace will be created such that
-``from scipy import linalg`` has already executed.
+in ``numpy/linalg/tests/``, the namespace will be created such that
+``from numpy import linalg`` has already executed.
``tests/``
@@ -223,15 +237,15 @@ in ``scipy/linalg/tests/``, the namespace will be created such that
Rather than keeping the code and the tests in the same directory, we
put all the tests for a given subpackage in a ``tests/``
subdirectory. For our example, if it doesn't already exist you will
-need to create a ``tests/`` directory in ``scipy/xxx/``. So the path
-for ``test_yyy.py`` is ``scipy/xxx/tests/test_yyy.py``.
+need to create a ``tests/`` directory in ``numpy/xxx/``. So the path
+for ``test_yyy.py`` is ``numpy/xxx/tests/test_yyy.py``.
-Once the ``scipy/xxx/tests/test_yyy.py`` is written, its possible to
+Once the ``numpy/xxx/tests/test_yyy.py`` is written, its possible to
run the tests by going to the ``tests/`` directory and typing::
python test_yyy.py
-Or if you add ``scipy/xxx/tests/`` to the Python path, you could run
+Or if you add ``numpy/xxx/tests/`` to the Python path, you could run
the tests interactively in the interpreter like this::
>>> import test_yyy
@@ -262,14 +276,14 @@ section of your setup.py::
Now you can do the following to test your module::
- >>> import scipy
- >>> scipy.xxx.test()
+ >>> import numpy
+ >>> numpy.xxx.test()
-Also, when invoking the entire SciPy test suite, your tests will be
+Also, when invoking the entire NumPy test suite, your tests will be
found and run::
- >>> import scipy
- >>> scipy.test()
+ >>> import numpy
+ >>> numpy.test()
# your tests are included and run automatically!
Tips & Tricks
@@ -370,7 +384,14 @@ failures without requiring a fixed seed, reporting *minimal* examples for
each failure, and better-than-naive-random techniques for triggering bugs.
+Documentation for ``numpy.test``
+--------------------------------
+
+.. autofunction:: numpy.test
+
.. _nose: https://nose.readthedocs.io/en/latest/
.. _pytest: https://pytest.readthedocs.io
.. _parameterization: https://docs.pytest.org/en/latest/parametrize.html
.. _Hypothesis: https://hypothesis.readthedocs.io/en/latest/
+.. _vscode: https://code.visualstudio.com/docs/python/testing#_enable-a-test-framework
+.. _pycharm: https://www.jetbrains.com/help/pycharm/testing-your-first-python-application.html
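
The reworked testing guide above documents ``numpy.test`` and ``runtests.py`` as the main entry points. As a small illustration of the documented API (a sketch, not part of the patch itself), the full suite can be run from Python like so::

    import numpy as np

    # Equivalent to numpy.test('full', 2): run the full battery of tests,
    # including the slow ones, with warnings about missing tests.
    passed = np.test(label='full', verbose=2)
    print("all tests passed:", passed)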
diff --git a/doc/changelog/1.19.0-changelog.rst b/doc/changelog/1.19.0-changelog.rst
index bd743832a..725a5ba44 100644
--- a/doc/changelog/1.19.0-changelog.rst
+++ b/doc/changelog/1.19.0-changelog.rst
@@ -2,7 +2,7 @@
Contributors
============
-A total of 126 people contributed to this release. People with a "+" by their
+A total of 125 people contributed to this release. People with a "+" by their
names contributed a patch for the first time.
* Alex Henrie
@@ -24,7 +24,6 @@ names contributed a patch for the first time.
* Chris Barker
* Chris Holland +
* Christian Kastner +
-* Chunlin +
* Chunlin Fang +
* Damien Caliste +
* Dan Allan
diff --git a/doc/changelog/1.19.1-changelog.rst b/doc/changelog/1.19.1-changelog.rst
new file mode 100644
index 000000000..3b46ffadf
--- /dev/null
+++ b/doc/changelog/1.19.1-changelog.rst
@@ -0,0 +1,53 @@
+
+Contributors
+============
+
+A total of 15 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Abhinav Reddy +
+* Anirudh Subramanian
+* Antonio Larrosa +
+* Charles Harris
+* Chunlin Fang
+* Eric Wieser
+* Etienne Guesnet +
+* Kevin Sheppard
+* Matti Picus
+* Raghuveer Devulapalli
+* Roman Yurchak
+* Ross Barnowski
+* Sayed Adel
+* Sebastian Berg
+* Tyler Reddy
+
+Pull requests merged
+====================
+
+A total of 25 pull requests were merged for this release.
+
+* `#16649 <https://github.com/numpy/numpy/pull/16649>`__: MAINT, CI: disable Shippable cache
+* `#16652 <https://github.com/numpy/numpy/pull/16652>`__: MAINT: Replace `PyUString_GET_SIZE` with `PyUnicode_GetLength`.
+* `#16654 <https://github.com/numpy/numpy/pull/16654>`__: REL: Fix outdated docs link
+* `#16656 <https://github.com/numpy/numpy/pull/16656>`__: BUG: raise IEEE exception on AIX
+* `#16672 <https://github.com/numpy/numpy/pull/16672>`__: BUG: Fix bug in AVX complex absolute while processing array of...
+* `#16693 <https://github.com/numpy/numpy/pull/16693>`__: TST: Add extra debugging information to CPU features detection
+* `#16703 <https://github.com/numpy/numpy/pull/16703>`__: BLD: Add CPU entry for Emscripten / WebAssembly
+* `#16705 <https://github.com/numpy/numpy/pull/16705>`__: TST: Disable Python 3.9-dev testing.
+* `#16714 <https://github.com/numpy/numpy/pull/16714>`__: MAINT: Disable use_hugepages in case of ValueError
+* `#16724 <https://github.com/numpy/numpy/pull/16724>`__: BUG: Fix PyArray_SearchSorted signature.
+* `#16768 <https://github.com/numpy/numpy/pull/16768>`__: MAINT: Fixes for deprecated functions in scalartypes.c.src
+* `#16772 <https://github.com/numpy/numpy/pull/16772>`__: MAINT: Remove unneeded call to PyUnicode_READY
+* `#16776 <https://github.com/numpy/numpy/pull/16776>`__: MAINT: Fix deprecated functions in scalarapi.c
+* `#16779 <https://github.com/numpy/numpy/pull/16779>`__: BLD, ENH: Add RPATH support for AIX
+* `#16780 <https://github.com/numpy/numpy/pull/16780>`__: BUG: Fix default fallback in genfromtxt
+* `#16784 <https://github.com/numpy/numpy/pull/16784>`__: BUG: Added missing return after raising error in methods.c
+* `#16795 <https://github.com/numpy/numpy/pull/16795>`__: BLD: update cython to 0.29.21
+* `#16832 <https://github.com/numpy/numpy/pull/16832>`__: MAINT: setuptools 49.2.0 emits a warning, avoid it
+* `#16872 <https://github.com/numpy/numpy/pull/16872>`__: BUG: Validate output size in bin- and multinomial
+* `#16875 <https://github.com/numpy/numpy/pull/16875>`__: BLD, MAINT: Pin setuptools
+* `#16904 <https://github.com/numpy/numpy/pull/16904>`__: DOC: Reconstruct Testing Guideline.
+* `#16905 <https://github.com/numpy/numpy/pull/16905>`__: TST, BUG: Re-raise MemoryError exception in test_large_zip's...
+* `#16906 <https://github.com/numpy/numpy/pull/16906>`__: BUG,DOC: Fix bad MPL kwarg.
+* `#16916 <https://github.com/numpy/numpy/pull/16916>`__: BUG: Fix string/bytes to complex assignment
+* `#16922 <https://github.com/numpy/numpy/pull/16922>`__: REL: Prepare for NumPy 1.19.1 release
diff --git a/doc/source/release.rst b/doc/source/release.rst
index 1e5697828..18e8b200d 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -5,6 +5,7 @@ Release Notes
.. toctree::
:maxdepth: 3
+ 1.19.1 <release/1.19.1-notes>
1.19.0 <release/1.19.0-notes>
1.18.4 <release/1.18.4-notes>
1.18.3 <release/1.18.3-notes>
diff --git a/doc/source/release/1.19.1-notes.rst b/doc/source/release/1.19.1-notes.rst
new file mode 100644
index 000000000..4fc5528f5
--- /dev/null
+++ b/doc/source/release/1.19.1-notes.rst
@@ -0,0 +1,68 @@
+.. currentmodule:: numpy
+
+==========================
+NumPy 1.19.1 Release Notes
+==========================
+
+NumPy 1.19.1 fixes several bugs found in the 1.19.0 release, replaces several
+functions deprecated in the upcoming Python-3.9 release, has improved support
+for AIX, and has a number of development related updates to keep CI working
+with recent upstream changes.
+
+This release supports Python 3.6-3.8. Cython >= 0.29.21 needs to be used when
+building with Python 3.9 for testing purposes.
+
+
+Contributors
+============
+
+A total of 15 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Abhinav Reddy +
+* Anirudh Subramanian
+* Antonio Larrosa +
+* Charles Harris
+* Chunlin Fang
+* Eric Wieser
+* Etienne Guesnet +
+* Kevin Sheppard
+* Matti Picus
+* Raghuveer Devulapalli
+* Roman Yurchak
+* Ross Barnowski
+* Sayed Adel
+* Sebastian Berg
+* Tyler Reddy
+
+
+Pull requests merged
+====================
+
+A total of 25 pull requests were merged for this release.
+
+* `#16649 <https://github.com/numpy/numpy/pull/16649>`__: MAINT, CI: disable Shippable cache
+* `#16652 <https://github.com/numpy/numpy/pull/16652>`__: MAINT: Replace `PyUString_GET_SIZE` with `PyUnicode_GetLength`.
+* `#16654 <https://github.com/numpy/numpy/pull/16654>`__: REL: Fix outdated docs link
+* `#16656 <https://github.com/numpy/numpy/pull/16656>`__: BUG: raise IEEE exception on AIX
+* `#16672 <https://github.com/numpy/numpy/pull/16672>`__: BUG: Fix bug in AVX complex absolute while processing array of...
+* `#16693 <https://github.com/numpy/numpy/pull/16693>`__: TST: Add extra debugging information to CPU features detection
+* `#16703 <https://github.com/numpy/numpy/pull/16703>`__: BLD: Add CPU entry for Emscripten / WebAssembly
+* `#16705 <https://github.com/numpy/numpy/pull/16705>`__: TST: Disable Python 3.9-dev testing.
+* `#16714 <https://github.com/numpy/numpy/pull/16714>`__: MAINT: Disable use_hugepages in case of ValueError
+* `#16724 <https://github.com/numpy/numpy/pull/16724>`__: BUG: Fix PyArray_SearchSorted signature.
+* `#16768 <https://github.com/numpy/numpy/pull/16768>`__: MAINT: Fixes for deprecated functions in scalartypes.c.src
+* `#16772 <https://github.com/numpy/numpy/pull/16772>`__: MAINT: Remove unneeded call to PyUnicode_READY
+* `#16776 <https://github.com/numpy/numpy/pull/16776>`__: MAINT: Fix deprecated functions in scalarapi.c
+* `#16779 <https://github.com/numpy/numpy/pull/16779>`__: BLD, ENH: Add RPATH support for AIX
+* `#16780 <https://github.com/numpy/numpy/pull/16780>`__: BUG: Fix default fallback in genfromtxt
+* `#16784 <https://github.com/numpy/numpy/pull/16784>`__: BUG: Added missing return after raising error in methods.c
+* `#16795 <https://github.com/numpy/numpy/pull/16795>`__: BLD: update cython to 0.29.21
+* `#16832 <https://github.com/numpy/numpy/pull/16832>`__: MAINT: setuptools 49.2.0 emits a warning, avoid it
+* `#16872 <https://github.com/numpy/numpy/pull/16872>`__: BUG: Validate output size in bin- and multinomial
+* `#16875 <https://github.com/numpy/numpy/pull/16875>`__: BLD, MAINT: Pin setuptools
+* `#16904 <https://github.com/numpy/numpy/pull/16904>`__: DOC: Reconstruct Testing Guideline.
+* `#16905 <https://github.com/numpy/numpy/pull/16905>`__: TST, BUG: Re-raise MemoryError exception in test_large_zip's...
+* `#16906 <https://github.com/numpy/numpy/pull/16906>`__: BUG,DOC: Fix bad MPL kwarg.
+* `#16916 <https://github.com/numpy/numpy/pull/16916>`__: BUG: Fix string/bytes to complex assignment
+* `#16922 <https://github.com/numpy/numpy/pull/16922>`__: REL: Prepare for NumPy 1.19.1 release
diff --git a/numpy/__init__.pxd b/numpy/__init__.pxd
index d5c50d9bf..fd736a9c1 100644
--- a/numpy/__init__.pxd
+++ b/numpy/__init__.pxd
@@ -607,7 +607,7 @@ cdef extern from "numpy/arrayobject.h":
object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE)
int PyArray_Sort (ndarray, int, NPY_SORTKIND)
object PyArray_ArgSort (ndarray, int, NPY_SORTKIND)
- object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE, object)
+ object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE, PyObject *)
object PyArray_ArgMax (ndarray, int, ndarray)
object PyArray_ArgMin (ndarray, int, ndarray)
object PyArray_Reshape (ndarray, object)
diff --git a/numpy/__init__.py b/numpy/__init__.py
index 575e8ea3d..70c336106 100644
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -293,11 +293,18 @@ else:
import os
use_hugepage = os.environ.get("NUMPY_MADVISE_HUGEPAGE", None)
if sys.platform == "linux" and use_hugepage is None:
- use_hugepage = 1
- kernel_version = os.uname().release.split(".")[:2]
- kernel_version = tuple(int(v) for v in kernel_version)
- if kernel_version < (4, 6):
- use_hugepage = 0
+ # If there is an issue with parsing the kernel version,
+ # set use_hugepages to 0. Usage of LooseVersion will handle
+ # the kernel version parsing better, but avoided since it
+ # will increase the import time. See: #16679 for related discussion.
+ try:
+ use_hugepage = 1
+ kernel_version = os.uname().release.split(".")[:2]
+ kernel_version = tuple(int(v) for v in kernel_version)
+ if kernel_version < (4, 6):
+ use_hugepage = 0
+ except ValueError:
+ use_hugepages = 0
elif use_hugepage is None:
# This is not Linux, so it should not matter, just enable anyway
use_hugepage = 1
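
The hunk above guards the kernel-version parse with ``try``/``except ValueError`` so that an unusual ``os.uname().release`` string no longer breaks ``import numpy`` (see gh-16679); note the ``except`` branch in the patch assigns ``use_hugepages`` with a trailing ``s``, so the sketch below uses the intended name. A minimal standalone version of the logic::

    import os

    def madvise_hugepage_default():
        """Sketch of the fallback above: request hugepages only on kernels
        >= 4.6, and fall back to 0 if the release string cannot be parsed."""
        use_hugepage = 1
        try:
            release = os.uname().release.split(".")[:2]
            if tuple(int(v) for v in release) < (4, 6):
                use_hugepage = 0
        except ValueError:
            use_hugepage = 0
        return use_hugepage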
diff --git a/numpy/core/include/numpy/npy_cpu.h b/numpy/core/include/numpy/npy_cpu.h
index 5edd8f42e..509e23a51 100644
--- a/numpy/core/include/numpy/npy_cpu.h
+++ b/numpy/core/include/numpy/npy_cpu.h
@@ -18,6 +18,7 @@
* NPY_CPU_ARCEL
* NPY_CPU_ARCEB
* NPY_CPU_RISCV64
+ * NPY_CPU_WASM
*/
#ifndef _NPY_CPUARCH_H_
#define _NPY_CPUARCH_H_
@@ -102,6 +103,9 @@
#define NPY_CPU_ARCEB
#elif defined(__riscv) && defined(__riscv_xlen) && __riscv_xlen == 64
#define NPY_CPU_RISCV64
+#elif defined(__EMSCRIPTEN__)
+ /* __EMSCRIPTEN__ is defined by emscripten: an LLVM-to-Web compiler */
+ #define NPY_CPU_WASM
#else
#error Unknown CPU, please report this to numpy maintainers with \
information about your platform (OS, CPU and compiler)
diff --git a/numpy/core/include/numpy/npy_endian.h b/numpy/core/include/numpy/npy_endian.h
index 44cdffd14..aa367a002 100644
--- a/numpy/core/include/numpy/npy_endian.h
+++ b/numpy/core/include/numpy/npy_endian.h
@@ -48,7 +48,8 @@
|| defined(NPY_CPU_MIPSEL) \
|| defined(NPY_CPU_PPC64LE) \
|| defined(NPY_CPU_ARCEL) \
- || defined(NPY_CPU_RISCV64)
+ || defined(NPY_CPU_RISCV64) \
+ || defined(NPY_CPU_WASM)
#define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN
#elif defined(NPY_CPU_PPC) \
|| defined(NPY_CPU_SPARC) \
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index 38d5f21eb..578d55342 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -298,9 +298,42 @@ static int
oop.real = NPY_NAN;
oop.imag = NPY_NAN;
}
+ else if (PyBytes_Check(op) || PyUnicode_Check(op)) {
+ /*
+ * Unlike most numeric conversion functions PyComplex_AsCComplex
+ * does not handle strings, so we have to use its constructor.
+ */
+ PyObject *pycomplex, *args;
+ if (PyBytes_Check(op)) {
+ /* The complex constructor expects unicode */
+ PyObject *unicode;
+ unicode = PyUnicode_FromEncodedObject(op, NULL, NULL);
+ if (unicode == NULL) {
+ return -1;
+ }
+ args = PyTuple_Pack(1, unicode);
+ Py_DECREF(unicode);
+ }
+ else {
+ args = PyTuple_Pack(1, op);
+ }
+ if (args == NULL) {
+ return -1;
+ }
+ pycomplex = PyComplex_Type.tp_new(&PyComplex_Type, args, NULL);
+ Py_DECREF(args);
+ if (pycomplex == NULL) {
+ return -1;
+ }
+ oop = PyComplex_AsCComplex(pycomplex);
+ Py_DECREF(pycomplex);
+ if (error_converting(oop.real)) {
+ return -1;
+ }
+ }
else {
- oop = PyComplex_AsCComplex (op);
- if (PyErr_Occurred()) {
+ oop = PyComplex_AsCComplex(op);
+ if (error_converting(oop.real)) {
return -1;
}
}
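
Because ``PyComplex_AsCComplex`` does not accept strings, the new branch above routes bytes and str operands through the ``complex`` constructor (``PyComplex_Type.tp_new``). The visible effect, mirrored by ``test_string_to_complex_cast`` and the updated ``test_object_casting_errors`` later in this patch, is roughly::

    import numpy as np

    # Parsable bytes/str inputs now convert to complex scalars.
    assert np.complex128(b"1+3j") == 1 + 3j
    assert np.complex64(b"1+3j") == 1 + 3j

    # An unparsable string raises ValueError (not TypeError) when cast.
    try:
        np.array(["AAAAA"], dtype=object).astype("c8")
    except ValueError:
        print("unparsable string rejected with ValueError")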
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index 14e64b647..be0b0b8b0 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -2362,7 +2362,7 @@ _is_default_descr(PyObject *descr, PyObject *typestr) {
return 0;
}
name = PyTuple_GET_ITEM(tuple, 0);
- if (!(PyUString_Check(name) && PyUString_GET_SIZE(name) == 0)) {
+ if (!(PyUnicode_Check(name) && PyUnicode_GetLength(name) == 0)) {
return 0;
}
typestr2 = PyTuple_GET_ITEM(tuple, 1);
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index b26a26abf..59498ad6e 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -469,7 +469,7 @@ _convert_from_array_descr(PyObject *obj, int align)
/* Insert name into nameslist */
Py_INCREF(name);
- if (PyUString_GET_SIZE(name) == 0) {
+ if (PyUnicode_GetLength(name) == 0) {
Py_DECREF(name);
if (title == NULL) {
name = PyUString_FromFormat("f%d", i);
@@ -478,7 +478,7 @@ _convert_from_array_descr(PyObject *obj, int align)
}
}
/* On Py3, allow only non-empty Unicode strings as field names */
- else if (PyUString_Check(title) && PyUString_GET_SIZE(title) > 0) {
+ else if (PyUnicode_Check(title) && PyUnicode_GetLength(title) > 0) {
name = title;
Py_INCREF(name);
}
diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c
index e2026ec1c..8b4009edf 100644
--- a/numpy/core/src/multiarray/methods.c
+++ b/numpy/core/src/multiarray/methods.c
@@ -741,6 +741,7 @@ array_setscalar(PyArrayObject *self, PyObject *args)
else {
PyErr_SetString(PyExc_ValueError,
"can only convert an array of size 1 to a Python scalar");
+ return NULL;
}
}
/* Special case of C-order flat indexing... :| */
diff --git a/numpy/core/src/multiarray/scalarapi.c b/numpy/core/src/multiarray/scalarapi.c
index f3c440dc6..439646e67 100644
--- a/numpy/core/src/multiarray/scalarapi.c
+++ b/numpy/core/src/multiarray/scalarapi.c
@@ -69,7 +69,7 @@ scalar_value(PyObject *scalar, PyArray_Descr *descr)
CASE(TIMEDELTA, Timedelta);
#undef CASE
case NPY_STRING:
- return (void *)PyString_AS_STRING(scalar);
+ return (void *)PyBytes_AsString(scalar);
case NPY_UNICODE:
/* lazy initialization, to reduce the memory used by string scalars */
if (PyArrayScalar_VAL(scalar, Unicode) == NULL) {
@@ -141,7 +141,18 @@ scalar_value(PyObject *scalar, PyArray_Descr *descr)
return (void *)PyString_AS_STRING(scalar);
}
if (_CHK(Unicode)) {
- return (void *)PyUnicode_AS_DATA(scalar);
+ /* Treat this the same as the NPY_UNICODE base class */
+
+ /* lazy initialization, to reduce the memory used by string scalars */
+ if (PyArrayScalar_VAL(scalar, Unicode) == NULL) {
+ Py_UCS4 *raw_data = PyUnicode_AsUCS4Copy(scalar);
+ if (raw_data == NULL) {
+ return NULL;
+ }
+ PyArrayScalar_VAL(scalar, Unicode) = raw_data;
+ return (void *)raw_data;
+ }
+ return PyArrayScalar_VAL(scalar, Unicode);
}
if (_CHK(Void)) {
/* Note: no & needed here, so can't use _IFCASE */
diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index bf8e03bd2..b48fdf0c6 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -342,47 +342,68 @@ format_@name@(@type@ val, npy_bool scientific,
/**end repeat**/
/*
- * over-ride repr and str of array-scalar strings and unicode to
- * remove NULL bytes and then call the corresponding functions
- * of string and unicode.
- *
- * FIXME:
- * is this really a good idea?
- * stop using Py_UNICODE here.
+ * Over-ride repr and str of array-scalar byte strings to remove NULL bytes and
+ * then call the corresponding functions of PyBytes_Type to generate the string
*/
/**begin repeat
- * #name = string*2,unicode*2#
- * #form = (repr,str)*2#
- * #Name = String*2,Unicode*2#
- * #NAME = STRING*2,UNICODE*2#
- * #extra = AndSize*2,,#
- * #type = npy_char*2, Py_UNICODE*2#
+ * #form = repr, str#
*/
static PyObject *
-@name@type_@form@(PyObject *self)
+stringtype_@form@(PyObject *self)
{
- const @type@ *dptr, *ip;
- int len;
+ const npy_char *dptr, *ip;
+ Py_ssize_t len;
PyObject *new;
PyObject *ret;
- ip = dptr = Py@Name@_AS_@NAME@(self);
- len = Py@Name@_GET_SIZE(self);
- dptr += len-1;
- while(len > 0 && *dptr-- == 0) {
- len--;
- }
- new = Py@Name@_From@Name@@extra@(ip, len);
+ ip = PyBytes_AS_STRING(self);
+ len = PyBytes_GET_SIZE(self);
+ for(dptr = ip + len - 1; len > 0 && *dptr == 0; len--, dptr--);
+ new = PyBytes_FromStringAndSize(ip, len);
if (new == NULL) {
- return PyUString_FromString("");
+ return NULL;
}
- ret = Py@Name@_Type.tp_@form@(new);
+ ret = PyBytes_Type.tp_@form@(new);
Py_DECREF(new);
return ret;
}
/**end repeat**/
+/*
+ * Over-ride repr and str of array-scalar strings to remove NULL code points and
+ * then call the corresponding functions of PyUnicode_Type to generate the string
+ */
+
+/**begin repeat
+ * #form = repr, str#
+ */
+static PyObject *
+unicodetype_@form@(PyObject *self)
+{
+ Py_UCS4 *dptr, *ip;
+ Py_ssize_t len;
+ PyObject *new;
+ PyObject *ret;
+
+ /* PyUnicode_READY is called by PyUnicode_GetLength */
+ len = PyUnicode_GetLength(self);
+ ip = PyUnicode_AsUCS4Copy(self);
+ if (ip == NULL) {
+ return NULL;
+ }
+ for(dptr = ip + len - 1; len > 0 && *dptr == 0; len--, dptr--);
+ new = PyUnicode_FromKindAndData(PyUnicode_4BYTE_KIND, ip, len);
+ if (new == NULL) {
+ PyMem_Free(ip);
+ return NULL;
+ }
+ ret = PyUnicode_Type.tp_@form@(new);
+ Py_DECREF(new);
+ PyMem_Free(ip);
+ return ret;
+}
+/**end repeat**/
/*
* Convert array of bytes to a string representation much like bytes.__repr__,
@@ -720,12 +741,13 @@ legacy_@name@_format@kind@(@type@ val)
return NULL;
}
if (!npy_isfinite(val.imag)) {
- strncat(buf, "*", 1);
+ strncat(buf, "*", sizeof(buf) - strlen(buf) - 1);
}
- strncat(buf, "j", 1);
+ strncat(buf, "j", sizeof(buf) - strlen(buf) - 1);
}
else {
char re[64], im[64];
+
if (npy_isfinite(val.real)) {
PyOS_snprintf(format, sizeof(format), _FMT1, @NAME@PREC_@KIND@);
res = NumPyOS_ascii_format@suff@(re, sizeof(re), format,
@@ -768,7 +790,7 @@ legacy_@name@_format@kind@(@type@ val)
strcpy(im, "-inf");
}
if (!npy_isfinite(val.imag)) {
- strncat(im, "*", 1);
+ strncat(im, "*", sizeof(im) - strlen(im) - 1);
}
}
PyOS_snprintf(buf, sizeof(buf), "(%s%sj)", re, im);
diff --git a/numpy/core/src/npymath/ieee754.c.src b/numpy/core/src/npymath/ieee754.c.src
index 3f66b24a4..4e6ddb712 100644
--- a/numpy/core/src/npymath/ieee754.c.src
+++ b/numpy/core/src/npymath/ieee754.c.src
@@ -634,7 +634,7 @@ void npy_set_floatstatus_invalid(void)
fpsetsticky(FP_X_INV);
}
-#elif defined(_AIX)
+#elif defined(_AIX) && !defined(__GNUC__)
#include <float.h>
#include <fpxcp.h>
diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src
index 4f511cf09..b28c63930 100644
--- a/numpy/core/src/umath/simd.inc.src
+++ b/numpy/core/src/umath/simd.inc.src
@@ -244,7 +244,9 @@ static NPY_INLINE int
run_unary_avx512f_@func@_@TYPE@(char **args, const npy_intp *dimensions, const npy_intp *steps)
{
#if defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS
- if ((IS_OUTPUT_BLOCKABLE_UNARY((npy_uint)(@esize@/@outsize@), 64)) && (labs(steps[0]) < 2*@max_stride@*@esize@)) {
+ if ((IS_OUTPUT_BLOCKABLE_UNARY((npy_uint)(@esize@/@outsize@), 64)) &&
+ (labs(steps[0]) < 2*@max_stride@*@esize@) &&
+ ((steps[0] & (@esize@-1)) == 0)) {
AVX512F_@func@_@TYPE@((@type@*)args[1], (@type@*)args[0], dimensions[0], steps[0]);
return 1;
}
diff --git a/numpy/core/tests/test_api.py b/numpy/core/tests/test_api.py
index 2600d409a..09105905a 100644
--- a/numpy/core/tests/test_api.py
+++ b/numpy/core/tests/test_api.py
@@ -317,6 +317,29 @@ def test_string_to_boolean_cast_errors(dtype, out_dtype):
with assert_raises(ValueError):
arr.astype(out_dtype)
+@pytest.mark.parametrize("str_type", [str, bytes, np.str_, np.unicode_])
+@pytest.mark.parametrize("scalar_type",
+ [np.complex64, np.complex128, np.clongdouble])
+def test_string_to_complex_cast(str_type, scalar_type):
+ value = scalar_type(b"1+3j")
+ assert scalar_type(value) == 1+3j
+ assert np.array([value], dtype=object).astype(scalar_type)[()] == 1+3j
+ assert np.array(value).astype(scalar_type)[()] == 1+3j
+ arr = np.zeros(1, dtype=scalar_type)
+ arr[0] = value
+ assert arr[0] == 1+3j
+
+@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
+def test_none_to_nan_cast(dtype):
+ # Note that at the time of writing this test, the scalar constructors
+ # reject None
+ arr = np.zeros(1, dtype=dtype)
+ arr[0] = None
+ assert np.isnan(arr)[0]
+ assert np.isnan(np.array(None, dtype=dtype))[()]
+ assert np.isnan(np.array([None], dtype=dtype))[0]
+ assert np.isnan(np.array(None).astype(dtype))[()]
+
def test_copyto_fromscalar():
a = np.arange(6, dtype='f4').reshape(2, 3)
diff --git a/numpy/core/tests/test_cpu_features.py b/numpy/core/tests/test_cpu_features.py
index 337b7330c..bafa5a05f 100644
--- a/numpy/core/tests/test_cpu_features.py
+++ b/numpy/core/tests/test_cpu_features.py
@@ -1,8 +1,53 @@
import sys, platform, re, pytest
-
-from numpy.testing import assert_equal
from numpy.core._multiarray_umath import __cpu_features__
+def assert_features_equal(actual, desired, fname):
+ __tracebackhide__ = True # Hide traceback for py.test
+ actual, desired = str(actual), str(desired)
+ if actual == desired:
+ return
+ detected = str(__cpu_features__).replace("'", "")
+ try:
+ with open("/proc/cpuinfo", "r") as fd:
+ cpuinfo = fd.read(2048)
+ except Exception as err:
+ cpuinfo = str(err)
+
+ try:
+ import subprocess
+ auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV="1"))
+ auxv = auxv.decode()
+ except Exception as err:
+ auxv = str(err)
+
+ import textwrap
+ error_report = textwrap.indent(
+"""
+###########################################
+### Extra debugging information
+###########################################
+-------------------------------------------
+--- NumPy Detections
+-------------------------------------------
+%s
+-------------------------------------------
+--- SYS / CPUINFO
+-------------------------------------------
+%s....
+-------------------------------------------
+--- SYS / AUXV
+-------------------------------------------
+%s
+""" % (detected, cpuinfo, auxv), prefix='\r')
+
+ raise AssertionError((
+ "Failure Detection\n"
+ " NAME: '%s'\n"
+ " ACTUAL: %s\n"
+ " DESIRED: %s\n"
+ "%s"
+ ) % (fname, actual, desired, error_report))
+
class AbstractTest(object):
features = []
features_groups = {}
@@ -12,17 +57,16 @@ class AbstractTest(object):
def load_flags(self):
# a hook
pass
-
def test_features(self):
self.load_flags()
for gname, features in self.features_groups.items():
test_features = [self.cpu_have(f) for f in features]
- assert_equal(__cpu_features__.get(gname), all(test_features))
+ assert_features_equal(__cpu_features__.get(gname), all(test_features), gname)
for feature_name in self.features:
cpu_have = self.cpu_have(feature_name)
npy_have = __cpu_features__.get(feature_name)
- assert_equal(npy_have, cpu_have)
+ assert_features_equal(npy_have, cpu_have, feature_name)
def cpu_have(self, feature_name):
map_names = self.features_map.get(feature_name, feature_name)
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 763b8fc5a..002603ef0 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -1369,6 +1369,11 @@ class TestStructured:
a = np.array([(1,2)], dtype=[('a', 'i4'), ('b', 'i4')])
a[['a', 'b']] = a[['b', 'a']]
assert_equal(a[0].item(), (2,1))
+
+ def test_scalar_assignment(self):
+ with assert_raises(ValueError):
+ arr = np.arange(25).reshape(5, 5)
+ arr.itemset(3)
def test_structuredscalar_indexing(self):
# test gh-7262
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index 96a6d810f..ef205555a 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -2430,9 +2430,10 @@ class TestRegression:
assert b'numpy.core.multiarray' in s
def test_object_casting_errors(self):
- # gh-11993
+ # gh-11993 update to ValueError (see gh-16909), since strings can in
+ # principle be converted to complex, but this string cannot.
arr = np.array(['AAAAA', 18465886.0, 18465886.0], dtype=object)
- assert_raises(TypeError, arr.astype, 'c8')
+ assert_raises(ValueError, arr.astype, 'c8')
def test_eff1d_casting(self):
# gh-12711
diff --git a/numpy/core/tests/test_umath_complex.py b/numpy/core/tests/test_umath_complex.py
index a21158420..a685abe9a 100644
--- a/numpy/core/tests/test_umath_complex.py
+++ b/numpy/core/tests/test_umath_complex.py
@@ -577,3 +577,34 @@ class TestComplexAbsoluteAVX(object):
arr = np.ones(arraysize, dtype=astype)
abs_true = np.ones(arraysize, dtype=arr.real.dtype)
assert_equal(np.abs(arr[::stride]), abs_true[::stride])
+
+# Testcase taken as is from https://github.com/numpy/numpy/issues/16660
+class TestComplexAbsoluteMixedDTypes(object):
+ @pytest.mark.parametrize("stride", [-4,-3,-2,-1,1,2,3,4])
+ @pytest.mark.parametrize("astype", [np.complex64, np.complex128])
+ @pytest.mark.parametrize("func", ['abs', 'square', 'conjugate'])
+
+ def test_array(self, stride, astype, func):
+ dtype = [('template_id', '<i8'), ('bank_chisq','<f4'),
+ ('bank_chisq_dof','<i8'), ('chisq', '<f4'), ('chisq_dof','<i8'),
+ ('cont_chisq', '<f4'), ('psd_var_val', '<f4'), ('sg_chisq','<f4'),
+ ('mycomplex', astype), ('time_index', '<i8')]
+ vec = np.array([
+ (0, 0., 0, -31.666483, 200, 0., 0., 1. , 3.0+4.0j , 613090),
+ (1, 0., 0, 260.91525 , 42, 0., 0., 1. , 5.0+12.0j , 787315),
+ (1, 0., 0, 52.15155 , 42, 0., 0., 1. , 8.0+15.0j , 806641),
+ (1, 0., 0, 52.430195, 42, 0., 0., 1. , 7.0+24.0j , 1363540),
+ (2, 0., 0, 304.43646 , 58, 0., 0., 1. , 20.0+21.0j , 787323),
+ (3, 0., 0, 299.42108 , 52, 0., 0., 1. , 12.0+35.0j , 787332),
+ (4, 0., 0, 39.4836 , 28, 0., 0., 9.182192, 9.0+40.0j , 787304),
+ (4, 0., 0, 76.83787 , 28, 0., 0., 1. , 28.0+45.0j, 1321869),
+ (5, 0., 0, 143.26366 , 24, 0., 0., 10.996129, 11.0+60.0j , 787299)], dtype=dtype)
+ myfunc = getattr(np, func)
+ a = vec['mycomplex']
+ g = myfunc(a[::stride])
+
+ b = vec['mycomplex'].copy()
+ h = myfunc(b[::stride])
+
+ assert_array_max_ulp(h.real, g.real, 1)
+ assert_array_max_ulp(h.imag, g.imag, 1)
diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py
index 23d905393..caa08549e 100644
--- a/numpy/distutils/fcompiler/gnu.py
+++ b/numpy/distutils/fcompiler/gnu.py
@@ -253,15 +253,20 @@ class GnuFCompiler(FCompiler):
return []
def runtime_library_dir_option(self, dir):
- if sys.platform[:3] == 'aix' or sys.platform == 'win32':
- # Linux/Solaris/Unix support RPATH, Windows and AIX do not
+ if sys.platform == 'win32':
+ # Linux/Solaris/Unix support RPATH, Windows does not
raise NotImplementedError
# TODO: could use -Xlinker here, if it's supported
assert "," not in dir
- sep = ',' if sys.platform == 'darwin' else '='
- return '-Wl,-rpath%s%s' % (sep, dir)
+ if sys.platform == 'darwin':
+ return f'-Wl,-rpath,{dir}'
+ elif sys.platform[:3] == 'aix':
+ # AIX RPATH is called LIBPATH
+ return f'-Wl,-blibpath:{dir}'
+ else:
+ return f'-Wl,-rpath={dir}'
class Gnu95FCompiler(GnuFCompiler):
diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py
index 84aff5e5d..7560bf4da 100644
--- a/numpy/lib/_iotools.py
+++ b/numpy/lib/_iotools.py
@@ -506,13 +506,16 @@ class StringConverter:
_mapper.extend([(nx.float64, float, nx.nan),
(nx.complex128, complex, nx.nan + 0j),
(nx.longdouble, nx.longdouble, nx.nan),
- (nx.unicode_, asunicode, '???'),
- (nx.string_, asbytes, '???'),
# If a non-default dtype is passed, fall back to generic
# ones (should only be used for the converter)
(nx.integer, int, -1),
(nx.floating, float, nx.nan),
- (nx.complexfloating, complex, nx.nan + 0j),])
+ (nx.complexfloating, complex, nx.nan + 0j),
+ # Last, try with the string types (must be last, because
+ # `_mapper[-1]` is used as default in some cases)
+ (nx.unicode_, asunicode, '???'),
+ (nx.string_, asbytes, '???'),
+ ])
@classmethod
def _getdtype(cls, val):
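
The string entries are moved to the end of ``_mapper`` because, as the new comment notes, ``_mapper[-1]`` is used as the default in some cases; this is the gh-16780 fix for the default fallback in ``genfromtxt``. A short sketch of the resulting behaviour (it matches ``test_dtype_with_object_no_converter`` added further down)::

    import numpy as np
    from io import StringIO

    # An object dtype with no explicit converter now falls back to bytes.
    parsed = np.genfromtxt(StringIO("string"), dtype=object)
    assert parsed[()] == b"string"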
diff --git a/numpy/lib/tests/test__iotools.py b/numpy/lib/tests/test__iotools.py
index 6964c1128..a5b787025 100644
--- a/numpy/lib/tests/test__iotools.py
+++ b/numpy/lib/tests/test__iotools.py
@@ -177,12 +177,12 @@ class TestStringConverter:
# test str
# note that the longdouble type has been skipped, so the
# _status increases by 2. Everything should succeed with
- # unicode conversion (5).
+ # unicode conversion (8).
for s in ['a', b'a']:
res = converter.upgrade(s)
assert_(type(res) is str)
assert_equal(res, 'a')
- assert_equal(converter._status, 5 + status_offset)
+ assert_equal(converter._status, 8 + status_offset)
def test_missing(self):
"Tests the use of missing values."
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index 9abde3e11..7714b3167 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -13,7 +13,8 @@ from tempfile import NamedTemporaryFile
from io import BytesIO, StringIO
from datetime import datetime
import locale
-from multiprocessing import Process
+from multiprocessing import Process, Value
+from ctypes import c_bool
import numpy as np
import numpy.ma as ma
@@ -574,16 +575,29 @@ class TestSaveTxt:
@pytest.mark.slow
@requires_memory(free_bytes=7e9)
def test_large_zip(self):
- def check_large_zip():
- # The test takes at least 6GB of memory, writes a file larger than 4GB
- test_data = np.asarray([np.random.rand(np.random.randint(50,100),4)
- for i in range(800000)], dtype=object)
- with tempdir() as tmpdir:
- np.savez(os.path.join(tmpdir, 'test.npz'), test_data=test_data)
+ def check_large_zip(memoryerror_raised):
+ memoryerror_raised.value = False
+ try:
+ # The test takes at least 6GB of memory, writes a file larger
+ # than 4GB
+ test_data = np.asarray([np.random.rand(
+ np.random.randint(50,100),4)
+ for i in range(800000)], dtype=object)
+ with tempdir() as tmpdir:
+ np.savez(os.path.join(tmpdir, 'test.npz'),
+ test_data=test_data)
+ except MemoryError:
+ memoryerror_raised.value = True
+ raise
# run in a subprocess to ensure memory is released on PyPy, see gh-15775
- p = Process(target=check_large_zip)
+ # Use an object in shared memory to re-raise the MemoryError exception
+ # in our process if needed, see gh-16889
+ memoryerror_raised = Value(c_bool)
+ p = Process(target=check_large_zip, args=(memoryerror_raised,))
p.start()
p.join()
+ if memoryerror_raised.value:
+ raise MemoryError("Child process raised a MemoryError exception")
assert p.exitcode == 0
class LoadTxtBase:
@@ -1567,6 +1581,13 @@ M 33 21.99
test = np.genfromtxt(TextIO(data), delimiter=";",
dtype=ndtype, converters=converters)
+ def test_dtype_with_object_no_converter(self):
+ # Object without a converter uses bytes:
+ parsed = np.genfromtxt(TextIO("1"), dtype=object)
+ assert parsed[()] == b"1"
+ parsed = np.genfromtxt(TextIO("string"), dtype=object)
+ assert parsed[()] == b"string"
+
def test_userconverters_with_explicit_dtype(self):
# Test user_converters w/ explicit (standard) dtype
data = TextIO('skip,skip,2001-01-01,1.0,skip')
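
The reworked ``test_large_zip`` above runs the memory-hungry zip write in a child process and uses a shared ``multiprocessing.Value`` flag so that a ``MemoryError`` raised in the child can be re-raised in the parent (gh-16889). The pattern, reduced to a standalone sketch with a hypothetical worker::

    from ctypes import c_bool
    from multiprocessing import Process, Value

    def worker(memoryerror_raised):
        # Hypothetical stand-in for the real multi-gigabyte np.savez call.
        memoryerror_raised.value = False
        try:
            data = bytearray(10)
        except MemoryError:
            memoryerror_raised.value = True
            raise

    if __name__ == "__main__":
        flag = Value(c_bool)
        p = Process(target=worker, args=(flag,))
        p.start()
        p.join()
        if flag.value:
            raise MemoryError("Child process raised a MemoryError exception")
        assert p.exitcode == 0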
diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py
index 320a24856..afc0f7423 100644
--- a/numpy/lib/twodim_base.py
+++ b/numpy/lib/twodim_base.py
@@ -675,7 +675,7 @@ def histogram2d(x, y, bins=10, range=None, normed=None, weights=None,
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131, title='imshow: square bins')
- >>> plt.imshow(H, interpolation='nearest', origin='low',
+ >>> plt.imshow(H, interpolation='nearest', origin='lower',
... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
<matplotlib.image.AxesImage object at 0x...>
diff --git a/numpy/random/_common.pxd b/numpy/random/_common.pxd
index 588f613ae..4f404b7a1 100644
--- a/numpy/random/_common.pxd
+++ b/numpy/random/_common.pxd
@@ -77,6 +77,8 @@ cdef object wrap_int(object val, object bits)
cdef np.ndarray int_to_array(object value, object name, object bits, object uint_size)
+cdef validate_output_shape(iter_shape, np.ndarray output)
+
cdef object cont(void *func, void *state, object size, object lock, int narg,
object a, object a_name, constraint_type a_constraint,
object b, object b_name, constraint_type b_constraint,
diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx
index e5bb02e24..be609d019 100644
--- a/numpy/random/_generator.pyx
+++ b/numpy/random/_generator.pyx
@@ -25,6 +25,7 @@ from ._common cimport (POISSON_LAM_MAX, CONS_POSITIVE, CONS_NONE,
CONS_GT_1, CONS_POSITIVE_NOT_NAN, CONS_POISSON,
double_fill, cont, kahan_sum, cont_broadcast_3, float_fill, cont_f,
check_array_constraint, check_constraint, disc, discrete_broadcast_iii,
+ validate_output_shape
)
np.import_array()
@@ -2806,6 +2807,7 @@ cdef class Generator:
cnt = np.PyArray_SIZE(randoms)
it = np.PyArray_MultiIterNew3(randoms, p_arr, n_arr)
+ validate_output_shape(it.shape, randoms)
with self.lock, nogil:
for i in range(cnt):
_dp = (<double*>np.PyArray_MultiIter_DATA(it, 1))[0]
@@ -3602,7 +3604,7 @@ cdef class Generator:
Now, do one experiment throwing the dice 10 time, and 10 times again,
and another throwing the dice 20 times, and 20 times again:
- >>> rng.multinomial([[10], [20]], [1/6.]*6, size=2)
+ >>> rng.multinomial([[10], [20]], [1/6.]*6, size=(2, 2))
array([[[2, 4, 0, 1, 2, 1],
[1, 3, 0, 3, 1, 2]],
[[1, 4, 4, 4, 4, 3],
@@ -3657,6 +3659,7 @@ cdef class Generator:
temp = np.empty(size, dtype=np.int8)
temp_arr = <np.ndarray>temp
it = np.PyArray_MultiIterNew2(on, temp_arr)
+ validate_output_shape(it.shape, temp_arr)
shape = it.shape + (d,)
multin = np.zeros(shape, dtype=np.int64)
mnarr = <np.ndarray>multin
diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx
index 54d656e3b..169ed5f0e 100644
--- a/numpy/random/mtrand.pyx
+++ b/numpy/random/mtrand.pyx
@@ -22,6 +22,7 @@ from ._common cimport (POISSON_LAM_MAX, CONS_POSITIVE, CONS_NONE,
CONS_GT_1, LEGACY_CONS_POISSON,
double_fill, cont, kahan_sum, cont_broadcast_3,
check_array_constraint, check_constraint, disc, discrete_broadcast_iii,
+ validate_output_shape
)
cdef extern from "numpy/random/distributions.h":
@@ -3371,6 +3372,7 @@ cdef class RandomState:
cnt = np.PyArray_SIZE(randoms)
it = np.PyArray_MultiIterNew3(randoms, p_arr, n_arr)
+ validate_output_shape(it.shape, randoms)
with self.lock, nogil:
for i in range(cnt):
_dp = (<double*>np.PyArray_MultiIter_DATA(it, 1))[0]
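
With ``validate_output_shape`` called right after the multi-iterator is built, ``binomial`` and ``multinomial`` now reject a ``size`` that cannot hold the broadcast of their parameters instead of silently writing a mis-shaped result (gh-16833). The new behaviour, matching the ``test_broadcast_size_error`` tests added below, looks like::

    import numpy as np

    rng = np.random.default_rng()
    # The parameters broadcast to shape (2,), which does not fit into an
    # output of shape (2, 1), so both interfaces raise ValueError.
    try:
        rng.binomial([1, 2], [0.3, 0.7], size=(2, 1))
    except ValueError as exc:
        print("Generator rejected:", exc)
    try:
        np.random.binomial([1, 2], [0.3, 0.7], size=(2, 1))
    except ValueError as exc:
        print("RandomState rejected:", exc)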
diff --git a/numpy/random/tests/test_extending.py b/numpy/random/tests/test_extending.py
index 77353463e..99a819efb 100644
--- a/numpy/random/tests/test_extending.py
+++ b/numpy/random/tests/test_extending.py
@@ -31,11 +31,11 @@ except ImportError:
cython = None
else:
from distutils.version import LooseVersion
- # Cython 0.29.14 is required for Python 3.8 and there are
+ # Cython 0.29.21 is required for Python 3.9 and there are
# other fixes in the 0.29 series that are needed even for earlier
# Python versions.
# Note: keep in sync with the one in pyproject.toml
- required_version = LooseVersion('0.29.14')
+ required_version = LooseVersion('0.29.21')
if LooseVersion(cython_version) < required_version:
# too old or wrong cython, skip the test
cython = None
diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py
index 332b63198..bb6d25ef1 100644
--- a/numpy/random/tests/test_generator_mt19937.py
+++ b/numpy/random/tests/test_generator_mt19937.py
@@ -2423,6 +2423,16 @@ def test_broadcast_size_error():
with pytest.raises(ValueError):
random.standard_gamma(shape, out=out)
+ # 2 arg
+ with pytest.raises(ValueError):
+ random.binomial(1, [0.3, 0.7], size=(2, 1))
+ with pytest.raises(ValueError):
+ random.binomial([1, 2], 0.3, size=(2, 1))
+ with pytest.raises(ValueError):
+ random.binomial([1, 2], [0.3, 0.7], size=(2, 1))
+ with pytest.raises(ValueError):
+ random.multinomial([2, 2], [.3, .7], size=(2, 1))
+
# 3 arg
a = random.chisquare(5, size=3)
b = random.chisquare(5, size=(4, 3))
diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py
index edd7811bf..23dbbed6a 100644
--- a/numpy/random/tests/test_randomstate.py
+++ b/numpy/random/tests/test_randomstate.py
@@ -1989,3 +1989,13 @@ def test_integer_repeat(int_func):
val = val.byteswap()
res = hashlib.md5(val.view(np.int8)).hexdigest()
assert_(res == md5)
+
+
+def test_broadcast_size_error():
+ # GH-16833
+ with pytest.raises(ValueError):
+ random.binomial(1, [0.3, 0.7], size=(2, 1))
+ with pytest.raises(ValueError):
+ random.binomial([1, 2], 0.3, size=(2, 1))
+ with pytest.raises(ValueError):
+ random.binomial([1, 2], [0.3, 0.7], size=(2, 1))
diff --git a/pavement.py b/pavement.py
index 46239dbf5..a8551e2a3 100644
--- a/pavement.py
+++ b/pavement.py
@@ -37,7 +37,7 @@ from paver.easy import Bunch, options, task, sh
#-----------------------------------
# Path to the release notes
-RELEASE_NOTES = 'doc/source/release/1.19.0-notes.rst'
+RELEASE_NOTES = 'doc/source/release/1.19.1-notes.rst'
#-------------------------------------------------------
diff --git a/pyproject.toml b/pyproject.toml
index d81b731d3..a54d0b379 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,9 +1,9 @@
[build-system]
# Minimum requirements for the build system to execute.
requires = [
- "setuptools",
+ "setuptools!=49.2.0",
"wheel",
- "Cython>=0.29.14", # Note: keep in sync with tools/cythonize.py
+ "Cython>=0.29.21", # Note: keep in sync with tools/cythonize.py
]
diff --git a/setup.py b/setup.py
index e8320581f..cb435acbc 100755
--- a/setup.py
+++ b/setup.py
@@ -55,7 +55,7 @@ Operating System :: MacOS
MAJOR = 1
MINOR = 19
-MICRO = 0
+MICRO = 1
ISRELEASED = True
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
@@ -189,7 +189,7 @@ def check_submodules():
for line in status.splitlines():
if line.startswith('-') or line.startswith('+'):
raise ValueError('Submodule not clean: {}'.format(line))
-
+
class concat_license_files():
@@ -403,6 +403,16 @@ def parse_setuppy_commands():
return True
+def get_docs_url():
+ if not ISRELEASED:
+ return "https://numpy.org/devdocs"
+ else:
+ # For releaeses, this URL ends up on pypi.
+ # By pinning the version, users looking at old PyPI releases can get
+ # to the associated docs easily.
+ return "https://numpy.org/doc/{}.{}".format(MAJOR, MINOR)
+
+
def setup_package():
src_path = os.path.dirname(os.path.abspath(__file__))
old_path = os.getcwd()
@@ -437,7 +447,7 @@ def setup_package():
download_url = "https://pypi.python.org/pypi/numpy",
project_urls={
"Bug Tracker": "https://github.com/numpy/numpy/issues",
- "Documentation": "https://docs.scipy.org/doc/numpy/",
+ "Documentation": get_docs_url(),
"Source Code": "https://github.com/numpy/numpy",
},
license = 'BSD',
diff --git a/shippable.yml b/shippable.yml
index dc3617e12..2843377e2 100644
--- a/shippable.yml
+++ b/shippable.yml
@@ -1,7 +1,7 @@
branches:
only:
- - master
- - maintenance/*
+ - master
+ - maintenance/*
language: python
@@ -53,15 +53,7 @@ build:
# run the test suite
- python runtests.py -n --debug-info --show-build-log -- -rsx --junit-xml=$SHIPPABLE_REPO_DIR/shippable/testresults/tests.xml -n 2 --durations=10
- cache: true
- cache_dir_list:
- # the NumPy project uses a single Amazon S3 cache
- # so upload the parent path of the Python-specific
- # version paths to avoid i.e., 3.6 overwriting
- # 3.7 pip cache (seems to be an issue)
- - /root/.cache/pip/wheels
-
-
+ cache: false
# disable email notification
# of CI job result
diff --git a/test_requirements.txt b/test_requirements.txt
index 5db322b9b..05e09cb78 100644
--- a/test_requirements.txt
+++ b/test_requirements.txt
@@ -1,6 +1,8 @@
-cython==0.29.17
-hypothesis==5.12.0
-pytest==5.4.2
+cython==0.29.21
+wheel
+setuptools!=49.2.0
+hypothesis==5.19.1
+pytest==5.4.3
pytz==2020.1
pytest-cov==2.8.1
pickle5; python_version == '3.7'
diff --git a/tools/cythonize.py b/tools/cythonize.py
index 65b79f716..6cebf0f72 100755
--- a/tools/cythonize.py
+++ b/tools/cythonize.py
@@ -66,11 +66,11 @@ def process_pyx(fromfile, tofile):
# check the version, and invoke through python
from distutils.version import LooseVersion
- # Cython 0.29.14 is required for Python 3.8 and there are
+ # Cython 0.29.21 is required for Python 3.9 and there are
# other fixes in the 0.29 series that are needed even for earlier
# Python versions.
# Note: keep in sync with that in pyproject.toml
- required_version = LooseVersion('0.29.14')
+ required_version = LooseVersion('0.29.21')
if LooseVersion(cython_version) < required_version:
raise RuntimeError(f'Building {VENDOR} requires Cython >= {required_version}')
diff --git a/tools/pypy-test.sh b/tools/pypy-test.sh
index e24d7a99d..32b7968d8 100755
--- a/tools/pypy-test.sh
+++ b/tools/pypy-test.sh
@@ -33,7 +33,7 @@ wget -q https://downloads.python.org/pypy/pypy3.6-v7.3.1-linux64.tar.bz2 -O pypy
mkdir -p pypy3
(cd pypy3; tar --strip-components=1 -xf ../pypy.tar.bz2)
pypy3/bin/pypy3 -mensurepip
-pypy3/bin/pypy3 -m pip install --upgrade pip setuptools wheel
+pypy3/bin/pypy3 -m pip install --upgrade pip
pypy3/bin/pypy3 -m pip install --user -r test_requirements.txt --no-warn-script-location
echo
diff --git a/tools/travis-before-install.sh b/tools/travis-before-install.sh
index e468dd932..1446a8bad 100755
--- a/tools/travis-before-install.sh
+++ b/tools/travis-before-install.sh
@@ -29,7 +29,7 @@ gcc --version
popd
-pip install --upgrade pip
+pip install --upgrade pip setuptools!=49.2.0 wheel
# 'setuptools', 'wheel' and 'cython' are build dependencies. This information
# is stored in pyproject.toml, but there is not yet a standard way to install
@@ -41,7 +41,7 @@ pip install --upgrade pip
# A specific version of cython is required, so we read the cython package
# requirement using `grep cython test_requirements.txt` instead of simply
# writing 'pip install setuptools wheel cython'.
-pip install setuptools wheel `grep cython test_requirements.txt`
+pip install `grep cython test_requirements.txt`
if [ -n "$DOWNLOAD_OPENBLAS" ]; then
pwd