author    DongHun Kwak <dh0128.kwak@samsung.com>  2020-12-31 09:36:33 +0900
committer DongHun Kwak <dh0128.kwak@samsung.com>  2020-12-31 09:36:33 +0900
commit    511f2125c7ab4423984a9c9e2e00ae0d83b4672e
tree      591aa5e601fcddaff3ba91f2508578191e4bdd74
parent    635a72c2cedbc99fd3bd22af319e12b8f95bd339
download  python-numpy-511f2125c7ab4423984a9c9e2e00ae0d83b4672e.tar.gz
          python-numpy-511f2125c7ab4423984a9c9e2e00ae0d83b4672e.tar.bz2
          python-numpy-511f2125c7ab4423984a9c9e2e00ae0d83b4672e.zip

Imported Upstream version 1.16.5 (tag: upstream/1.16.5)
-rw-r--r--   .mailmap                                       13
-rw-r--r--   .travis.yml                                     1
-rw-r--r--   azure-pipelines.yml                            13
-rw-r--r--   doc/changelog/1.16.5-changelog.rst             54
-rw-r--r--   doc/release/1.16.5-notes.rst                   68
-rw-r--r--   doc/source/reference/arrays.classes.rst       126
-rw-r--r--   doc/source/release.rst                          4
-rw-r--r--   doc/source/user/basics.dispatch.rst             8
-rw-r--r--   doc/source/user/basics.rst                      1
-rw-r--r--   numpy/core/_add_newdocs.py                    255
-rw-r--r--   numpy/core/_internal.py                        10
-rw-r--r--   numpy/core/function_base.py                    29
-rw-r--r--   numpy/core/include/numpy/ndarrayobject.h        4
-rw-r--r--   numpy/core/multiarray.py                       23
-rw-r--r--   numpy/core/src/common/npy_ctypes.h              1
-rw-r--r--   numpy/core/src/multiarray/alloc.c              16
-rw-r--r--   numpy/core/src/multiarray/arrayobject.c         6
-rw-r--r--   numpy/core/src/multiarray/convert_datatype.c   81
-rw-r--r--   numpy/core/src/multiarray/ctors.c              37
-rw-r--r--   numpy/core/src/multiarray/descriptor.c         10
-rw-r--r--   numpy/core/src/multiarray/dragon4.c             2
-rw-r--r--   numpy/core/src/multiarray/getset.c             16
-rw-r--r--   numpy/core/src/multiarray/mapping.c             6
-rw-r--r--   numpy/core/src/multiarray/methods.c             8
-rw-r--r--   numpy/core/src/multiarray/multiarraymodule.c    4
-rw-r--r--   numpy/core/src/umath/reduction.c                8
-rw-r--r--   numpy/core/tests/test_dtype.py                 12
-rw-r--r--   numpy/core/tests/test_indexing.py               9
-rw-r--r--   numpy/core/tests/test_multiarray.py            29
-rw-r--r--   numpy/core/tests/test_nditer.py                 2
-rw-r--r--   numpy/core/tests/test_numeric.py               35
-rw-r--r--   numpy/core/tests/test_numerictypes.py           3
-rw-r--r--   numpy/doc/dispatch.py                         271
-rw-r--r--   numpy/doc/structured_arrays.py                  7
-rw-r--r--   numpy/f2py/tests/test_block_docstring.py        3
-rw-r--r--   numpy/lib/histograms.py                        12
-rw-r--r--   numpy/lib/recfunctions.py                     232
-rw-r--r--   numpy/lib/tests/test_function_base.py           3
-rw-r--r--   numpy/lib/tests/test_recfunctions.py           41
-rw-r--r--   numpy/lib/tests/test_twodim_base.py            26
-rw-r--r--   numpy/lib/twodim_base.py                       16
-rw-r--r--   numpy/testing/_private/utils.py                21
-rw-r--r--   numpy/testing/tests/test_decorators.py          4
-rw-r--r--   pavement.py                                     2
-rwxr-xr-x   setup.py                                        6
-rw-r--r--   tools/pypy-test.sh                             51
-rwxr-xr-x   tools/travis-before-install.sh                  2
47 files changed, 1197 insertions, 394 deletions
diff --git a/.mailmap b/.mailmap
index 8d9287998..d36594212 100644
--- a/.mailmap
+++ b/.mailmap
@@ -30,6 +30,7 @@ Alyssa Quek <alyssaquek@gmail.com> alyssaq <alyssaquek@gmail.com>
Amir Sarabadani <ladsgroup@gmail.com> amir <ladsgroup@gmail.com>
Anatoly Techtonik <techtonik@gmail.com> anatoly techtonik <techtonik@gmail.com>
Andras Deak <deak.andris@gmail.com> adeak <adeak@users.noreply.github.com>
+Andrea Pattori <andrea.pattori@gmail.com> patto90 <andrea.pattori@gmail.com>
Andrei Kucharavy <ank@andreikucharavy.com> chiffa <ank@andreikucharavy.com>
Anne Archibald <peridot.faceted@gmail.com> aarchiba <peridot.faceted@gmail.com>
Anne Archibald <peridot.faceted@gmail.com> Anne Archibald <archibald@astron.nl>
@@ -43,6 +44,7 @@ Badhri Narayanan Krishnakumar <badhrinarayanan.k@gmail.com> badhrink <badhrinara
Behzad Nouri <behzadnouri@gmail.com> behzad nouri <behzadnouri@gmail.com>
Benjamin Root <ben.v.root@gmail.com> Ben Root <ben.v.root@gmail.com>
Benjamin Root <ben.v.root@gmail.com> weathergod <?@?>
+Bernardt Duvenhage <bernardt.duvenhage@gmail.com> bduvenhage <bernardt.duvenhage@gmail.com>
Bertrand Lefebvre <bertrand.l3f@gmail.com> bertrand <bertrand.l3f@gmail.com>
Bertrand Lefebvre <bertrand.l3f@gmail.com> Bertrand <bertrand.l3f@gmail.com>
Bharat Raghunathan <bharatr@symphonyai.com> Bharat123Rox <bharatr@symphonyai.com>
@@ -118,7 +120,10 @@ Jason King <pizza@netspace.net.au> jason king <pizza@netspace.net.au>
Jay Bourque <jay.bourque@continuum.io> jayvius <jay.bourque@continuum.io>
Jean Utke <jutke@allstate.com> jutke <jutke@allstate.com>
Jeffrey Yancey <jeffrey@octane5.com> Jeff <3820914+jeffyancey@users.noreply.github.com>
+Jeremy Lay <jlay80@gmail.com> jeremycl01 <jlay80@gmail.com>
+Jérémie du Boisberranger <jeremie.du-boisberranger@inria.fr> jeremiedbb <34657725+jeremiedbb@users.noreply.github.com>
Jerome Kelleher <jerome.kelleher@ed.ac.uk> jeromekelleher <jerome.kelleher@ed.ac.uk>
+Johannes Hampp <johannes.hampp@zeu.uni-giessen.de> euronion <42553970+euronion@users.noreply.github.com>
Johannes Schönberger <hannesschoenberger@gmail.com> Johannes Schönberger <jschoenberger@demuc.de>
John Darbyshire <24256554+attack68@users.noreply.github.com> attack68 <24256554+attack68@users.noreply.github.com>
Joseph Fox-Rabinovitz <jfoxrabinovitz@gmail.com> Joseph Fox-Rabinovitz <joseph.r.fox-rabinovitz@nasa.gov>
@@ -130,8 +135,11 @@ Julian Taylor <juliantaylor108@gmail.com> Julian Taylor <juliantaylor108@googlem
Julien Lhermitte <jrmlhermitte@gmail.com> Julien Lhermitte <lhermitte@bnl.gov>
Julien Schueller <julien.schueller@gmail.com> jschueller <julien.schueller@gmail.com>
Kai Striega <kaistriega@gmail.com> kai <kaistriega@gmail.com>
+Kai Striega <kaistriega@gmail.com> kai-striega <kaistriega@gmail.com>
+Kai Striega <kaistriega@gmail.com> kai-striega <kaistriega+github@gmail.com>
Khaled Ben Abdallah Okuda <khaled.ben.okuda@gmail.com> KhaledTo <khaled.ben.okuda@gmail.com>
-Kiko Correoso <kikocorreoso@gmail.com> kikocorreoso <kikocorreoso@gmail.com>
+Kiko Correoso <kachine@protonmail.com> kikocorreoso <kikocorreoso@gmail.com>
+Kiko Correoso <kachine@protonmail.com> kikocorreoso <kikocorreoso@users.noreply.github.com>
Konrad Kapp <k_kapp@yahoo.com> k_kapp@yahoo.com <k_kapp@yahoo.com>
Kriti Singh <kritisingh1.ks@gmail.com> kritisingh1 <kritisingh1.ks@gmail.com>
Lars Buitinck <larsmans@gmail.com> Lars Buitinck <l.buitinck@esciencecenter.nl>
@@ -192,8 +200,10 @@ Sanchez Gonzalez Alvaro <as12513@imperial.ac.uk> alvarosg <as12513@imperial.ac.u
Saullo Giovani <saullogiovani@gmail.com> saullogiovani <saullogiovani@gmail.com>
Saurabh Mehta <e.samehta@gmail.com>
Sebastian Berg <sebastian@sipsolutions.net> seberg <sebastian@sipsolutions.net>
+Shekhar Prasad Rajak <shekharrajak@live.com> shekharrajak <shekharrajak@live.com>
Shota Kawabuchi <shota.kawabuchi+GitHub@gmail.com> skwbc <shota.kawabuchi+GitHub@gmail.com>
Siavash Eliasi <siavashserver@gmail.com> siavashserver <siavashserver@gmail.com>
+Søren Rasmussen <soren.rasmussen@alexandra.dk> sorenrasmussenai <47032123+sorenrasmussenai@users.noreply.github.com>
Stefan van der Walt <stefanv@berkeley.edu> Stefan van der Walt <sjvdwalt@gmail.com>
Stefan van der Walt <stefanv@berkeley.edu> Stefan van der Walt <stefan@sun.ac.za>
Stephan Hoyer <shoyer@gmail.com> Stephan Hoyer <shoyer@climate.com>
@@ -218,6 +228,7 @@ Wendell Smith <wendellwsmith@gmail.com> Wendell Smith <wackywendell@gmail.com>
Wim Glenn <wim.glenn@melbourneit.com.au> wim glenn <wim.glenn@melbourneit.com.au>
Wojtek Ruszczewski <git@wr.waw.pl> wrwrwr <git@wr.waw.pl>
Yuji Kanagawa <yuji.kngw.80s.revive@gmail.com> kngwyu <yuji.kngw.80s.revive@gmail.com>
+Yury Kirienko <yury.kirienko@gmail.com> kirienko <yury.kirienko@gmail.com>
Zixu Zhao <zixu.zhao.tireless@gmail.com> ZZhaoTireless <zixu.zhao.tireless@gmail.com>
Ziyan Zhou<ziyan.zhou@mujin.co.jp> Ziyan <ziyan.zhou@mujin.co.jp>
luzpaz <kunda@scribus.net> luz.paz <luzpaz@users.noreply.github.com>
diff --git a/.travis.yml b/.travis.yml
index 491fcefea..8c8dce075 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -11,7 +11,6 @@ addons:
apt:
packages: &common_packages
- gfortran
- - libatlas-dev
- libatlas-base-dev
# Speedup builds, particularly when USE_CHROOT=1
- eatmydata
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index ff8d529e8..9317955f8 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -198,3 +198,16 @@ jobs:
inputs:
testResultsFiles: '**/test-*.xml'
testRunTitle: 'Publish test results for Python $(python.version)'
+
+- job: Linux_PyPy
+ pool:
+    vmImage: 'ubuntu-16.04'
+ steps:
+ - script: source tools/pypy-test.sh
+ displayName: 'Run PyPy Build / Tests'
+ - task: PublishTestResults@2
+ condition: succeededOrFailed()
+ inputs:
+ testResultsFiles: '**/test-*.xml'
+ testRunTitle: 'Publish test results for PyPy'
+ failTaskOnFailedTests: true
diff --git a/doc/changelog/1.16.5-changelog.rst b/doc/changelog/1.16.5-changelog.rst
new file mode 100644
index 000000000..19374058d
--- /dev/null
+++ b/doc/changelog/1.16.5-changelog.rst
@@ -0,0 +1,54 @@
+
+Contributors
+============
+
+A total of 18 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Alexander Shadchin
+* Allan Haldane
+* Bruce Merry +
+* Charles Harris
+* Colin Snyder +
+* Dan Allan +
+* Emile +
+* Eric Wieser
+* Grey Baker +
+* Maksim Shabunin +
+* Marten van Kerkwijk
+* Matti Picus
+* Peter Andreas Entschev +
+* Ralf Gommers
+* Richard Harris +
+* Sebastian Berg
+* Sergei Lebedev +
+* Stephan Hoyer
+
+Pull requests merged
+====================
+
+A total of 23 pull requests were merged for this release.
+
+* `#13742 <https://github.com/numpy/numpy/pull/13742>`__: ENH: Add project URLs to setup.py
+* `#13823 <https://github.com/numpy/numpy/pull/13823>`__: TEST, ENH: fix tests and ctypes code for PyPy
+* `#13845 <https://github.com/numpy/numpy/pull/13845>`__: BUG: use npy_intp instead of int for indexing array
+* `#13867 <https://github.com/numpy/numpy/pull/13867>`__: TST: Ignore DeprecationWarning during nose imports
+* `#13905 <https://github.com/numpy/numpy/pull/13905>`__: BUG: Fix use-after-free in boolean indexing
+* `#13933 <https://github.com/numpy/numpy/pull/13933>`__: MAINT/BUG/DOC: Fix errors in _add_newdocs
+* `#13984 <https://github.com/numpy/numpy/pull/13984>`__: BUG: fix byte order reversal for datetime64[ns]
+* `#13994 <https://github.com/numpy/numpy/pull/13994>`__: MAINT,BUG: Use nbytes to also catch empty descr during allocation
+* `#14042 <https://github.com/numpy/numpy/pull/14042>`__: BUG: np.array cleared errors occurred in PyMemoryView_FromObject
+* `#14043 <https://github.com/numpy/numpy/pull/14043>`__: BUG: Fixes for Undefined Behavior Sanitizer (UBSan) errors.
+* `#14044 <https://github.com/numpy/numpy/pull/14044>`__: BUG: ensure that casting to/from structured is properly checked.
+* `#14045 <https://github.com/numpy/numpy/pull/14045>`__: MAINT: fix histogram*d dispatchers
+* `#14046 <https://github.com/numpy/numpy/pull/14046>`__: BUG: further fixup to histogram2d dispatcher.
+* `#14052 <https://github.com/numpy/numpy/pull/14052>`__: BUG: Replace contextlib.suppress for Python 2.7
+* `#14056 <https://github.com/numpy/numpy/pull/14056>`__: BUG: fix compilation of 3rd party modules with Py_LIMITED_API...
+* `#14057 <https://github.com/numpy/numpy/pull/14057>`__: BUG: Fix memory leak in dtype from dict constructor
+* `#14058 <https://github.com/numpy/numpy/pull/14058>`__: DOC: Document array_function at a higher level.
+* `#14084 <https://github.com/numpy/numpy/pull/14084>`__: BUG, DOC: add new recfunctions to `__all__`
+* `#14162 <https://github.com/numpy/numpy/pull/14162>`__: BUG: Remove stray print that causes a SystemError on python 3.7
+* `#14297 <https://github.com/numpy/numpy/pull/14297>`__: TST: Pin pytest version to 5.0.1.
+* `#14322 <https://github.com/numpy/numpy/pull/14322>`__: ENH: Enable huge pages in all Linux builds
+* `#14346 <https://github.com/numpy/numpy/pull/14346>`__: BUG: fix behavior of structured_to_unstructured on non-trivial...
+* `#14382 <https://github.com/numpy/numpy/pull/14382>`__: REL: Prepare for the NumPy 1.16.5 release.
diff --git a/doc/release/1.16.5-notes.rst b/doc/release/1.16.5-notes.rst
new file mode 100644
index 000000000..5b6eb585b
--- /dev/null
+++ b/doc/release/1.16.5-notes.rst
@@ -0,0 +1,68 @@
+==========================
+NumPy 1.16.5 Release Notes
+==========================
+
+The NumPy 1.16.5 release fixes bugs reported against the 1.16.4 release, and
+also backports several enhancements from master that seem appropriate for a
+release series that is the last to support Python 2.7. The wheels on PyPI are
+linked with OpenBLAS v0.3.7-dev, which should fix errors on Skylake series
+CPUs.
+
+Downstream developers building this release should use Cython >= 0.29.2 and, if
+using OpenBLAS, OpenBLAS >= v0.3.7. The supported Python versions are 2.7 and
+3.5-3.7.
+
+
+Contributors
+============
+
+A total of 18 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Alexander Shadchin
+* Allan Haldane
+* Bruce Merry +
+* Charles Harris
+* Colin Snyder +
+* Dan Allan +
+* Emile +
+* Eric Wieser
+* Grey Baker +
+* Maksim Shabunin +
+* Marten van Kerkwijk
+* Matti Picus
+* Peter Andreas Entschev +
+* Ralf Gommers
+* Richard Harris +
+* Sebastian Berg
+* Sergei Lebedev +
+* Stephan Hoyer
+
+Pull requests merged
+====================
+
+A total of 23 pull requests were merged for this release.
+
+* `#13742 <https://github.com/numpy/numpy/pull/13742>`__: ENH: Add project URLs to setup.py
+* `#13823 <https://github.com/numpy/numpy/pull/13823>`__: TEST, ENH: fix tests and ctypes code for PyPy
+* `#13845 <https://github.com/numpy/numpy/pull/13845>`__: BUG: use npy_intp instead of int for indexing array
+* `#13867 <https://github.com/numpy/numpy/pull/13867>`__: TST: Ignore DeprecationWarning during nose imports
+* `#13905 <https://github.com/numpy/numpy/pull/13905>`__: BUG: Fix use-after-free in boolean indexing
+* `#13933 <https://github.com/numpy/numpy/pull/13933>`__: MAINT/BUG/DOC: Fix errors in _add_newdocs
+* `#13984 <https://github.com/numpy/numpy/pull/13984>`__: BUG: fix byte order reversal for datetime64[ns]
+* `#13994 <https://github.com/numpy/numpy/pull/13994>`__: MAINT,BUG: Use nbytes to also catch empty descr during allocation
+* `#14042 <https://github.com/numpy/numpy/pull/14042>`__: BUG: np.array cleared errors occurred in PyMemoryView_FromObject
+* `#14043 <https://github.com/numpy/numpy/pull/14043>`__: BUG: Fixes for Undefined Behavior Sanitizer (UBSan) errors.
+* `#14044 <https://github.com/numpy/numpy/pull/14044>`__: BUG: ensure that casting to/from structured is properly checked.
+* `#14045 <https://github.com/numpy/numpy/pull/14045>`__: MAINT: fix histogram*d dispatchers
+* `#14046 <https://github.com/numpy/numpy/pull/14046>`__: BUG: further fixup to histogram2d dispatcher.
+* `#14052 <https://github.com/numpy/numpy/pull/14052>`__: BUG: Replace contextlib.suppress for Python 2.7
+* `#14056 <https://github.com/numpy/numpy/pull/14056>`__: BUG: fix compilation of 3rd party modules with Py_LIMITED_API...
+* `#14057 <https://github.com/numpy/numpy/pull/14057>`__: BUG: Fix memory leak in dtype from dict constructor
+* `#14058 <https://github.com/numpy/numpy/pull/14058>`__: DOC: Document array_function at a higher level.
+* `#14084 <https://github.com/numpy/numpy/pull/14084>`__: BUG, DOC: add new recfunctions to `__all__`
+* `#14162 <https://github.com/numpy/numpy/pull/14162>`__: BUG: Remove stray print that causes a SystemError on python 3.7
+* `#14297 <https://github.com/numpy/numpy/pull/14297>`__: TST: Pin pytest version to 5.0.1.
+* `#14322 <https://github.com/numpy/numpy/pull/14322>`__: ENH: Enable huge pages in all Linux builds
+* `#14346 <https://github.com/numpy/numpy/pull/14346>`__: BUG: fix behavior of structured_to_unstructured on non-trivial...
+* `#14382 <https://github.com/numpy/numpy/pull/14382>`__: REL: Prepare for the NumPy 1.16.5 release.
diff --git a/doc/source/reference/arrays.classes.rst b/doc/source/reference/arrays.classes.rst
index f17cb932a..dc8669a2b 100644
--- a/doc/source/reference/arrays.classes.rst
+++ b/doc/source/reference/arrays.classes.rst
@@ -6,8 +6,15 @@ Standard array subclasses
.. currentmodule:: numpy
-The :class:`ndarray` in NumPy is a "new-style" Python
-built-in-type. Therefore, it can be inherited from (in Python or in C)
+.. note::
+
+   Subclassing a ``numpy.ndarray`` is possible, but if your goal is to create
+   an array with *modified* behavior, as dask arrays do for distributed
+   computation and cupy arrays do for GPU-based computation, subclassing is
+   discouraged. Instead, using numpy's
+   :ref:`dispatch mechanism <basics.dispatch>` is recommended.
+
+The :class:`ndarray` can be inherited from (in Python or in C)
if desired. Therefore, it can form a foundation for many useful
classes. Often whether to sub-class the array object or to simply use
the core array component as an internal part of a new class is a
@@ -151,6 +158,121 @@ NumPy provides several hooks that classes can customize:
:func:`__array_prepare__`, :data:`__array_priority__` mechanism
described below for ufuncs (which may eventually be deprecated).
+.. py:method:: class.__array_function__(func, types, args, kwargs)
+
+ .. versionadded:: 1.16
+
+ .. note::
+
+ - In NumPy 1.17, the protocol is enabled by default, but can be disabled
+ with ``NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=0``.
+ - In NumPy 1.16, you need to set the environment variable
+ ``NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=1`` before importing NumPy to use
+ NumPy function overrides.
+    - Eventually, expect ``__array_function__`` to always be enabled.
+
+ - ``func`` is an arbitrary callable exposed by NumPy's public API,
+ which was called in the form ``func(*args, **kwargs)``.
+ - ``types`` is a `collection <collections.abc.Collection>`_
+ of unique argument types from the original NumPy function call that
+ implement ``__array_function__``.
+ - The tuple ``args`` and dict ``kwargs`` are directly passed on from the
+ original call.
+
+ As a convenience for ``__array_function__`` implementors, ``types``
+ provides all argument types with an ``'__array_function__'`` attribute.
+ This allows implementors to quickly identify cases where they should defer
+ to ``__array_function__`` implementations on other arguments.
+ Implementations should not rely on the iteration order of ``types``.
+
+ Most implementations of ``__array_function__`` will start with two
+ checks:
+
+ 1. Is the given function something that we know how to overload?
+ 2. Are all arguments of a type that we know how to handle?
+
+ If these conditions hold, ``__array_function__`` should return the result
+ from calling its implementation for ``func(*args, **kwargs)``. Otherwise,
+ it should return the sentinel value ``NotImplemented``, indicating that the
+ function is not implemented by these types.
+
+ There are no general requirements on the return value from
+ ``__array_function__``, although most sensible implementations should
+ probably return array(s) with the same type as one of the function's
+ arguments.
+
+  It may also be convenient to define a custom decorator (``implements``
+ below) for registering ``__array_function__`` implementations.
+
+ .. code:: python
+
+ HANDLED_FUNCTIONS = {}
+
+ class MyArray:
+ def __array_function__(self, func, types, args, kwargs):
+ if func not in HANDLED_FUNCTIONS:
+ return NotImplemented
+ # Note: this allows subclasses that don't override
+ # __array_function__ to handle MyArray objects
+ if not all(issubclass(t, MyArray) for t in types):
+ return NotImplemented
+ return HANDLED_FUNCTIONS[func](*args, **kwargs)
+
+ def implements(numpy_function):
+ """Register an __array_function__ implementation for MyArray objects."""
+ def decorator(func):
+ HANDLED_FUNCTIONS[numpy_function] = func
+ return func
+ return decorator
+
+ @implements(np.concatenate)
+ def concatenate(arrays, axis=0, out=None):
+ ... # implementation of concatenate for MyArray objects
+
+ @implements(np.broadcast_to)
+ def broadcast_to(array, shape):
+ ... # implementation of broadcast_to for MyArray objects
+
+ Note that it is not required for ``__array_function__`` implementations to
+ include *all* of the corresponding NumPy function's optional arguments
+ (e.g., ``broadcast_to`` above omits the irrelevant ``subok`` argument).
+ Optional arguments are only passed in to ``__array_function__`` if they
+ were explicitly used in the NumPy function call.
+
+ Just like the case for builtin special methods like ``__add__``, properly
+ written ``__array_function__`` methods should always return
+ ``NotImplemented`` when an unknown type is encountered. Otherwise, it will
+ be impossible to correctly override NumPy functions from another object
+ if the operation also includes one of your objects.
+
+ For the most part, the rules for dispatch with ``__array_function__``
+ match those for ``__array_ufunc__``. In particular:
+
+ - NumPy will gather implementations of ``__array_function__`` from all
+ specified inputs and call them in order: subclasses before
+ superclasses, and otherwise left to right. Note that in some edge cases
+ involving subclasses, this differs slightly from the
+ `current behavior <https://bugs.python.org/issue30140>`_ of Python.
+ - Implementations of ``__array_function__`` indicate that they can
+ handle the operation by returning any value other than
+ ``NotImplemented``.
+ - If all ``__array_function__`` methods return ``NotImplemented``,
+ NumPy will raise ``TypeError``.
+
+  If no ``__array_function__`` methods exist, NumPy will default to calling
+ its own implementation, intended for use on NumPy arrays. This case arises,
+ for example, when all array-like arguments are Python numbers or lists.
+ (NumPy arrays do have a ``__array_function__`` method, given below, but it
+ always returns ``NotImplemented`` if any argument other than a NumPy array
+ subclass implements ``__array_function__``.)
+
+ One deviation from the current behavior of ``__array_ufunc__`` is that
+ NumPy will only call ``__array_function__`` on the *first* argument of each
+ unique type. This matches Python's `rule for calling reflected methods
+ <https://docs.python.org/3/reference/datamodel.html#object.__ror__>`_, and
+ this ensures that checking overloads has acceptable performance even when
+ there are a large number of overloaded arguments.
+
.. py:method:: class.__array_finalize__(obj)
This method is called whenever the system internally allocates a
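
The ``__array_function__`` protocol documented in the hunk above can be
exercised end to end with a short script. A minimal sketch under NumPy 1.17
semantics (on 1.16 the environment variable
``NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=1`` must be set before import); the toy
``concatenate`` handler is illustrative and not part of this patch:

    import numpy as np

    HANDLED_FUNCTIONS = {}

    class MyArray:
        def __init__(self, data):
            self.data = list(data)

        def __array_function__(self, func, types, args, kwargs):
            if func not in HANDLED_FUNCTIONS:
                return NotImplemented
            if not all(issubclass(t, MyArray) for t in types):
                return NotImplemented
            return HANDLED_FUNCTIONS[func](*args, **kwargs)

    def implements(numpy_function):
        def decorator(func):
            HANDLED_FUNCTIONS[numpy_function] = func
            return func
        return decorator

    @implements(np.concatenate)
    def concatenate(arrays, axis=0, out=None):
        # Toy handler: join the wrapped lists end to end.
        result = []
        for a in arrays:
            result.extend(a.data)
        return MyArray(result)

    res = np.concatenate([MyArray([1, 2]), MyArray([3])])
    print(res.data)   # [1, 2, 3]
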
diff --git a/doc/source/release.rst b/doc/source/release.rst
index ea582e2aa..e5b3d5d13 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -2,6 +2,10 @@
Release Notes
*************
+.. include:: ../release/1.16.5-notes.rst
+.. include:: ../release/1.16.4-notes.rst
+.. include:: ../release/1.16.3-notes.rst
+.. include:: ../release/1.16.2-notes.rst
.. include:: ../release/1.16.1-notes.rst
.. include:: ../release/1.16.0-notes.rst
.. include:: ../release/1.15.4-notes.rst
diff --git a/doc/source/user/basics.dispatch.rst b/doc/source/user/basics.dispatch.rst
new file mode 100644
index 000000000..f7b8da262
--- /dev/null
+++ b/doc/source/user/basics.dispatch.rst
@@ -0,0 +1,8 @@
+.. _basics.dispatch:
+
+*******************************
+Writing custom array containers
+*******************************
+
+.. automodule:: numpy.doc.dispatch
+
diff --git a/doc/source/user/basics.rst b/doc/source/user/basics.rst
index 7875aff6e..e0fc0ece3 100644
--- a/doc/source/user/basics.rst
+++ b/doc/source/user/basics.rst
@@ -12,4 +12,5 @@ NumPy basics
basics.broadcasting
basics.byteswapping
basics.rec
+ basics.dispatch
basics.subclassing
diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py
index a242a74d4..2ed11b2f1 100644
--- a/numpy/core/_add_newdocs.py
+++ b/numpy/core/_add_newdocs.py
@@ -10,6 +10,8 @@ NOTE: Many of the methods of ndarray have corresponding functions.
"""
from __future__ import division, absolute_import, print_function
+import sys
+
from numpy.core import numerictypes as _numerictypes
from numpy.core import dtype
from numpy.core.function_base import add_newdoc
@@ -1461,57 +1463,58 @@ add_newdoc('numpy.core.multiarray', 'promote_types',
""")
-add_newdoc('numpy.core.multiarray', 'newbuffer',
- """
- newbuffer(size)
+if sys.version_info.major < 3:
+ add_newdoc('numpy.core.multiarray', 'newbuffer',
+ """
+ newbuffer(size)
- Return a new uninitialized buffer object.
+ Return a new uninitialized buffer object.
- Parameters
- ----------
- size : int
- Size in bytes of returned buffer object.
+ Parameters
+ ----------
+ size : int
+ Size in bytes of returned buffer object.
- Returns
- -------
- newbuffer : buffer object
- Returned, uninitialized buffer object of `size` bytes.
+ Returns
+ -------
+ newbuffer : buffer object
+ Returned, uninitialized buffer object of `size` bytes.
- """)
+ """)
-add_newdoc('numpy.core.multiarray', 'getbuffer',
- """
- getbuffer(obj [,offset[, size]])
+ add_newdoc('numpy.core.multiarray', 'getbuffer',
+ """
+ getbuffer(obj [,offset[, size]])
- Create a buffer object from the given object referencing a slice of
- length size starting at offset.
+ Create a buffer object from the given object referencing a slice of
+ length size starting at offset.
- Default is the entire buffer. A read-write buffer is attempted followed
- by a read-only buffer.
+ Default is the entire buffer. A read-write buffer is attempted followed
+ by a read-only buffer.
- Parameters
- ----------
- obj : object
+ Parameters
+ ----------
+ obj : object
- offset : int, optional
+ offset : int, optional
- size : int, optional
+ size : int, optional
- Returns
- -------
- buffer_obj : buffer
+ Returns
+ -------
+ buffer_obj : buffer
- Examples
- --------
- >>> buf = np.getbuffer(np.ones(5), 1, 3)
- >>> len(buf)
- 3
- >>> buf[0]
- '\\x00'
- >>> buf
- <read-write buffer for 0x8af1e70, size 3, offset 1 at 0x8ba4ec0>
+ Examples
+ --------
+ >>> buf = np.getbuffer(np.ones(5), 1, 3)
+ >>> len(buf)
+ 3
+ >>> buf[0]
+ '\\x00'
+ >>> buf
+ <read-write buffer for 0x8af1e70, size 3, offset 1 at 0x8ba4ec0>
- """)
+ """)
add_newdoc('numpy.core.multiarray', 'c_einsum',
"""
@@ -1977,13 +1980,6 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__',
"""Array protocol: C-struct side."""))
-add_newdoc('numpy.core.multiarray', 'ndarray', ('_as_parameter_',
- """Allow the array to be interpreted as a ctypes object by returning the
- data-memory location as an integer
-
- """))
-
-
add_newdoc('numpy.core.multiarray', 'ndarray', ('base',
"""
Base object if memory is from some other object.
@@ -2683,10 +2679,15 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
Notes
-----
- Starting in NumPy 1.9, astype method now returns an error if the string
- dtype to cast to is not long enough in 'safe' casting mode to hold the max
- value of integer/float array that is being casted. Previously the casting
- was allowed even if the result was truncated.
+ .. versionchanged:: 1.17.0
+ Casting between a simple data type and a structured one is possible only
+ for "unsafe" casting. Casting to multiple fields is allowed, but
+ casting from multiple fields is not.
+
+ .. versionchanged:: 1.9.0
+ Casting from numeric to string types in 'safe' casting mode requires
+      that the string dtype length is long enough to store the maximum
+ integer/float value converted.
Raises
------
@@ -3223,87 +3224,6 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
"""))
-add_newdoc('numpy.core.multiarray', 'shares_memory',
- """
- shares_memory(a, b, max_work=None)
-
- Determine if two arrays share memory
-
- Parameters
- ----------
- a, b : ndarray
- Input arrays
- max_work : int, optional
- Effort to spend on solving the overlap problem (maximum number
- of candidate solutions to consider). The following special
- values are recognized:
-
- max_work=MAY_SHARE_EXACT (default)
- The problem is solved exactly. In this case, the function returns
- True only if there is an element shared between the arrays.
- max_work=MAY_SHARE_BOUNDS
- Only the memory bounds of a and b are checked.
-
- Raises
- ------
- numpy.TooHardError
- Exceeded max_work.
-
- Returns
- -------
- out : bool
-
- See Also
- --------
- may_share_memory
-
- Examples
- --------
- >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
- False
-
- """)
-
-
-add_newdoc('numpy.core.multiarray', 'may_share_memory',
- """
- may_share_memory(a, b, max_work=None)
-
- Determine if two arrays might share memory
-
- A return of True does not necessarily mean that the two arrays
- share any element. It just means that they *might*.
-
- Only the memory bounds of a and b are checked by default.
-
- Parameters
- ----------
- a, b : ndarray
- Input arrays
- max_work : int, optional
- Effort to spend on solving the overlap problem. See
- `shares_memory` for details. Default for ``may_share_memory``
- is to do a bounds check.
-
- Returns
- -------
- out : bool
-
- See Also
- --------
- shares_memory
-
- Examples
- --------
- >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
- False
- >>> x = np.zeros([3, 4])
- >>> np.may_share_memory(x[:,0], x[:,1])
- True
-
- """)
-
-
add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder',
"""
arr.newbyteorder(new_order='S')
@@ -3405,81 +3325,6 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('put',
"""))
-add_newdoc('numpy.core.multiarray', 'copyto',
- """
- copyto(dst, src, casting='same_kind', where=True)
-
- Copies values from one array to another, broadcasting as necessary.
-
- Raises a TypeError if the `casting` rule is violated, and if
- `where` is provided, it selects which elements to copy.
-
- .. versionadded:: 1.7.0
-
- Parameters
- ----------
- dst : ndarray
- The array into which values are copied.
- src : array_like
- The array from which values are copied.
- casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
- Controls what kind of data casting may occur when copying.
-
- * 'no' means the data types should not be cast at all.
- * 'equiv' means only byte-order changes are allowed.
- * 'safe' means only casts which can preserve values are allowed.
- * 'same_kind' means only safe casts or casts within a kind,
- like float64 to float32, are allowed.
- * 'unsafe' means any data conversions may be done.
- where : array_like of bool, optional
- A boolean array which is broadcasted to match the dimensions
- of `dst`, and selects elements to copy from `src` to `dst`
- wherever it contains the value True.
-
- """)
-
-add_newdoc('numpy.core.multiarray', 'putmask',
- """
- putmask(a, mask, values)
-
- Changes elements of an array based on conditional and input values.
-
- Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
-
- If `values` is not the same size as `a` and `mask` then it will repeat.
- This gives behavior different from ``a[mask] = values``.
-
- Parameters
- ----------
- a : array_like
- Target array.
- mask : array_like
- Boolean mask array. It has to be the same shape as `a`.
- values : array_like
- Values to put into `a` where `mask` is True. If `values` is smaller
- than `a` it will be repeated.
-
- See Also
- --------
- place, put, take, copyto
-
- Examples
- --------
- >>> x = np.arange(6).reshape(2, 3)
- >>> np.putmask(x, x>2, x**2)
- >>> x
- array([[ 0, 1, 2],
- [ 9, 16, 25]])
-
- If `values` is smaller than `a` it is repeated:
-
- >>> x = np.arange(5)
- >>> np.putmask(x, x>1, [-33, -44])
- >>> x
- array([ 0, 1, -33, -44, -33])
-
- """)
-
add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel',
"""
diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py
index 1d3bb5584..c7c18fbfc 100644
--- a/numpy/core/_internal.py
+++ b/numpy/core/_internal.py
@@ -8,6 +8,7 @@ from __future__ import division, absolute_import, print_function
import re
import sys
+import platform
from numpy.compat import unicode
from numpy.core.overrides import set_module
@@ -17,6 +18,8 @@ try:
except ImportError:
ctypes = None
+IS_PYPY = platform.python_implementation() == 'PyPy'
+
if (sys.byteorder == 'little'):
_nbo = b'<'
else:
@@ -889,7 +892,12 @@ def npy_ctypes_check(cls):
try:
# ctypes class are new-style, so have an __mro__. This probably fails
# for ctypes classes with multiple inheritance.
- ctype_base = cls.__mro__[-2]
+ if IS_PYPY:
+ # (..., _ctypes.basics._CData, Bufferable, object)
+ ctype_base = cls.__mro__[-3]
+ else:
+            # (..., _ctypes._CData, object)
+ ctype_base = cls.__mro__[-2]
# right now, they're part of the _ctypes module
return 'ctypes' in ctype_base.__module__
except Exception:
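
The PyPy branch added above keys off the layout of a ctypes class's method
resolution order, which differs between interpreter implementations. On
CPython, the structure the heuristic inspects can be printed directly
(output illustrative):

    import ctypes

    # CPython: (..., _ctypes._CData, object), so __mro__[-2] is the
    # _ctypes base class the check looks for.
    print(ctypes.c_int.__mro__)
    print(ctypes.c_int.__mro__[-2].__module__)   # '_ctypes'
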
diff --git a/numpy/core/function_base.py b/numpy/core/function_base.py
index e3f1046cc..804452627 100644
--- a/numpy/core/function_base.py
+++ b/numpy/core/function_base.py
@@ -428,6 +428,13 @@ def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0):
#always succeed
+def _add_docstring(obj, doc):
+ try:
+ add_docstring(obj, doc)
+ except Exception:
+ pass
+
+
def add_newdoc(place, obj, doc):
"""
Adds documentation to obj which is in module place.
@@ -442,21 +449,19 @@ def add_newdoc(place, obj, doc):
sequence of length two --> [(method1, docstring1),
(method2, docstring2), ...]
- This routine never raises an error.
+ This routine never raises an error if the docstring can't be written, but
+ will raise an error if the object being documented does not exist.
This routine cannot modify read-only docstrings, as appear
in new-style classes or built-in functions. Because this
routine never raises an error the caller must check manually
that the docstrings were changed.
"""
- try:
- new = getattr(__import__(place, globals(), {}, [obj]), obj)
- if isinstance(doc, str):
- add_docstring(new, doc.strip())
- elif isinstance(doc, tuple):
- add_docstring(getattr(new, doc[0]), doc[1].strip())
- elif isinstance(doc, list):
- for val in doc:
- add_docstring(getattr(new, val[0]), val[1].strip())
- except Exception:
- pass
+ new = getattr(__import__(place, globals(), {}, [obj]), obj)
+ if isinstance(doc, str):
+ _add_docstring(new, doc.strip())
+ elif isinstance(doc, tuple):
+ _add_docstring(getattr(new, doc[0]), doc[1].strip())
+ elif isinstance(doc, list):
+ for val in doc:
+ _add_docstring(getattr(new, val[0]), val[1].strip())
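
The refactor above narrows the old blanket ``try``/``except``: failures to
*write* a docstring are still swallowed inside ``_add_docstring``, but a
lookup of a nonexistent object now propagates. A sketch of the resulting
behavior (the misspelled name is deliberately hypothetical):

    from numpy.core.function_base import add_newdoc

    # Misspelled object names now raise instead of passing silently.
    try:
        add_newdoc('numpy.core.multiarray', 'no_such_object', 'doc')
    except AttributeError as e:
        print('caught:', e)
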
diff --git a/numpy/core/include/numpy/ndarrayobject.h b/numpy/core/include/numpy/ndarrayobject.h
index 45f008b1d..2cc7ced35 100644
--- a/numpy/core/include/numpy/ndarrayobject.h
+++ b/numpy/core/include/numpy/ndarrayobject.h
@@ -233,10 +233,10 @@ static NPY_INLINE int
NPY_TITLE_KEY_check(PyObject *key, PyObject *value)
{
PyObject *title;
- if (PyTuple_GET_SIZE(value) != 3) {
+ if (PyTuple_Size(value) != 3) {
return 0;
}
- title = PyTuple_GET_ITEM(value, 2);
+ title = PyTuple_GetItem(value, 2);
if (key == title) {
return 1;
}
diff --git a/numpy/core/multiarray.py b/numpy/core/multiarray.py
index 790896922..205d362ec 100644
--- a/numpy/core/multiarray.py
+++ b/numpy/core/multiarray.py
@@ -7,6 +7,7 @@ by importing from the extension module.
"""
import functools
+import sys
import warnings
from . import overrides
@@ -15,7 +16,7 @@ import numpy as np
from numpy.core._multiarray_umath import *
from numpy.core._multiarray_umath import (
_fastCopyAndTranspose, _flagdict, _insert, _reconstruct, _vec_string,
- _ARRAY_API, _monotonicity
+ _ARRAY_API, _monotonicity, _get_ndarray_c_version
)
__all__ = [
@@ -30,15 +31,17 @@ __all__ = [
'count_nonzero', 'c_einsum', 'datetime_as_string', 'datetime_data',
'digitize', 'dot', 'dragon4_positional', 'dragon4_scientific', 'dtype',
'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat',
- 'frombuffer', 'fromfile', 'fromiter', 'fromstring', 'getbuffer', 'inner',
+ 'frombuffer', 'fromfile', 'fromiter', 'fromstring', 'inner',
'int_asbuffer', 'interp', 'interp_complex', 'is_busday', 'lexsort',
'matmul', 'may_share_memory', 'min_scalar_type', 'ndarray', 'nditer',
- 'nested_iters', 'newbuffer', 'normalize_axis_index', 'packbits',
+ 'nested_iters', 'normalize_axis_index', 'packbits',
'promote_types', 'putmask', 'ravel_multi_index', 'result_type', 'scalar',
'set_datetimeparse_function', 'set_legacy_print_mode', 'set_numeric_ops',
'set_string_function', 'set_typeDict', 'shares_memory', 'test_interrupt',
'tracemalloc_domain', 'typeinfo', 'unpackbits', 'unravel_index', 'vdot',
'where', 'zeros']
+if sys.version_info.major < 3:
+ __all__ += ['newbuffer', 'getbuffer']
# For backward compatibility, make sure pickle imports these functions from here
_reconstruct.__module__ = 'numpy.core.multiarray'
@@ -489,11 +492,15 @@ def can_cast(from_, to, casting=None):
Notes
-----
- Starting in NumPy 1.9, can_cast function now returns False in 'safe'
- casting mode for integer/float dtype and string dtype if the string dtype
- length is not long enough to store the max integer/float value converted
- to a string. Previously can_cast in 'safe' mode returned True for
- integer/float dtype and a string dtype of any length.
+ .. versionchanged:: 1.17.0
+ Casting between a simple data type and a structured one is possible only
+ for "unsafe" casting. Casting to multiple fields is allowed, but
+ casting from multiple fields is not.
+
+ .. versionchanged:: 1.9.0
+ Casting from numeric to string types in 'safe' casting mode requires
+ that the string dtype length is long enough to store the maximum
+ integer/float value converted.
See also
--------
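
The revised ``can_cast`` notes above correspond to observable behavior,
asserted by the new tests in ``test_numeric.py`` later in this patch:

    import numpy as np

    # Simple <-> structured requires 'unsafe' casting.
    print(np.can_cast('i4', 'i4,i4'))                           # False
    print(np.can_cast('i4', 'i4,i4', casting='unsafe'))         # True

    # Structured -> simple additionally requires a single field.
    print(np.can_cast([('f1', 'i4')], 'i4', casting='unsafe'))  # True
    print(np.can_cast('i4,i4', 'i4', casting='unsafe'))         # False
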
diff --git a/numpy/core/src/common/npy_ctypes.h b/numpy/core/src/common/npy_ctypes.h
index f26db9e05..c0cc4f1a1 100644
--- a/numpy/core/src/common/npy_ctypes.h
+++ b/numpy/core/src/common/npy_ctypes.h
@@ -32,6 +32,7 @@ npy_ctypes_check(PyTypeObject *obj)
}
ret = PyObject_IsTrue(ret_obj);
+ Py_DECREF(ret_obj);
if (ret == -1) {
goto fail;
}
diff --git a/numpy/core/src/multiarray/alloc.c b/numpy/core/src/multiarray/alloc.c
index 6755095d7..21a60914e 100644
--- a/numpy/core/src/multiarray/alloc.c
+++ b/numpy/core/src/multiarray/alloc.c
@@ -25,10 +25,14 @@
#include <assert.h>
-#ifdef HAVE_SYS_MMAN_H
+#ifdef NPY_OS_LINUX
#include <sys/mman.h>
-#if defined MADV_HUGEPAGE && defined HAVE_MADVISE
-#define HAVE_MADV_HUGEPAGE
+#ifndef MADV_HUGEPAGE
+/*
+ * Use code 14 (MADV_HUGEPAGE) if it isn't defined. This gives a chance of
+ * enabling huge pages even if built with linux kernel < 2.6.38
+ */
+#define MADV_HUGEPAGE 14
#endif
#endif
@@ -74,11 +78,15 @@ _npy_alloc_cache(npy_uintp nelem, npy_uintp esz, npy_uint msz,
#ifdef _PyPyGC_AddMemoryPressure
_PyPyPyGC_AddMemoryPressure(nelem * esz);
#endif
-#ifdef HAVE_MADV_HUGEPAGE
+#ifdef NPY_OS_LINUX
/* allow kernel allocating huge pages for large arrays */
if (NPY_UNLIKELY(nelem * esz >= ((1u<<22u)))) {
npy_uintp offset = 4096u - (npy_uintp)p % (4096u);
npy_uintp length = nelem * esz - offset;
+ /**
+ * Intentionally not checking for errors that may be returned by
+ * older kernel versions; optimistically tries enabling huge pages.
+ */
madvise((void*)((npy_uintp)p + offset), length, MADV_HUGEPAGE);
}
#endif
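
The hunk above defines ``MADV_HUGEPAGE`` unconditionally on Linux and calls
``madvise`` optimistically for allocations of at least 4 MiB (``1u << 22u``).
Whether the kernel honors the hint depends on its transparent-huge-pages
mode, which can be checked from userspace; a sketch assuming the standard
sysfs path:

    # madvise(..., MADV_HUGEPAGE) only takes effect in 'always' or
    # 'madvise' mode.
    try:
        with open('/sys/kernel/mm/transparent_hugepage/enabled') as f:
            print(f.read().strip())   # e.g. 'always [madvise] never'
    except FileNotFoundError:
        print('transparent huge pages not available on this kernel')
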
diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c
index 97aaee93d..d20dd639d 100644
--- a/numpy/core/src/multiarray/arrayobject.c
+++ b/numpy/core/src/multiarray/arrayobject.c
@@ -1163,8 +1163,10 @@ _void_compare(PyArrayObject *self, PyArrayObject *other, int cmp_op)
newdims.ptr = dimensions;
newdims.len = result_ndim+1;
- memcpy(dimensions, PyArray_DIMS((PyArrayObject *)temp),
- sizeof(npy_intp)*result_ndim);
+ if (result_ndim) {
+ memcpy(dimensions, PyArray_DIMS((PyArrayObject *)temp),
+ sizeof(npy_intp)*result_ndim);
+ }
dimensions[result_ndim] = -1;
temp2 = PyArray_Newshape((PyArrayObject *)temp,
&newdims, NPY_ANYORDER);
diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c
index 33a706412..3407c537c 100644
--- a/numpy/core/src/multiarray/convert_datatype.c
+++ b/numpy/core/src/multiarray/convert_datatype.c
@@ -680,15 +680,82 @@ NPY_NO_EXPORT npy_bool
PyArray_CanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to,
NPY_CASTING casting)
{
- /* Fast path for unsafe casts or basic types */
- if (casting == NPY_UNSAFE_CASTING ||
- (NPY_LIKELY(from->type_num < NPY_OBJECT) &&
- NPY_LIKELY(from->type_num == to->type_num) &&
- NPY_LIKELY(from->byteorder == to->byteorder))) {
+ /*
+ * Fast paths for equality and for basic types.
+ */
+ if (from == to ||
+ ((NPY_LIKELY(PyDataType_ISNUMBER(from)) ||
+ PyDataType_ISOBJECT(from)) &&
+ NPY_LIKELY(from->type_num == to->type_num) &&
+ NPY_LIKELY(from->byteorder == to->byteorder))) {
+ return 1;
+ }
+ /*
+ * Cases with subarrays and fields need special treatment.
+ */
+ if (PyDataType_HASFIELDS(from)) {
+ /*
+ * If from is a structured data type, then it can be cast to a simple
+ * non-object one only for unsafe casting *and* if it has a single
+ * field; recurse just in case the single field is itself structured.
+ */
+ if (!PyDataType_HASFIELDS(to) && !PyDataType_ISOBJECT(to)) {
+ if (casting == NPY_UNSAFE_CASTING &&
+ PyDict_Size(from->fields) == 1) {
+ Py_ssize_t ppos = 0;
+ PyObject *tuple;
+ PyArray_Descr *field;
+ PyDict_Next(from->fields, &ppos, NULL, &tuple);
+ field = (PyArray_Descr *)PyTuple_GET_ITEM(tuple, 0);
+ /*
+ * For a subarray, we need to get the underlying type;
+ * since we already are casting unsafely, we can ignore
+ * the shape.
+ */
+ if (PyDataType_HASSUBARRAY(field)) {
+ field = field->subarray->base;
+ }
+ return PyArray_CanCastTypeTo(field, to, casting);
+ }
+ else {
+ return 0;
+ }
+ }
+ /*
+ * Casting from one structured data type to another depends on the fields;
+ * we pass that case on to the EquivTypenums case below.
+ *
+ * TODO: move that part up here? Need to check whether equivalent type
+     * numbers is an additional constraint that is needed.
+ *
+ * TODO/FIXME: For now, always allow structured to structured for unsafe
+ * casting; this is not correct, but needed since the treatment in can_cast
+ * below got out of sync with astype; see gh-13667.
+ */
+ if (casting == NPY_UNSAFE_CASTING) {
+ return 1;
+ }
+ }
+ else if (PyDataType_HASFIELDS(to)) {
+ /*
+ * If "from" is a simple data type and "to" has fields, then only
+     * unsafe casting works (and that always works, even to multiple fields).
+ */
+ return casting == NPY_UNSAFE_CASTING;
+ }
+ /*
+     * Everything else we consider castable with unsafe casting for now.
+     * FIXME: ensure what we do here is consistent with "astype",
+     * i.e., deal more correctly with subarrays and user-defined dtypes.
+ */
+ else if (casting == NPY_UNSAFE_CASTING) {
return 1;
}
- /* Equivalent types can be cast with any value of 'casting' */
- else if (PyArray_EquivTypenums(from->type_num, to->type_num)) {
+ /*
+ * Equivalent simple types can be cast with any value of 'casting', but
+ * we need to be careful about structured to structured.
+ */
+ if (PyArray_EquivTypenums(from->type_num, to->type_num)) {
/* For complicated case, use EquivTypes (for now) */
if (PyTypeNum_ISUSERDEF(from->type_num) ||
from->subarray != NULL) {
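
The single-field recursion and subarray unwrapping described in the comments
above show up at the Python level as follows (cases taken from the new
``test_can_cast_structured_to_simple`` test later in this patch):

    import numpy as np

    # A single field inside a single field is unwrapped recursively.
    print(np.can_cast([('f1', [('x', 'i4')])], 'i4', casting='unsafe'))  # True

    # A subarray field is unwrapped to its base type, ignoring the shape.
    print(np.can_cast([('f0', '(3,)i4')], 'i4', casting='unsafe'))       # True

    # But a subarray whose base itself has multiple fields still fails.
    print(np.can_cast([('f0', 'i4,i4', (2,))], 'i4', casting='unsafe'))  # False
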
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index c8a78abfc..e72e602c1 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -746,6 +746,14 @@ discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it,
_dealloc_cached_buffer_info(obj);
return 0;
}
+ else if (PyErr_Occurred()) {
+ if (PyErr_ExceptionMatches(PyExc_BufferError) ||
+ PyErr_ExceptionMatches(PyExc_TypeError)) {
+ PyErr_Clear();
+ } else {
+ return -1;
+ }
+ }
else if (PyObject_GetBuffer(obj, &buffer_view, PyBUF_SIMPLE) == 0) {
d[0] = buffer_view.len;
*maxndim = 1;
@@ -753,8 +761,13 @@ discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it,
_dealloc_cached_buffer_info(obj);
return 0;
}
- else {
- PyErr_Clear();
+ else if (PyErr_Occurred()) {
+ if (PyErr_ExceptionMatches(PyExc_BufferError) ||
+ PyErr_ExceptionMatches(PyExc_TypeError)) {
+ PyErr_Clear();
+ } else {
+ return -1;
+ }
}
}
@@ -912,7 +925,7 @@ PyArray_NewFromDescr_int(PyTypeObject *subtype, PyArray_Descr *descr, int nd,
int allow_emptystring)
{
PyArrayObject_fields *fa;
- int i, is_empty;
+ int i;
npy_intp nbytes;
if (descr->subarray) {
@@ -966,7 +979,6 @@ PyArray_NewFromDescr_int(PyTypeObject *subtype, PyArray_Descr *descr, int nd,
}
/* Check dimensions and multiply them to nbytes */
- is_empty = 0;
for (i = 0; i < nd; i++) {
npy_intp dim = dims[i];
@@ -975,7 +987,6 @@ PyArray_NewFromDescr_int(PyTypeObject *subtype, PyArray_Descr *descr, int nd,
* Compare to PyArray_OverflowMultiplyList that
* returns 0 in this case.
*/
- is_empty = 1;
continue;
}
@@ -1033,7 +1044,9 @@ PyArray_NewFromDescr_int(PyTypeObject *subtype, PyArray_Descr *descr, int nd,
goto fail;
}
fa->strides = fa->dimensions + nd;
- memcpy(fa->dimensions, dims, sizeof(npy_intp)*nd);
+ if (nd) {
+ memcpy(fa->dimensions, dims, sizeof(npy_intp)*nd);
+ }
if (strides == NULL) { /* fill it in */
_array_fill_strides(fa->strides, dims, nd, descr->elsize,
flags, &(fa->flags));
@@ -1043,7 +1056,9 @@ PyArray_NewFromDescr_int(PyTypeObject *subtype, PyArray_Descr *descr, int nd,
* we allow strides even when we create
* the memory, but be careful with this...
*/
- memcpy(fa->strides, strides, sizeof(npy_intp)*nd);
+ if (nd) {
+ memcpy(fa->strides, strides, sizeof(npy_intp)*nd);
+ }
}
}
else {
@@ -1058,8 +1073,8 @@ PyArray_NewFromDescr_int(PyTypeObject *subtype, PyArray_Descr *descr, int nd,
* (a.data) doesn't work as it should.
* Could probably just allocate a few bytes here. -- Chuck
*/
- if (is_empty) {
- nbytes = descr->elsize;
+ if (nbytes == 0) {
+ nbytes = descr->elsize ? descr->elsize : 1;
}
/*
* It is bad to have uninitialized OBJECT pointers
@@ -2539,7 +2554,9 @@ PyArray_FromInterface(PyObject *origin)
goto fail;
}
}
- memcpy(PyArray_STRIDES(ret), strides, n*sizeof(npy_intp));
+ if (n) {
+ memcpy(PyArray_STRIDES(ret), strides, n*sizeof(npy_intp));
+ }
}
PyArray_UpdateFlags(ret, NPY_ARRAY_UPDATE_ALL);
Py_DECREF(iface);
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index b6d33a74a..e7a4b6c72 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -470,9 +470,6 @@ _convert_from_array_descr(PyObject *obj, int align)
else {
ret = PyArray_DescrConverter(PyTuple_GET_ITEM(item, 1), &conv);
}
- if (ret == NPY_FAIL) {
- PyObject_Print(PyTuple_GET_ITEM(item, 1), stderr, 0);
- }
}
else if (PyTuple_GET_SIZE(item) == 3) {
newobj = PyTuple_GetSlice(item, 1, 3);
@@ -490,6 +487,7 @@ _convert_from_array_descr(PyObject *obj, int align)
if (ret == NPY_FAIL) {
goto fail;
}
+
if ((PyDict_GetItem(fields, name) != NULL)
|| (title
&& PyBaseString_Check(title)
@@ -1294,6 +1292,12 @@ _convert_from_dict(PyObject *obj, int align)
goto fail;
}
}
+
+ Py_XDECREF(fields);
+ Py_XDECREF(names);
+ Py_XDECREF(descrs);
+ Py_XDECREF(offsets);
+ Py_XDECREF(titles);
return new;
fail:
diff --git a/numpy/core/src/multiarray/dragon4.c b/numpy/core/src/multiarray/dragon4.c
index 14dfa71c2..8d52672e3 100644
--- a/numpy/core/src/multiarray/dragon4.c
+++ b/numpy/core/src/multiarray/dragon4.c
@@ -874,7 +874,7 @@ BigInt_Pow2(BigInt *result, npy_uint32 exponent)
result->length = blockIdx + 1;
bitIdx = (exponent % 32);
- result->blocks[blockIdx] |= (1 << bitIdx);
+ result->blocks[blockIdx] |= ((npy_uint32)1 << bitIdx);
}
/*
diff --git a/numpy/core/src/multiarray/getset.c b/numpy/core/src/multiarray/getset.c
index 24962da8a..c5577c196 100644
--- a/numpy/core/src/multiarray/getset.c
+++ b/numpy/core/src/multiarray/getset.c
@@ -80,8 +80,10 @@ array_shape_set(PyArrayObject *self, PyObject *val)
return -1;
}
((PyArrayObject_fields *)self)->strides = PyArray_DIMS(self) + nd;
- memcpy(PyArray_DIMS(self), PyArray_DIMS(ret), nd*sizeof(npy_intp));
- memcpy(PyArray_STRIDES(self), PyArray_STRIDES(ret), nd*sizeof(npy_intp));
+ if (nd) {
+ memcpy(PyArray_DIMS(self), PyArray_DIMS(ret), nd*sizeof(npy_intp));
+ memcpy(PyArray_STRIDES(self), PyArray_STRIDES(ret), nd*sizeof(npy_intp));
+ }
}
else {
((PyArrayObject_fields *)self)->dimensions = NULL;
@@ -172,7 +174,9 @@ array_strides_set(PyArrayObject *self, PyObject *obj)
"compatible with available memory");
goto fail;
}
- memcpy(PyArray_STRIDES(self), newstrides.ptr, sizeof(npy_intp)*newstrides.len);
+ if (newstrides.len) {
+ memcpy(PyArray_STRIDES(self), newstrides.ptr, sizeof(npy_intp)*newstrides.len);
+ }
PyArray_UpdateFlags(self, NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_F_CONTIGUOUS |
NPY_ARRAY_ALIGNED);
npy_free_cache_dim_obj(newstrides);
@@ -669,8 +673,10 @@ array_struct_get(PyArrayObject *self)
return PyErr_NoMemory();
}
inter->strides = inter->shape + PyArray_NDIM(self);
- memcpy(inter->shape, PyArray_DIMS(self), sizeof(npy_intp)*PyArray_NDIM(self));
- memcpy(inter->strides, PyArray_STRIDES(self), sizeof(npy_intp)*PyArray_NDIM(self));
+ if (PyArray_NDIM(self)) {
+ memcpy(inter->shape, PyArray_DIMS(self), sizeof(npy_intp)*PyArray_NDIM(self));
+ memcpy(inter->strides, PyArray_STRIDES(self), sizeof(npy_intp)*PyArray_NDIM(self));
+ }
}
else {
inter->shape = NULL;
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index 17edd2bbf..a7b4ff236 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -611,9 +611,9 @@ prepare_index(PyArrayObject *self, PyObject *index,
/* Convert the boolean array into multiple integer ones */
n = _nonzero_indices((PyObject *)arr, nonzero_result);
- Py_DECREF(arr);
if (n < 0) {
+ Py_DECREF(arr);
goto failed_building_indices;
}
@@ -624,6 +624,7 @@ prepare_index(PyArrayObject *self, PyObject *index,
for (i=0; i < n; i++) {
Py_DECREF(nonzero_result[i]);
}
+ Py_DECREF(arr);
goto failed_building_indices;
}
@@ -637,6 +638,7 @@ prepare_index(PyArrayObject *self, PyObject *index,
used_ndim += 1;
curr_idx += 1;
}
+ Py_DECREF(arr);
/* All added indices have 1 dimension */
if (fancy_ndim < 1) {
@@ -1440,7 +1442,7 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view)
}
/* next check for a list of field names */
else if (PySequence_Check(ind) && !PyTuple_Check(ind)) {
- int seqlen, i;
+ npy_intp seqlen, i;
PyObject *name = NULL, *tup;
PyObject *fields, *names;
PyArray_Descr *view_dtype;
diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c
index c81e53add..c3040b473 100644
--- a/numpy/core/src/multiarray/methods.c
+++ b/numpy/core/src/multiarray/methods.c
@@ -2007,7 +2007,9 @@ array_setstate(PyArrayObject *self, PyObject *args)
return PyErr_NoMemory();
}
fa->strides = PyArray_DIMS(self) + nd;
- memcpy(PyArray_DIMS(self), dimensions, sizeof(npy_intp)*nd);
+ if (nd) {
+ memcpy(PyArray_DIMS(self), dimensions, sizeof(npy_intp)*nd);
+ }
_array_fill_strides(PyArray_STRIDES(self), dimensions, nd,
PyArray_DESCR(self)->elsize,
(is_f_order ? NPY_ARRAY_F_CONTIGUOUS :
@@ -2041,7 +2043,9 @@ array_setstate(PyArrayObject *self, PyObject *args)
PyArray_DESCR(self)->elsize,
datastr, PyArray_DESCR(self)->elsize,
numels, 1, self);
- if (!PyArray_ISEXTENDED(self)) {
+ if (!(PyArray_ISEXTENDED(self) ||
+ PyArray_DESCR(self)->metadata ||
+ PyArray_DESCR(self)->c_metadata)) {
fa->descr = PyArray_DescrFromType(
PyArray_DESCR(self)->type_num);
}
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index 29440ce78..88ed4664a 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -273,7 +273,9 @@ PyArray_AsCArray(PyObject **op, void *ptr, npy_intp *dims, int nd,
}
*((char ****)ptr) = ptr3;
}
- memcpy(dims, PyArray_DIMS(ap), nd*sizeof(npy_intp));
+ if (nd) {
+ memcpy(dims, PyArray_DIMS(ap), nd*sizeof(npy_intp));
+ }
*op = (PyObject *)ap;
return 0;
}
diff --git a/numpy/core/src/umath/reduction.c b/numpy/core/src/umath/reduction.c
index 791d3693f..654893511 100644
--- a/numpy/core/src/umath/reduction.c
+++ b/numpy/core/src/umath/reduction.c
@@ -54,7 +54,9 @@ allocate_reduce_result(PyArrayObject *arr, npy_bool *axis_flags,
/* Build the new strides and shape */
stride = dtype->elsize;
- memcpy(shape, arr_shape, ndim * sizeof(shape[0]));
+ if (ndim) {
+ memcpy(shape, arr_shape, ndim * sizeof(shape[0]));
+ }
for (idim = ndim-1; idim >= 0; --idim) {
npy_intp i_perm = strideperm[idim].perm;
if (axis_flags[i_perm]) {
@@ -325,7 +327,9 @@ PyArray_InitializeReduceResult(
*/
shape = PyArray_SHAPE(op_view);
nreduce_axes = 0;
- memcpy(shape_orig, shape, ndim * sizeof(npy_intp));
+ if (ndim) {
+ memcpy(shape_orig, shape, ndim * sizeof(npy_intp));
+ }
for (idim = 0; idim < ndim; ++idim) {
if (axis_flags[idim]) {
if (shape[idim] == 0) {
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index a33361218..ff0fb9eff 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -138,6 +138,18 @@ class TestRecord(object):
'titles': ['RRed pixel', 'Blue pixel']})
assert_dtype_not_equal(a, b)
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+ def test_refcount_dictionary_setting(self):
+ names = ["name1"]
+ formats = ["f8"]
+ titles = ["t1"]
+ offsets = [0]
+ d = dict(names=names, formats=formats, titles=titles, offsets=offsets)
+ refcounts = {k: sys.getrefcount(i) for k, i in d.items()}
+ np.dtype(d)
+ refcounts_new = {k: sys.getrefcount(i) for k, i in d.items()}
+ assert refcounts == refcounts_new
+
def test_mutate(self):
# Mutating a dtype should reset the cached hash value
a = np.dtype([('yo', int)])
diff --git a/numpy/core/tests/test_indexing.py b/numpy/core/tests/test_indexing.py
index 99792cee7..f7485c3f7 100644
--- a/numpy/core/tests/test_indexing.py
+++ b/numpy/core/tests/test_indexing.py
@@ -249,6 +249,15 @@ class TestIndexing(object):
[4, 0, 6],
[0, 8, 0]])
+ def test_boolean_indexing_list(self):
+ # Regression test for #13715. It's a use-after-free bug which the
+ # test won't directly catch, but it will show up in valgrind.
+ a = np.array([1, 2, 3])
+ b = [True, False, True]
+ # Two variants of the test because the first takes a fast path
+ assert_equal(a[b], [1, 3])
+ assert_equal(a[None, b], [[1, 3]])
+
def test_reverse_strides_and_subspace_bufferinit(self):
# This tests that the strides are not reversed for simple and
# subspace fancy indexing.
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 4550e303c..873aa9312 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -36,7 +36,7 @@ from numpy.testing import (
assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal,
assert_array_equal, assert_raises_regex, assert_array_almost_equal,
assert_allclose, IS_PYPY, HAS_REFCOUNT, assert_array_less, runstring,
- temppath, suppress_warnings
+ temppath, suppress_warnings, break_cycles,
)
from numpy.core.tests._locales import CommaDecimalPointLocale
@@ -131,6 +131,7 @@ class TestFlags(object):
assert_(vals.flags.writeable)
@pytest.mark.skipif(sys.version_info[0] < 3, reason="Python 2 always copies")
+ @pytest.mark.skipif(IS_PYPY, reason="PyPy always copies")
def test_writeable_pickle(self):
import pickle
# Small arrays will be copied without setting base.
@@ -3816,7 +3817,7 @@ class TestPickling(object):
a, pickle.loads(pickle.dumps(a, protocol=proto)),
err_msg="%r" % a)
del a, DATA, carray
- gc.collect()
+ break_cycles()
# check for reference leaks (gh-12793)
for ref in refs:
assert ref() is None
@@ -3872,6 +3873,17 @@ class TestPickling(object):
p = self._loads(s)
assert_equal(a, p)
+ def test_datetime64_byteorder(self):
+ original = np.array([['2015-02-24T00:00:00.000000000']], dtype='datetime64[ns]')
+
+ original_byte_reversed = original.copy(order='K')
+ original_byte_reversed.dtype = original_byte_reversed.dtype.newbyteorder('S')
+ original_byte_reversed.byteswap(inplace=True)
+
+ new = pickle.loads(pickle.dumps(original_byte_reversed))
+
+ assert_equal(original.dtype, new.dtype)
+
class TestFancyIndexing(object):
def test_list(self):
@@ -7159,6 +7171,7 @@ def test_array_interface_empty_shape():
assert_equal(arr1, arr2)
assert_equal(arr1, arr3)
+@pytest.mark.skipif(IS_PYPY, reason='PyDict_GetItemString(.., "data") segfaults')
def test_array_interface_offset():
arr = np.array([1, 2, 3], dtype='int32')
interface = dict(arr.__array_interface__)
@@ -7201,7 +7214,7 @@ class TestMemEventHook(object):
# needs to be larger then limit of small memory cacher in ctors.c
a = np.zeros(1000)
del a
- gc.collect()
+ break_cycles()
_multiarray_tests.test_pydatamem_seteventhook_end()
class TestMapIter(object):
@@ -7773,12 +7786,12 @@ class TestCTypes(object):
# `ctypes_ptr` should hold onto `arr`
del arr
- gc.collect()
+ break_cycles()
assert_(arr_ref() is not None, "ctypes pointer did not hold onto a reference")
# but when the `ctypes_ptr` object dies, so should `arr`
del ctypes_ptr
- gc.collect()
+ break_cycles()
assert_(arr_ref() is None, "unknowable whether ctypes pointer holds a reference")
@@ -7960,15 +7973,15 @@ class TestArrayFinalize(object):
assert_(isinstance(obj_subarray, RaisesInFinalize))
# reference should still be held by obj_arr
- gc.collect()
+ break_cycles()
assert_(obj_ref() is not None, "object should not already be dead")
del obj_arr
- gc.collect()
+ break_cycles()
assert_(obj_ref() is not None, "obj_arr should not hold the last reference")
del obj_subarray
- gc.collect()
+ break_cycles()
assert_(obj_ref() is None, "no references should remain")
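
Several tests above replace bare ``gc.collect()`` calls with
``break_cycles()``, imported from ``numpy.testing`` (the helper lands in
``numpy/testing/_private/utils.py`` per the diffstat; its body is not shown
in this excerpt). The motivation is that a single collection pass is not
guaranteed to free cycle-held objects, particularly on PyPy. A plausible
minimal sketch of such a helper, not the patch's actual implementation:

    import gc
    import platform

    def break_cycles():
        """Collect until cycle-held objects are freed (best effort on PyPy)."""
        gc.collect()
        if platform.python_implementation() == 'PyPy':
            # PyPy's GC makes no promise after one pass; run a few more so
            # finalizers and weakref callbacks have all fired.
            for _ in range(3):
                gc.collect()
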
diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py
index 26fd9c346..3a24ce55e 100644
--- a/numpy/core/tests/test_nditer.py
+++ b/numpy/core/tests/test_nditer.py
@@ -1864,7 +1864,7 @@ def test_iter_buffered_cast_structured_type():
# make sure multi-field struct type -> simple doesn't work
sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt)
- assert_raises(ValueError, lambda: (
+ assert_raises(TypeError, lambda: (
nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes='i4')))
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index 5827c97bd..ed02c1561 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -888,6 +888,41 @@ class TestTypes(object):
# Also test keyword arguments
assert_(np.can_cast(from_=np.int32, to=np.int64))
+ def test_can_cast_simple_to_structured(self):
+ # Non-structured can only be cast to structured in 'unsafe' mode.
+ assert_(not np.can_cast('i4', 'i4,i4'))
+ assert_(not np.can_cast('i4', 'i4,i2'))
+ assert_(np.can_cast('i4', 'i4,i4', casting='unsafe'))
+ assert_(np.can_cast('i4', 'i4,i2', casting='unsafe'))
+ # Even if there is just a single field which is OK.
+ assert_(not np.can_cast('i2', [('f1', 'i4')]))
+ assert_(not np.can_cast('i2', [('f1', 'i4')], casting='same_kind'))
+ assert_(np.can_cast('i2', [('f1', 'i4')], casting='unsafe'))
+ # It should be the same for recursive structured or subarrays.
+ assert_(not np.can_cast('i2', [('f1', 'i4,i4')]))
+ assert_(np.can_cast('i2', [('f1', 'i4,i4')], casting='unsafe'))
+ assert_(not np.can_cast('i2', [('f1', '(2,3)i4')]))
+ assert_(np.can_cast('i2', [('f1', '(2,3)i4')], casting='unsafe'))
+
+ def test_can_cast_structured_to_simple(self):
+ # Need unsafe casting for structured to simple.
+ assert_(not np.can_cast([('f1', 'i4')], 'i4'))
+ assert_(np.can_cast([('f1', 'i4')], 'i4', casting='unsafe'))
+ assert_(np.can_cast([('f1', 'i4')], 'i2', casting='unsafe'))
+ # Since it is unclear what is being cast, multiple fields to
+ # single should not work even for unsafe casting.
+ assert_(not np.can_cast('i4,i4', 'i4', casting='unsafe'))
+ # But a single field inside a single field is OK.
+ assert_(not np.can_cast([('f1', [('x', 'i4')])], 'i4'))
+ assert_(np.can_cast([('f1', [('x', 'i4')])], 'i4', casting='unsafe'))
+ # And a subarray is fine too - it will just take the first element
+ # (arguably not very consistently; might also take the first field).
+ assert_(not np.can_cast([('f0', '(3,)i4')], 'i4'))
+ assert_(np.can_cast([('f0', '(3,)i4')], 'i4', casting='unsafe'))
+ # But a structured subarray with multiple fields should fail.
+ assert_(not np.can_cast([('f0', ('i4,i4'), (2,))], 'i4',
+ casting='unsafe'))
+
def test_can_cast_values(self):
# gh-5917
for dt in np.sctypes['int'] + np.sctypes['uint']:
diff --git a/numpy/core/tests/test_numerictypes.py b/numpy/core/tests/test_numerictypes.py
index 71f7b7150..d0ff5578a 100644
--- a/numpy/core/tests/test_numerictypes.py
+++ b/numpy/core/tests/test_numerictypes.py
@@ -5,7 +5,7 @@ import itertools
import pytest
import numpy as np
-from numpy.testing import assert_, assert_equal, assert_raises
+from numpy.testing import assert_, assert_equal, assert_raises, IS_PYPY
# This is the structure of the table used for plain objects:
#
@@ -491,6 +491,7 @@ def test_issctype(rep, expected):
@pytest.mark.skipif(sys.flags.optimize > 1,
reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1")
+@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc")
class TestDocStrings(object):
def test_platform_dependent_aliases(self):
if np.int64 is np.int_:
diff --git a/numpy/doc/dispatch.py b/numpy/doc/dispatch.py
new file mode 100644
index 000000000..09a3e5134
--- /dev/null
+++ b/numpy/doc/dispatch.py
@@ -0,0 +1,271 @@
+""".. _dispatch_mechanism:
+
+Numpy's dispatch mechanism, introduced in numpy version v1.16, is the
+recommended approach for writing custom N-dimensional array containers that are
+compatible with the numpy API and provide custom implementations of numpy
+functionality. Applications include `dask <http://dask.pydata.org>`_ arrays, an
+N-dimensional array distributed across multiple nodes, and `cupy
+<https://docs-cupy.chainer.org/en/stable/>`_ arrays, an N-dimensional array on
+a GPU.
+
+To get a feel for writing custom array containers, we'll begin with a simple
+example that has rather narrow utility but illustrates the concepts involved.
+
+>>> import numpy as np
+>>> class DiagonalArray:
+... def __init__(self, N, value):
+... self._N = N
+... self._i = value
+... def __repr__(self):
+... return f"{self.__class__.__name__}(N={self._N}, value={self._i})"
+... def __array__(self):
+... return self._i * np.eye(self._N)
+...
+
+Our custom array can be instantiated like this:
+
+>>> arr = DiagonalArray(5, 1)
+>>> arr
+DiagonalArray(N=5, value=1)
+
+We can convert to a numpy array using :func:`numpy.array` or
+:func:`numpy.asarray`, which will call its ``__array__`` method to obtain a
+standard ``numpy.ndarray``.
+
+>>> np.asarray(arr)
+array([[1., 0., 0., 0., 0.],
+ [0., 1., 0., 0., 0.],
+ [0., 0., 1., 0., 0.],
+ [0., 0., 0., 1., 0.],
+ [0., 0., 0., 0., 1.]])
+
+If we operate on ``arr`` with a numpy function, numpy will again use the
+``__array__`` interface to convert it to an array and then apply the function
+in the usual way.
+
+>>> np.multiply(arr, 2)
+array([[2., 0., 0., 0., 0.],
+ [0., 2., 0., 0., 0.],
+ [0., 0., 2., 0., 0.],
+ [0., 0., 0., 2., 0.],
+ [0., 0., 0., 0., 2.]])
+
+
+Notice that the return type is a standard ``numpy.ndarray``.
+
+>>> type(np.multiply(arr, 2))
+<class 'numpy.ndarray'>
+
+How can we pass our custom array type through this function? Numpy allows a
+class to indicate that it would like to handle computations in a custom-defined
+way through the interfaces ``__array_ufunc__`` and ``__array_function__``. Let's
+take them one at a time, starting with ``__array_ufunc__``. This method covers
+:ref:`ufuncs`, a class of functions that includes, for example,
+:func:`numpy.multiply` and :func:`numpy.sin`.
+
+The ``__array_ufunc__`` method receives:
+
+- ``ufunc``, a function like ``numpy.multiply``
+- ``method``, a string, differentiating between ``numpy.multiply(...)`` and
+ variants like ``numpy.multiply.outer``, ``numpy.multiply.accumulate``, and so
+ on. For the common case, ``numpy.multiply(...)``, ``method == '__call__'``.
+- ``inputs``, which could be a mixture of different types
+- ``kwargs``, keyword arguments passed to the function
+
+For this example we will only handle the method ``'__call__'``.
+
+>>> from numbers import Number
+>>> class DiagonalArray:
+... def __init__(self, N, value):
+... self._N = N
+... self._i = value
+... def __repr__(self):
+... return f"{self.__class__.__name__}(N={self._N}, value={self._i})"
+... def __array__(self):
+... return self._i * np.eye(self._N)
+... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+... if method == '__call__':
+... N = None
+... scalars = []
+... for input in inputs:
+... if isinstance(input, Number):
+... scalars.append(input)
+... elif isinstance(input, self.__class__):
+... scalars.append(input._i)
+... if N is not None:
+... if N != self._N:
+... raise TypeError("inconsistent sizes")
+... else:
+... N = self._N
+... else:
+... return NotImplemented
+... return self.__class__(N, ufunc(*scalars, **kwargs))
+... else:
+... return NotImplemented
+...
+
+Now our custom array type passes through numpy functions.
+
+>>> arr = DiagonalArray(5, 1)
+>>> np.multiply(arr, 3)
+DiagonalArray(N=5, value=3)
+>>> np.add(arr, 3)
+DiagonalArray(N=5, value=4)
+>>> np.sin(arr)
+DiagonalArray(N=5, value=0.8414709848078965)
+
+At this point ``arr + 3`` does not work.
+
+>>> arr + 3
+TypeError: unsupported operand type(s) for +: 'DiagonalArray' and 'int'
+
+To support it, we need to define the Python interfaces ``__add__``, ``__lt__``,
+and so on to dispatch to the corresponding ufunc. We can achieve this
+conveniently by inheriting from the mixin
+:class:`~numpy.lib.mixins.NDArrayOperatorsMixin`.
+
+>>> import numpy.lib.mixins
+>>> class DiagonalArray(numpy.lib.mixins.NDArrayOperatorsMixin):
+... def __init__(self, N, value):
+... self._N = N
+... self._i = value
+... def __repr__(self):
+... return f"{self.__class__.__name__}(N={self._N}, value={self._i})"
+... def __array__(self):
+... return self._i * np.eye(self._N)
+... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+... if method == '__call__':
+... N = None
+... scalars = []
+... for input in inputs:
+... if isinstance(input, Number):
+... scalars.append(input)
+... elif isinstance(input, self.__class__):
+... scalars.append(input._i)
+... if N is not None:
+... if N != self._N:
+... raise TypeError("inconsistent sizes")
+... else:
+... N = self._N
+... else:
+... return NotImplemented
+... return self.__class__(N, ufunc(*scalars, **kwargs))
+... else:
+... return NotImplemented
+...
+
+>>> arr = DiagonalArray(5, 1)
+>>> arr + 3
+DiagonalArray(N=5, value=4)
+>>> arr > 0
+DiagonalArray(N=5, value=True)
+
+Now let's tackle ``__array_function__``. We'll create a dict that maps numpy
+functions to our custom variants.
+
+>>> HANDLED_FUNCTIONS = {}
+>>> class DiagonalArray(numpy.lib.mixins.NDArrayOperatorsMixin):
+... def __init__(self, N, value):
+... self._N = N
+... self._i = value
+... def __repr__(self):
+... return f"{self.__class__.__name__}(N={self._N}, value={self._i})"
+... def __array__(self):
+... return self._i * np.eye(self._N)
+... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+... if method == '__call__':
+... N = None
+... scalars = []
+... for input in inputs:
+... # In this case we accept only scalar numbers or DiagonalArrays.
+... if isinstance(input, Number):
+... scalars.append(input)
+... elif isinstance(input, self.__class__):
+... scalars.append(input._i)
+... if N is not None:
+... if N != self._N:
+... raise TypeError("inconsistent sizes")
+... else:
+... N = self._N
+... else:
+... return NotImplemented
+... return self.__class__(N, ufunc(*scalars, **kwargs))
+... else:
+... return NotImplemented
+... def __array_function__(self, func, types, args, kwargs):
+... if func not in HANDLED_FUNCTIONS:
+... return NotImplemented
+... # Note: this allows subclasses that don't override
+... # __array_function__ to handle DiagonalArray objects.
+... if not all(issubclass(t, self.__class__) for t in types):
+... return NotImplemented
+... return HANDLED_FUNCTIONS[func](*args, **kwargs)
+...
+
+A convenient pattern is to define a decorator ``implements`` that can be used
+to add functions to ``HANDLED_FUNCTIONS``.
+
+>>> def implements(np_function):
+... "Register an __array_function__ implementation for DiagonalArray objects."
+... def decorator(func):
+... HANDLED_FUNCTIONS[np_function] = func
+... return func
+... return decorator
+...
+
+Now we write implementations of numpy functions for ``DiagonalArray``.
+For completeness, to support the usage ``arr.sum()`` we would also add a method
+``sum`` that calls ``numpy.sum(self)``, and the same for ``mean``; a sketch of
+those methods follows the doctest below.
+
+>>> @implements(np.sum)
+... def sum(a):
+...     "Implementation of np.sum for DiagonalArray objects"
+...     return a._i * a._N
+...
+>>> @implements(np.mean)
+... def mean(a):
+...     "Implementation of np.mean for DiagonalArray objects"
+...     return a._i / a._N
+...
+>>> arr = DiagonalArray(5, 1)
+>>> np.sum(arr)
+5
+>>> np.mean(arr)
+0.2
+
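+As promised above, a minimal sketch of those methods (the subclass name
+``DiagonalArrayWithMethods`` is ours, purely for illustration) simply forwards
+to the corresponding numpy functions, which then dispatch back to our handlers:
+
+>>> class DiagonalArrayWithMethods(DiagonalArray):
+...     def sum(self):
+...         return np.sum(self)
+...     def mean(self):
+...         return np.mean(self)
+...
+>>> DiagonalArrayWithMethods(5, 1).sum()
+5
+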
+If the user tries to use any numpy functions not included in
+``HANDLED_FUNCTIONS``, a ``TypeError`` will be raised by numpy, indicating that
+this operation is not supported. For example, concatenating two
+``DiagonalArrays`` does not produce another diagonal array, so it is not
+supported.
+
+>>> np.concatenate([arr, arr])
+TypeError: no implementation found for 'numpy.concatenate' on types that implement __array_function__: [<class '__main__.DiagonalArray'>]
+
+Additionally, our implementations of ``sum`` and ``mean`` do not accept the
+optional arguments that numpy's implementation does.
+
+>>> np.sum(arr, axis=0)
+TypeError: sum() got an unexpected keyword argument 'axis'
+
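+If we did want to accept ``axis``, the registered implementation only has to
+take the keyword; here is a minimal sketch (this ``axis`` handling is our own
+illustration, not something the protocol requires):
+
+>>> @implements(np.sum)
+... def sum(a, axis=None):
+...     "np.sum for DiagonalArray objects, with minimal axis support"
+...     if axis is None:
+...         return a._i * a._N
+...     # summing a diagonal matrix along either axis yields its diagonal
+...     return a._i * np.ones(a._N)
+...
+>>> np.sum(arr, axis=0)
+array([1., 1., 1., 1., 1.])
+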
+The user always has the option of converting to a normal ``numpy.ndarray`` with
+:func:`numpy.asarray` and using standard numpy from there.
+
+>>> np.concatenate([np.asarray(arr), np.asarray(arr)])
+array([[1., 0., 0., 0., 0.],
+ [0., 1., 0., 0., 0.],
+ [0., 0., 1., 0., 0.],
+ [0., 0., 0., 1., 0.],
+ [0., 0., 0., 0., 1.],
+ [1., 0., 0., 0., 0.],
+ [0., 1., 0., 0., 0.],
+ [0., 0., 1., 0., 0.],
+ [0., 0., 0., 1., 0.],
+ [0., 0., 0., 0., 1.]])
+
+Refer to the `dask source code <https://github.com/dask/dask>`_ and
+`cupy source code <https://github.com/cupy/cupy>`_ for more fully-worked
+examples of custom array containers.
+
+See also `NEP 18 <http://www.numpy.org/neps/nep-0018-array-function-protocol.html>`_.
+"""
diff --git a/numpy/doc/structured_arrays.py b/numpy/doc/structured_arrays.py
index e92a06124..6d0ce49f9 100644
--- a/numpy/doc/structured_arrays.py
+++ b/numpy/doc/structured_arrays.py
@@ -335,10 +335,9 @@ structured datatype has just a single field::
>>> onefield = np.zeros(2, dtype=[('A', 'i4')])
>>> nostruct = np.zeros(2, dtype='i4')
>>> nostruct[:] = twofield
- ValueError: Can't cast from structure to non-structure, except if the structure only has a single field.
- >>> nostruct[:] = onefield
- >>> nostruct
- array([0, 0], dtype=int32)
+ Traceback (most recent call last):
+ ...
+ TypeError: Cannot cast scalar from dtype([('A', '<i4'), ('B', '<i4')]) to dtype('int32') according to the rule 'unsafe'
Assignment from other Structured Arrays
```````````````````````````````````````
diff --git a/numpy/f2py/tests/test_block_docstring.py b/numpy/f2py/tests/test_block_docstring.py
index 8fc072a5e..4f1678980 100644
--- a/numpy/f2py/tests/test_block_docstring.py
+++ b/numpy/f2py/tests/test_block_docstring.py
@@ -4,7 +4,7 @@ import sys
import pytest
from . import util
-from numpy.testing import assert_equal
+from numpy.testing import assert_equal, IS_PYPY
class TestBlockDocString(util.F2PyTest):
code = """
@@ -18,6 +18,7 @@ class TestBlockDocString(util.F2PyTest):
@pytest.mark.skipif(sys.platform=='win32',
reason='Fails with MinGW64 Gfortran (Issue #9673)')
+ @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc")
def test_block_docstring(self):
expected = "'i'-array(2,3)\n"
assert_equal(self.module.block.__doc__, expected)
diff --git a/numpy/lib/histograms.py b/numpy/lib/histograms.py
index 482eabe14..d69e04e80 100644
--- a/numpy/lib/histograms.py
+++ b/numpy/lib/histograms.py
@@ -918,7 +918,17 @@ def histogram(a, bins=10, range=None, normed=None, weights=None,
def _histogramdd_dispatcher(sample, bins=None, range=None, normed=None,
weights=None, density=None):
- return (sample, bins, weights)
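+    # A dispatcher yields every argument that might implement
+    # __array_function__: `sample` may be a single array or a sequence of
+    # coordinate arrays, and `bins` may be a plain integer, so each case is
+    # unwrapped accordingly below.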
+ if hasattr(sample, 'shape'): # same condition as used in histogramdd
+ yield sample
+ else:
+ for s in sample:
+ yield s
+ try:
+ for b in bins:
+ yield b
+ except TypeError:
+ pass
+ yield weights
@array_function_dispatch(_histogramdd_dispatcher)
diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py
index d412281ff..c17c39c8a 100644
--- a/numpy/lib/recfunctions.py
+++ b/numpy/lib/recfunctions.py
@@ -26,10 +26,13 @@ _check_fill_value = np.ma.core._check_fill_value
__all__ = [
- 'append_fields', 'drop_fields', 'find_duplicates',
- 'get_fieldstructure', 'join_by', 'merge_arrays',
- 'rec_append_fields', 'rec_drop_fields', 'rec_join',
- 'recursive_fill_fields', 'rename_fields', 'stack_arrays',
+ 'append_fields', 'apply_along_fields', 'assign_fields_by_name',
+ 'drop_fields', 'find_duplicates', 'flatten_descr',
+ 'get_fieldstructure', 'get_names', 'get_names_flat',
+ 'join_by', 'merge_arrays', 'rec_append_fields',
+ 'rec_drop_fields', 'rec_join', 'recursive_fill_fields',
+ 'rename_fields', 'repack_fields', 'require_fields',
+ 'stack_arrays', 'structured_to_unstructured', 'unstructured_to_structured',
]
@@ -57,11 +60,10 @@ def recursive_fill_fields(input, output):
Examples
--------
>>> from numpy.lib import recfunctions as rfn
- >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
+ >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', np.int64), ('B', np.float64)])
>>> b = np.zeros((3,), dtype=a.dtype)
>>> rfn.recursive_fill_fields(a, b)
- array([(1, 10.0), (2, 20.0), (0, 0.0)],
- dtype=[('A', '<i4'), ('B', '<f8')])
+ array([(1, 10.), (2, 20.), (0, 0.)], dtype=[('A', '<i8'), ('B', '<f8')])
"""
newdtype = output.dtype
@@ -77,7 +79,7 @@ def recursive_fill_fields(input, output):
return output
-def get_fieldspec(dtype):
+def _get_fieldspec(dtype):
"""
Produce a list of name/dtype pairs corresponding to the dtype fields
@@ -89,11 +91,11 @@ def get_fieldspec(dtype):
Examples
--------
- >>> dt = np.dtype([(('a', 'A'), int), ('b', float, 3)])
+ >>> dt = np.dtype([(('a', 'A'), np.int64), ('b', np.double, 3)])
>>> dt.descr
- [(('a', 'A'), '<i4'), ('b', '<f8', (3,))]
- >>> get_fieldspec(dt)
- [(('a', 'A'), dtype('int32')), ('b', dtype(('<f8', (3,))))]
+ [(('a', 'A'), '<i8'), ('b', '<f8', (3,))]
+ >>> _get_fieldspec(dt)
+ [(('a', 'A'), dtype('int64')), ('b', dtype(('<f8', (3,))))]
"""
if dtype.names is None:
@@ -120,10 +122,15 @@ def get_names(adtype):
Examples
--------
>>> from numpy.lib import recfunctions as rfn
- >>> rfn.get_names(np.empty((1,), dtype=int)) is None
- True
+ >>> rfn.get_names(np.empty((1,), dtype=int))
+ Traceback (most recent call last):
+ ...
+ AttributeError: 'numpy.ndarray' object has no attribute 'names'
+
>>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]))
- ('A', 'B')
+ Traceback (most recent call last):
+ ...
+ AttributeError: 'numpy.ndarray' object has no attribute 'names'
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names(adtype)
('a', ('b', ('ba', 'bb')))
@@ -142,7 +149,7 @@ def get_names(adtype):
def get_names_flat(adtype):
"""
Returns the field names of the input datatype as a tuple. Nested structures
- are flattend beforehand.
+ are flattened beforehand.
Parameters
----------
@@ -153,9 +160,13 @@ def get_names_flat(adtype):
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None
- True
+ Traceback (most recent call last):
+ ...
+ AttributeError: 'numpy.ndarray' object has no attribute 'names'
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)]))
- ('A', 'B')
+ Traceback (most recent call last):
+ ...
+ AttributeError: 'numpy.ndarray' object has no attribute 'names'
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names_flat(adtype)
('a', 'b', 'ba', 'bb')
@@ -196,12 +207,7 @@ def flatten_descr(ndtype):
return tuple(descr)
-def _zip_dtype_dispatcher(seqarrays, flatten=None):
- return seqarrays
-
-
-@array_function_dispatch(_zip_dtype_dispatcher)
-def zip_dtype(seqarrays, flatten=False):
+def _zip_dtype(seqarrays, flatten=False):
newdtype = []
if flatten:
for a in seqarrays:
@@ -211,14 +217,13 @@ def zip_dtype(seqarrays, flatten=False):
current = a.dtype
if current.names and len(current.names) <= 1:
# special case - dtypes of 0 or 1 field are flattened
- newdtype.extend(get_fieldspec(current))
+ newdtype.extend(_get_fieldspec(current))
else:
newdtype.append(('', current))
return np.dtype(newdtype)
-@array_function_dispatch(_zip_dtype_dispatcher)
-def zip_descr(seqarrays, flatten=False):
+def _zip_descr(seqarrays, flatten=False):
"""
Combine the dtype description of a series of arrays.
@@ -229,7 +234,7 @@ def zip_descr(seqarrays, flatten=False):
flatten : {boolean}, optional
Whether to collapse nested descriptions.
"""
- return zip_dtype(seqarrays, flatten=flatten).descr
+ return _zip_dtype(seqarrays, flatten=flatten).descr
def get_fieldstructure(adtype, lastname=None, parents=None,):
@@ -310,12 +315,7 @@ def _izip_fields(iterable):
yield element
-def _izip_records_dispatcher(seqarrays, fill_value=None, flatten=None):
- return seqarrays
-
-
-@array_function_dispatch(_izip_records_dispatcher)
-def izip_records(seqarrays, fill_value=None, flatten=True):
+def _izip_records(seqarrays, fill_value=None, flatten=True):
"""
Returns an iterator of concatenated items from a sequence of arrays.
@@ -403,20 +403,18 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False,
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
- masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)],
- mask = [(False, False) (False, False) (True, False)],
- fill_value = (999999, 1e+20),
- dtype = [('f0', '<i4'), ('f1', '<f8')])
-
- >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])),
- ... usemask=False)
- array([(1, 10.0), (2, 20.0), (-1, 30.0)],
- dtype=[('f0', '<i4'), ('f1', '<f8')])
- >>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]),
+ array([( 1, 10.), ( 2, 20.), (-1, 30.)],
+ dtype=[('f0', '<i8'), ('f1', '<f8')])
+
+ >>> rfn.merge_arrays((np.array([1, 2], dtype=np.int64),
+ ... np.array([10., 20., 30.])), usemask=False)
+ array([( 1, 10.), ( 2, 20.), (-1, 30.)],
+ dtype=[('f0', '<i8'), ('f1', '<f8')])
+ >>> rfn.merge_arrays((np.array([1, 2]).view([('a', np.int64)]),
... np.array([10., 20., 30.])),
... usemask=False, asrecarray=True)
- rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)],
- dtype=[('a', '<i4'), ('f1', '<f8')])
+ rec.array([( 1, 10.), ( 2, 20.), (-1, 30.)],
+ dtype=[('a', '<i8'), ('f1', '<f8')])
Notes
-----
@@ -439,7 +437,7 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False,
# Make sure we have named fields
if not seqdtype.names:
seqdtype = np.dtype([('', seqdtype)])
- if not flatten or zip_dtype((seqarrays,), flatten=True) == seqdtype:
+ if not flatten or _zip_dtype((seqarrays,), flatten=True) == seqdtype:
# Minimal processing needed: just make sure everything's a-ok
seqarrays = seqarrays.ravel()
# Find what type of array we must return
@@ -462,7 +460,7 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False,
sizes = tuple(a.size for a in seqarrays)
maxlength = max(sizes)
# Get the dtype of the output (flattening if needed)
- newdtype = zip_dtype(seqarrays, flatten=flatten)
+ newdtype = _zip_dtype(seqarrays, flatten=flatten)
# Initialize the sequences for data and mask
seqdata = []
seqmask = []
@@ -490,9 +488,9 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False,
seqdata.append(itertools.chain(data, [fval] * nbmissing))
seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
# Create an iterator for the data
- data = tuple(izip_records(seqdata, flatten=flatten))
+ data = tuple(_izip_records(seqdata, flatten=flatten))
output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
- mask=list(izip_records(seqmask, flatten=flatten)))
+ mask=list(_izip_records(seqmask, flatten=flatten)))
if asrecarray:
output = output.view(MaskedRecords)
else:
@@ -510,7 +508,7 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False,
else:
fval = None
seqdata.append(itertools.chain(data, [fval] * nbmissing))
- output = np.fromiter(tuple(izip_records(seqdata, flatten=flatten)),
+ output = np.fromiter(tuple(_izip_records(seqdata, flatten=flatten)),
dtype=newdtype, count=maxlength)
if asrecarray:
output = output.view(recarray)
@@ -547,16 +545,14 @@ def drop_fields(base, drop_names, usemask=True, asrecarray=False):
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
- ... dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
+ ... dtype=[('a', np.int64), ('b', [('ba', np.double), ('bb', np.int64)])])
>>> rfn.drop_fields(a, 'a')
- array([((2.0, 3),), ((5.0, 6),)],
- dtype=[('b', [('ba', '<f8'), ('bb', '<i4')])])
+ array([((2., 3),), ((5., 6),)],
+ dtype=[('b', [('ba', '<f8'), ('bb', '<i8')])])
>>> rfn.drop_fields(a, 'ba')
- array([(1, (3,)), (4, (6,))],
- dtype=[('a', '<i4'), ('b', [('bb', '<i4')])])
+ array([(1, (3,)), (4, (6,))], dtype=[('a', '<i8'), ('b', [('bb', '<i8')])])
>>> rfn.drop_fields(a, ['ba', 'bb'])
- array([(1,), (4,)],
- dtype=[('a', '<i4')])
+ array([(1,), (4,)], dtype=[('a', '<i8')])
"""
if _is_string_like(drop_names):
drop_names = [drop_names]
@@ -648,8 +644,8 @@ def rename_fields(base, namemapper):
>>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
>>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
- array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))],
- dtype=[('A', '<i4'), ('b', [('ba', '<f8'), ('BB', '<f8', 2)])])
+ array([(1, (2., [ 3., 30.])), (4, (5., [ 6., 60.]))],
+ dtype=[('A', '<i8'), ('b', [('ba', '<f8'), ('BB', '<f8', (2,))])])
"""
def _recursive_rename_fields(ndtype, namemapper):
@@ -739,7 +735,7 @@ def append_fields(base, names, data, dtypes=None,
#
output = ma.masked_all(
max(len(base), len(data)),
- dtype=get_fieldspec(base.dtype) + get_fieldspec(data.dtype))
+ dtype=_get_fieldspec(base.dtype) + _get_fieldspec(data.dtype))
output = recursive_fill_fields(base, output)
output = recursive_fill_fields(data, output)
#
@@ -830,22 +826,23 @@ def repack_fields(a, align=False, recurse=False):
Examples
--------
+ >>> from numpy.lib import recfunctions as rfn
>>> def print_offsets(d):
... print("offsets:", [d.fields[name][1] for name in d.names])
... print("itemsize:", d.itemsize)
...
- >>> dt = np.dtype('u1,i4,f4', align=True)
+ >>> dt = np.dtype('u1, <i8, <f8', align=True)
>>> dt
- dtype({'names':['f0','f1','f2'], 'formats':['u1','<i4','<f8'], 'offsets':[0,4,8], 'itemsize':16}, align=True)
+ dtype({'names':['f0','f1','f2'], 'formats':['u1','<i8','<f8'], 'offsets':[0,8,16], 'itemsize':24}, align=True)
>>> print_offsets(dt)
- offsets: [0, 4, 8]
- itemsize: 16
- >>> packed_dt = repack_fields(dt)
+ offsets: [0, 8, 16]
+ itemsize: 24
+ >>> packed_dt = rfn.repack_fields(dt)
>>> packed_dt
- dtype([('f0', 'u1'), ('f1', '<i4'), ('f2', '<f8')])
+ dtype([('f0', 'u1'), ('f1', '<i8'), ('f2', '<f8')])
>>> print_offsets(packed_dt)
- offsets: [0, 1, 5]
- itemsize: 13
+ offsets: [0, 1, 9]
+ itemsize: 17
"""
if not isinstance(a, np.dtype):
@@ -877,16 +874,35 @@ def _get_fields_and_offsets(dt, offset=0):
scalar fields in the dtype "dt", including nested fields, in left
to right order.
"""
+
+ # counts up elements in subarrays, including nested subarrays, and returns
+ # base dtype and count
+ def count_elem(dt):
+ count = 1
+ while dt.shape != ():
+ for size in dt.shape:
+ count *= size
+ dt = dt.base
+ return dt, count
+
fields = []
for name in dt.names:
field = dt.fields[name]
- if field[0].names is None:
- count = 1
- for size in field[0].shape:
- count *= size
- fields.append((field[0], count, field[1] + offset))
+ f_dt, f_offset = field[0], field[1]
+ f_dt, n = count_elem(f_dt)
+
+ if f_dt.names is None:
+ fields.append((np.dtype((f_dt, (n,))), n, f_offset + offset))
else:
- fields.extend(_get_fields_and_offsets(field[0], field[1] + offset))
+ subfields = _get_fields_and_offsets(f_dt, f_offset + offset)
+ size = f_dt.itemsize
+
+ for i in range(n):
+ if i == 0:
+ # optimization: avoid list comprehension if no subarray
+ fields.extend(subfields)
+ else:
+ fields.extend([(d, c, o + i*size) for d, c, o in subfields])
return fields
@@ -928,12 +944,13 @@ def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'):
Examples
--------
+ >>> from numpy.lib import recfunctions as rfn
>>> a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
>>> a
array([(0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.]),
(0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.])],
dtype=[('a', '<i4'), ('b', [('f0', '<f4'), ('f1', '<u2')]), ('c', '<f4', (2,))])
- >>> structured_to_unstructured(arr)
+ >>> rfn.structured_to_unstructured(a)
array([[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
@@ -941,7 +958,7 @@ def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'):
>>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
- >>> np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1)
+ >>> np.mean(rfn.structured_to_unstructured(b[['x', 'z']]), axis=-1)
array([ 3. , 5.5, 9. , 11. ])
"""
@@ -950,6 +967,12 @@ def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'):
fields = _get_fields_and_offsets(arr.dtype)
n_fields = len(fields)
+ if n_fields == 0 and dtype is None:
+ raise ValueError("arr has no fields. Unable to guess dtype")
+ elif n_fields == 0:
+ # too many bugs elsewhere for this to work now
+ raise NotImplementedError("arr with no fields is not supported")
+
dts, counts, offsets = zip(*fields)
names = ['f{}'.format(n) for n in range(n_fields)]
@@ -978,6 +1001,7 @@ def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'):
# finally is it safe to view the packed fields as the unstructured type
return arr.view((out_dtype, (sum(counts),)))
+
def _unstructured_to_structured_dispatcher(arr, dtype=None, names=None,
align=None, copy=None, casting=None):
return (arr,)
@@ -1023,6 +1047,7 @@ def unstructured_to_structured(arr, dtype=None, names=None, align=False,
Examples
--------
+ >>> from numpy.lib import recfunctions as rfn
>>> dt = np.dtype([('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
>>> a = np.arange(20).reshape((4,5))
>>> a
@@ -1030,7 +1055,7 @@ def unstructured_to_structured(arr, dtype=None, names=None, align=False,
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19]])
- >>> unstructured_to_structured(a, dt)
+ >>> rfn.unstructured_to_structured(a, dt)
array([( 0, ( 1., 2), [ 3., 4.]), ( 5, ( 6., 7), [ 8., 9.]),
(10, (11., 12), [13., 14.]), (15, (16., 17), [18., 19.])],
dtype=[('a', '<i4'), ('b', [('f0', '<f4'), ('f1', '<u2')]), ('c', '<f4', (2,))])
@@ -1039,6 +1064,9 @@ def unstructured_to_structured(arr, dtype=None, names=None, align=False,
if arr.shape == ():
raise ValueError('arr must have at least one dimension')
n_elem = arr.shape[-1]
+ if n_elem == 0:
+ # too many bugs elsewhere for this to work now
+ raise NotImplementedError("last axis with size 0 is not supported")
if dtype is None:
if names is None:
@@ -1051,7 +1079,11 @@ def unstructured_to_structured(arr, dtype=None, names=None, align=False,
raise ValueError("don't supply both dtype and names")
# sanity check of the input dtype
fields = _get_fields_and_offsets(dtype)
- dts, counts, offsets = zip(*fields)
+ if len(fields) == 0:
+ dts, counts, offsets = [], [], []
+ else:
+ dts, counts, offsets = zip(*fields)
+
if n_elem != sum(counts):
raise ValueError('The length of the last dimension of arr must '
'be equal to the number of fields in dtype')
@@ -1107,11 +1139,12 @@ def apply_along_fields(func, arr):
Examples
--------
+ >>> from numpy.lib import recfunctions as rfn
>>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
- >>> apply_along_fields(np.mean, b)
+ >>> rfn.apply_along_fields(np.mean, b)
array([ 2.66666667, 5.33333333, 8.66666667, 11. ])
- >>> apply_along_fields(np.mean, b[['x', 'z']])
+ >>> rfn.apply_along_fields(np.mean, b[['x', 'z']])
array([ 3. , 5.5, 9. , 11. ])
"""
@@ -1196,14 +1229,15 @@ def require_fields(array, required_dtype):
Examples
--------
+ >>> from numpy.lib import recfunctions as rfn
>>> a = np.ones(4, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
- >>> require_fields(a, [('b', 'f4'), ('c', 'u1')])
+ >>> rfn.require_fields(a, [('b', 'f4'), ('c', 'u1')])
array([(1., 1), (1., 1), (1., 1), (1., 1)],
dtype=[('b', '<f4'), ('c', 'u1')])
- >>> require_fields(a, [('b', 'f4'), ('newf', 'u1')])
+ >>> rfn.require_fields(a, [('b', 'f4'), ('newf', 'u1')])
array([(1., 0), (1., 0), (1., 0), (1., 0)],
dtype=[('b', '<f4'), ('newf', 'u1')])
-
+
"""
out = np.empty(array.shape, dtype=required_dtype)
assign_fields_by_name(out, array)
@@ -1244,15 +1278,16 @@ def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
True
>>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
>>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
- ... dtype=[('A', '|S3'), ('B', float), ('C', float)])
+ ... dtype=[('A', '|S3'), ('B', np.double), ('C', np.double)])
>>> test = rfn.stack_arrays((z,zz))
>>> test
- masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0)
- ('c', 30.0, 300.0)],
- mask = [(False, False, True) (False, False, True) (False, False, False)
- (False, False, False) (False, False, False)],
- fill_value = ('N/A', 1e+20, 1e+20),
- dtype = [('A', '|S3'), ('B', '<f8'), ('C', '<f8')])
+ masked_array(data=[(b'A', 1.0, --), (b'B', 2.0, --), (b'a', 10.0, 100.0),
+ (b'b', 20.0, 200.0), (b'c', 30.0, 300.0)],
+ mask=[(False, False, True), (False, False, True),
+ (False, False, False), (False, False, False),
+ (False, False, False)],
+ fill_value=(b'N/A', 1.e+20, 1.e+20),
+ dtype=[('A', 'S3'), ('B', '<f8'), ('C', '<f8')])
"""
if isinstance(arrays, ndarray):
@@ -1265,10 +1300,10 @@ def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
fldnames = [d.names for d in ndtype]
#
dtype_l = ndtype[0]
- newdescr = get_fieldspec(dtype_l)
+ newdescr = _get_fieldspec(dtype_l)
names = [n for n, d in newdescr]
for dtype_n in ndtype[1:]:
- for fname, fdtype in get_fieldspec(dtype_n):
+ for fname, fdtype in _get_fieldspec(dtype_n):
if fname not in names:
newdescr.append((fname, fdtype))
names.append(fname)
@@ -1331,7 +1366,10 @@ def find_duplicates(a, key=None, ignoremask=True, return_index=False):
>>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
>>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
- ... # XXX: judging by the output, the ignoremask flag has no effect
+ (masked_array(data=[(1,), (1,), (2,), (2,)],
+ mask=[(False,), (False,), (False,), (False,)],
+ fill_value=(999999,),
+ dtype=[('a', '<i8')]), array([0, 1, 3, 4]))
"""
a = np.asanyarray(a).ravel()
# Get a dictionary of fields
@@ -1488,15 +1526,15 @@ def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
#
# Build the new description of the output array .......
# Start with the key fields
- ndtype = get_fieldspec(r1k.dtype)
+ ndtype = _get_fieldspec(r1k.dtype)
# Add the fields from r1
- for fname, fdtype in get_fieldspec(r1.dtype):
+ for fname, fdtype in _get_fieldspec(r1.dtype):
if fname not in key:
ndtype.append((fname, fdtype))
# Add the fields from r2
- for fname, fdtype in get_fieldspec(r2.dtype):
+ for fname, fdtype in _get_fieldspec(r2.dtype):
# Have we seen the current name already ?
# we need to rebuild this list every time
names = list(name for name, dtype in ndtype)
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 3d4b0e3b2..088ca2bae 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -10,7 +10,7 @@ import numpy as np
from numpy import ma
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_almost_equal,
- assert_array_almost_equal, assert_raises, assert_allclose,
+ assert_array_almost_equal, assert_raises, assert_allclose, IS_PYPY,
assert_warns, assert_raises_regex, suppress_warnings, HAS_REFCOUNT,
)
import numpy.lib.function_base as nfb
@@ -3106,6 +3106,7 @@ class TestAdd_newdoc_ufunc(object):
class TestAdd_newdoc(object):
@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
+ @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc")
def test_add_doc(self):
# test np.add_newdoc
tgt = "Current flat index into the array."
diff --git a/numpy/lib/tests/test_recfunctions.py b/numpy/lib/tests/test_recfunctions.py
index f713fb64d..dc4afe077 100644
--- a/numpy/lib/tests/test_recfunctions.py
+++ b/numpy/lib/tests/test_recfunctions.py
@@ -12,9 +12,11 @@ from numpy.lib.recfunctions import (
find_duplicates, merge_arrays, append_fields, stack_arrays, join_by,
repack_fields, unstructured_to_structured, structured_to_unstructured,
apply_along_fields, require_fields, assign_fields_by_name)
+get_fieldspec = np.lib.recfunctions._get_fieldspec
get_names = np.lib.recfunctions.get_names
get_names_flat = np.lib.recfunctions.get_names_flat
-zip_descr = np.lib.recfunctions.zip_descr
+zip_descr = np.lib.recfunctions._zip_descr
+zip_dtype = np.lib.recfunctions._zip_dtype
class TestRecFunctions(object):
@@ -246,7 +248,8 @@ class TestRecFunctions(object):
# including uniform fields with subarrays unpacked
d = np.array([(1, [2, 3], [[ 4, 5], [ 6, 7]]),
(8, [9, 10], [[11, 12], [13, 14]])],
- dtype=[('x0', 'i4'), ('x1', ('i4', 2)), ('x2', ('i4', (2, 2)))])
+ dtype=[('x0', 'i4'), ('x1', ('i4', 2)),
+ ('x2', ('i4', (2, 2)))])
dd = structured_to_unstructured(d)
ddd = unstructured_to_structured(dd, d.dtype)
assert_(dd.base is d)
@@ -260,6 +263,40 @@ class TestRecFunctions(object):
assert_equal(res, np.zeros((10, 6), dtype=int))
+ # test nested combinations of subarrays and structured arrays, gh-13333
+ def subarray(dt, shape):
+ return np.dtype((dt, shape))
+
+ def structured(*dts):
+ return np.dtype([('x{}'.format(i), dt) for i, dt in enumerate(dts)])
+
+ def inspect(dt, dtype=None):
+ arr = np.zeros((), dt)
+ ret = structured_to_unstructured(arr, dtype=dtype)
+ backarr = unstructured_to_structured(ret, dt)
+ return ret.shape, ret.dtype, backarr.dtype
+
+ dt = structured(subarray(structured(np.int32, np.int32), 3))
+ assert_equal(inspect(dt), ((6,), np.int32, dt))
+
+ dt = structured(subarray(subarray(np.int32, 2), 2))
+ assert_equal(inspect(dt), ((4,), np.int32, dt))
+
+ dt = structured(np.int32)
+ assert_equal(inspect(dt), ((1,), np.int32, dt))
+
+ dt = structured(np.int32, subarray(subarray(np.int32, 2), 2))
+ assert_equal(inspect(dt), ((5,), np.int32, dt))
+
+ dt = structured()
+ assert_raises(ValueError, structured_to_unstructured, np.zeros(3, dt))
+
+ # these currently don't work, but we may make it work in the future
+ assert_raises(NotImplementedError, structured_to_unstructured,
+ np.zeros(3, dt), dtype=np.int32)
+ assert_raises(NotImplementedError, unstructured_to_structured,
+ np.zeros((3,0), dtype=np.int32))
+
def test_field_assignment_by_name(self):
a = np.ones(2, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
newdt = [('b', 'f4'), ('c', 'u1')]
diff --git a/numpy/lib/tests/test_twodim_base.py b/numpy/lib/tests/test_twodim_base.py
index bf93b4adb..fe1348d28 100644
--- a/numpy/lib/tests/test_twodim_base.py
+++ b/numpy/lib/tests/test_twodim_base.py
@@ -5,7 +5,7 @@ from __future__ import division, absolute_import, print_function
from numpy.testing import (
assert_equal, assert_array_equal, assert_array_max_ulp,
- assert_array_almost_equal, assert_raises,
+ assert_array_almost_equal, assert_raises, assert_
)
from numpy import (
@@ -17,6 +17,9 @@ from numpy import (
import numpy as np
+from numpy.core.tests.test_overrides import requires_array_function
+
+
def get_mat(n):
data = arange(n)
data = add.outer(data, data)
@@ -273,6 +276,27 @@ class TestHistogram2d(object):
assert_array_equal(H, answer)
assert_array_equal(xe, array([0., 0.25, 0.5, 0.75, 1]))
+ @requires_array_function
+ def test_dispatch(self):
+ class ShouldDispatch(object):
+ def __array_function__(self, function, types, args, kwargs):
+ return types, args, kwargs
+
+ xy = [1, 2]
+ s_d = ShouldDispatch()
+ r = histogram2d(s_d, xy)
+ # Cannot use assert_equal since that dispatches...
+ assert_(r == ((ShouldDispatch,), (s_d, xy), {}))
+ r = histogram2d(xy, s_d)
+ assert_(r == ((ShouldDispatch,), (xy, s_d), {}))
+ r = histogram2d(xy, xy, bins=s_d)
+        assert_(r == ((ShouldDispatch,), (xy, xy), dict(bins=s_d)))
+        r = histogram2d(xy, xy, bins=[s_d, 5])
+        assert_(r == ((ShouldDispatch,), (xy, xy), dict(bins=[s_d, 5])))
+        assert_raises(Exception, histogram2d, xy, xy, bins=[s_d])
+        r = histogram2d(xy, xy, weights=s_d)
+        assert_(r == ((ShouldDispatch,), (xy, xy), dict(weights=s_d)))
+
class TestTri(object):
def test_dtype(self):
diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py
index 5c840b111..a058070e5 100644
--- a/numpy/lib/twodim_base.py
+++ b/numpy/lib/twodim_base.py
@@ -565,7 +565,21 @@ def vander(x, N=None, increasing=False):
def _histogram2d_dispatcher(x, y, bins=None, range=None, normed=None,
weights=None, density=None):
- return (x, y, bins, weights)
+ yield x
+ yield y
+
+ # This terrible logic is adapted from the checks in histogram2d
+ try:
+ N = len(bins)
+ except TypeError:
+ N = 1
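+    # only a pair of bin specifications may contain array-like elements
+    # that should take part in dispatch; a single spec is yielded as-is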
+ if N == 2:
+ for b in bins:
+ yield b
+ else:
+ yield bins
+
+ yield weights
@array_function_dispatch(_histogram2d_dispatcher)
diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py
index e2153d601..5eec368fd 100644
--- a/numpy/testing/_private/utils.py
+++ b/numpy/testing/_private/utils.py
@@ -6,6 +6,7 @@ from __future__ import division, absolute_import, print_function
import os
import sys
+import platform
import re
import gc
import operator
@@ -39,6 +40,7 @@ __all__ = [
'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY',
'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare',
'_assert_valid_refcount', '_gen_alignment_data', 'assert_no_gc_cycles',
+ 'break_cycles',
]
@@ -50,7 +52,7 @@ class KnownFailureException(Exception):
KnownFailureTest = KnownFailureException # backwards compat
verbose = 0
-IS_PYPY = '__pypy__' in sys.modules
+IS_PYPY = platform.python_implementation() == 'PyPy'
HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None
@@ -2255,6 +2257,7 @@ def _assert_no_gc_cycles_context(name=None):
# not meaningful to test if there is no refcounting
if not HAS_REFCOUNT:
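+        # even when the check is skipped, this generator-based context
+        # manager must yield once so the `with` body still runs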
+ yield
return
assert_(gc.isenabled())
@@ -2333,3 +2336,19 @@ def assert_no_gc_cycles(*args, **kwargs):
args = args[1:]
with _assert_no_gc_cycles_context(name=func.__name__):
func(*args, **kwargs)
+
+def break_cycles():
+ """
+    Break reference cycles by calling gc.collect.
+
+    Objects can call other objects' methods (for instance, another object's
+    __del__) inside their own __del__. On PyPy, those methods only run
+    between calls to gc.collect, so multiple calls are needed to completely
+    release all cycles.
+ """
+
+ gc.collect()
+ if IS_PYPY:
+ # interpreter runs now, to call deleted objects' __del__ methods
+ gc.collect()
+ # one more, just to make sure
+ gc.collect()
diff --git a/numpy/testing/tests/test_decorators.py b/numpy/testing/tests/test_decorators.py
index bb3ea1acb..c029bf90c 100644
--- a/numpy/testing/tests/test_decorators.py
+++ b/numpy/testing/tests/test_decorators.py
@@ -13,7 +13,9 @@ from numpy.testing import (
try:
- import nose # noqa: F401
+ with warnings.catch_warnings():
+ warnings.simplefilter("always")
+ import nose # noqa: F401
except ImportError:
HAVE_NOSE = False
else:
diff --git a/pavement.py b/pavement.py
index 2cc29d3fa..33a3fc751 100644
--- a/pavement.py
+++ b/pavement.py
@@ -42,7 +42,7 @@ from paver.easy import Bunch, options, task, sh
#-----------------------------------
# Path to the release notes
-RELEASE_NOTES = 'doc/release/1.16.4-notes.rst'
+RELEASE_NOTES = 'doc/release/1.16.5-notes.rst'
#-------------------------------------------------------
diff --git a/setup.py b/setup.py
index 8b2ded1f2..61c5e6e7d 100755
--- a/setup.py
+++ b/setup.py
@@ -61,7 +61,7 @@ Operating System :: MacOS
MAJOR = 1
MINOR = 16
-MICRO = 4
+MICRO = 5
ISRELEASED = True
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
@@ -370,6 +370,10 @@ def setup_package():
url = "https://www.numpy.org",
author = "Travis E. Oliphant et al.",
download_url = "https://pypi.python.org/pypi/numpy",
+ project_urls={
+ "Bug Tracker": "https://github.com/numpy/numpy/issues",
+ "Source Code": "https://github.com/numpy/numpy",
+ },
license = 'BSD',
classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
platforms = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
diff --git a/tools/pypy-test.sh b/tools/pypy-test.sh
new file mode 100644
index 000000000..246425467
--- /dev/null
+++ b/tools/pypy-test.sh
@@ -0,0 +1,51 @@
+#!/usr/bin/env bash
+
+# Exit if a command fails
+set -e
+set -o pipefail
+# Print expanded commands
+set -x
+
+sudo apt-get -yq update
+sudo apt-get -yq install libatlas-base-dev liblapack-dev gfortran-5
+export F77=gfortran-5 F90=gfortran-5
+
+# Download the proper OpenBLAS x64 precompiled library
+OPENBLAS=openblas-v0.3.5-274-g6a8b4269-manylinux1_x86_64.tar.gz
+echo getting $OPENBLAS
+wget -q https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/$OPENBLAS -O openblas.tar.gz
+mkdir -p openblas
+(cd openblas; tar -xf ../openblas.tar.gz)
+export LD_LIBRARY_PATH=$PWD/openblas/usr/local/lib
+export LIB=$PWD/openblas/usr/local/lib
+export INCLUDE=$PWD/openblas/usr/local/include
+
+# Use a site.cfg to build with local openblas
+cat << EOF > site.cfg
+[openblas]
+libraries = openblas
+library_dirs = $PWD/openblas/usr/local/lib:$LIB
+include_dirs = $PWD/openblas/usr/local/lib:$LIB
+runtime_library_dirs = $PWD/openblas/usr/local/lib
+EOF
+
+echo getting PyPy 2.7 nightly
+wget -q http://buildbot.pypy.org/nightly/trunk/pypy-c-jit-latest-linux64.tar.bz2 -O pypy.tar.bz2
+mkdir -p pypy
+(cd pypy; tar --strip-components=1 -xf ../pypy.tar.bz2)
+pypy/bin/pypy -mensurepip
+pypy/bin/pypy -m pip install --upgrade pip setuptools
+pypy/bin/pypy -m pip install --user cython==0.29.0 pytest pytz --no-warn-script-location
+
+echo
+echo pypy version
+pypy/bin/pypy -c "import sys; print(sys.version)"
+echo
+
+pypy/bin/pypy runtests.py -vv --show-build-log -- -rsx \
+ --junitxml=junit/test-results.xml --durations 10
+
+echo Make sure the correct openblas has been linked in
+
+pypy/bin/pip install .
+(cd pypy; bin/pypy -c "$TEST_GET_CONFIG")
diff --git a/tools/travis-before-install.sh b/tools/travis-before-install.sh
index c334e91ae..7cc1c1925 100755
--- a/tools/travis-before-install.sh
+++ b/tools/travis-before-install.sh
@@ -26,6 +26,6 @@ if [ -n "$INSTALL_PICKLE5" ]; then
fi
pip install --upgrade pip setuptools
-pip install nose pytz cython pytest
+pip install nose pytz cython 'pytest<=5.0.1'
if [ -n "$USE_ASV" ]; then pip install asv; fi
popd