author    DongHun Kwak <dh0128.kwak@samsung.com>  2020-12-31 09:34:06 +0900
committer DongHun Kwak <dh0128.kwak@samsung.com>  2020-12-31 09:34:06 +0900
commit    4b17002dd27193e2eb87f4b3c7566d929a7ac788 (patch)
tree      c6c5142f99ce1479902e180d434f81d5d2f922a0 /numpy
parent    f14f97841aa140385b7fca2aeb1c7c96b2711560 (diff)
download  python-numpy-4b17002dd27193e2eb87f4b3c7566d929a7ac788.tar.gz
          python-numpy-4b17002dd27193e2eb87f4b3c7566d929a7ac788.tar.bz2
          python-numpy-4b17002dd27193e2eb87f4b3c7566d929a7ac788.zip

Imported Upstream version 1.16.1 (tag: upstream/1.16.1)
Diffstat (limited to 'numpy')
-rw-r--r--  numpy/compat/py3k.py | 4
-rw-r--r--  numpy/compat/tests/test_compat.py | 7
-rw-r--r--  numpy/core/__init__.py | 19
-rw-r--r--  numpy/core/_internal.py | 7
-rw-r--r--  numpy/core/_methods.py | 12
-rw-r--r--  numpy/core/code_generators/genapi.py | 1
-rw-r--r--  numpy/core/code_generators/generate_umath.py | 5
-rw-r--r--  numpy/core/multiarray.py | 12
-rw-r--r--  numpy/core/overrides.py | 114
-rw-r--r--  numpy/core/records.py | 5
-rw-r--r--  numpy/core/setup.py | 10
-rw-r--r--  numpy/core/shape_base.py | 7
-rw-r--r--  numpy/core/src/common/get_attr_string.h | 1
-rw-r--r--  numpy/core/src/multiarray/_multiarray_module_test.c | 129
-rw-r--r--  numpy/core/src/multiarray/_multiarray_tests.c.src | 7
-rw-r--r--  numpy/core/src/multiarray/arrayfunction_override.c | 376
-rw-r--r--  numpy/core/src/multiarray/arrayfunction_override.h | 16
-rw-r--r--  numpy/core/src/multiarray/arraytypes.c.src | 81
-rw-r--r--  numpy/core/src/multiarray/buffer.c | 6
-rw-r--r--  numpy/core/src/multiarray/common.c | 10
-rw-r--r--  numpy/core/src/multiarray/compiled_base.c | 4
-rw-r--r--  numpy/core/src/multiarray/ctors.c | 21
-rw-r--r--  numpy/core/src/multiarray/datetime.c | 16
-rw-r--r--  numpy/core/src/multiarray/descriptor.c | 9
-rw-r--r--  numpy/core/src/multiarray/dtype_transfer.c | 31
-rw-r--r--  numpy/core/src/multiarray/methods.c | 249
-rw-r--r--  numpy/core/src/multiarray/multiarraymodule.c | 19
-rw-r--r--  numpy/core/src/multiarray/multiarraymodule.h | 1
-rw-r--r--  numpy/core/src/multiarray/nditer_constr.c | 7
-rw-r--r--  numpy/core/src/multiarray/nditer_pywrap.c | 2
-rw-r--r--  numpy/core/src/multiarray/number.c | 7
-rw-r--r--  numpy/core/src/multiarray/refcount.c | 86
-rw-r--r--  numpy/core/src/multiarray/scalartypes.c.src | 2
-rw-r--r--  numpy/core/src/multiarray/usertypes.c | 33
-rw-r--r--  numpy/core/src/npymath/halffloat.c | 31
-rw-r--r--  numpy/core/src/umath/_struct_ufunc_tests.c.src | 1
-rw-r--r--  numpy/core/src/umath/_umath_tests.c.src | 2
-rw-r--r--  numpy/core/src/umath/loops.c.src | 58
-rw-r--r--  numpy/core/src/umath/loops.h.src | 6
-rw-r--r--  numpy/core/src/umath/reduction.c | 1
-rw-r--r--  numpy/core/src/umath/ufunc_object.c | 2
-rw-r--r--  numpy/core/src/umath/ufunc_type_resolution.c | 58
-rw-r--r--  numpy/core/src/umath/ufunc_type_resolution.h | 7
-rw-r--r--  numpy/core/src/umath/umathmodule.c | 14
-rw-r--r--  numpy/core/tests/test_arrayprint.py | 4
-rw-r--r--  numpy/core/tests/test_datetime.py | 131
-rw-r--r--  numpy/core/tests/test_dtype.py | 171
-rw-r--r--  numpy/core/tests/test_half.py | 79
-rw-r--r--  numpy/core/tests/test_multiarray.py | 101
-rw-r--r--  numpy/core/tests/test_overrides.py | 78
-rw-r--r--  numpy/core/tests/test_regression.py | 36
-rw-r--r--  numpy/core/tests/test_shape_base.py | 4
-rw-r--r--  numpy/ctypeslib.py | 168
-rw-r--r--  numpy/distutils/fcompiler/__init__.py | 10
-rw-r--r--  numpy/distutils/fcompiler/absoft.py | 2
-rw-r--r--  numpy/distutils/fcompiler/environment.py | 15
-rw-r--r--  numpy/distutils/fcompiler/gnu.py | 5
-rw-r--r--  numpy/distutils/fcompiler/intel.py | 5
-rw-r--r--  numpy/distutils/fcompiler/pg.py | 8
-rw-r--r--  numpy/distutils/fcompiler/sun.py | 2
-rw-r--r--  numpy/distutils/system_info.py | 19
-rw-r--r--  numpy/distutils/tests/test_fcompiler.py | 41
-rw-r--r--  numpy/f2py/__init__.py | 22
-rwxr-xr-x  numpy/f2py/f2py2e.py | 21
-rw-r--r--  numpy/f2py/tests/test_compile_function.py | 17
-rw-r--r--  numpy/lib/_datasource.py | 9
-rw-r--r--  numpy/lib/arraysetops.py | 26
-rw-r--r--  numpy/lib/function_base.py | 5
-rw-r--r--  numpy/lib/tests/test_arraysetops.py | 8
-rw-r--r--  numpy/lib/twodim_base.py | 4
-rw-r--r--  numpy/random/mtrand/mtrand.pyx | 12
-rw-r--r--  numpy/random/tests/test_random.py | 5
-rw-r--r--  numpy/testing/_private/nosetester.py | 2
-rw-r--r--  numpy/testing/_private/utils.py | 208
-rw-r--r--  numpy/testing/tests/test_utils.py | 115
-rw-r--r--  numpy/tests/test_ctypeslib.py | 92
-rw-r--r--  numpy/tests/test_scripts.py | 88
77 files changed, 2379 insertions(+), 630 deletions(-)
diff --git a/numpy/compat/py3k.py b/numpy/compat/py3k.py
index 8e06ead78..067292776 100644
--- a/numpy/compat/py3k.py
+++ b/numpy/compat/py3k.py
@@ -219,7 +219,7 @@ else:
path representation is not str or bytes, TypeError is raised. If the
provided path is not str, bytes, or os.PathLike, TypeError is raised.
"""
- if isinstance(path, (str, bytes)):
+ if isinstance(path, (unicode, bytes)):
return path
# Work from the object's type to match method resolution of other magic
@@ -235,7 +235,7 @@ else:
else:
raise TypeError("expected str, bytes or os.PathLike object, "
"not " + path_type.__name__)
- if isinstance(path_repr, (str, bytes)):
+ if isinstance(path_repr, (unicode, bytes)):
return path_repr
else:
raise TypeError("expected {}.__fspath__() to return str or bytes, "
diff --git a/numpy/compat/tests/test_compat.py b/numpy/compat/tests/test_compat.py
index 1543aafaf..9bb316a4d 100644
--- a/numpy/compat/tests/test_compat.py
+++ b/numpy/compat/tests/test_compat.py
@@ -2,7 +2,7 @@ from __future__ import division, absolute_import, print_function
from os.path import join
-from numpy.compat import isfileobj
+from numpy.compat import isfileobj, os_fspath
from numpy.testing import assert_
from numpy.testing import tempdir
@@ -19,3 +19,8 @@ def test_isfileobj():
with open(filename, 'rb') as f:
assert_(isfileobj(f))
+
+
+def test_os_fspath_strings():
+ for string_path in (b'/a/b/c.d', u'/a/b/c.d'):
+ assert_(os_fspath(string_path) == string_path)
diff --git a/numpy/core/__init__.py b/numpy/core/__init__.py
index 80ce84f00..6b76e63b7 100644
--- a/numpy/core/__init__.py
+++ b/numpy/core/__init__.py
@@ -53,7 +53,19 @@ del env_added
del os
from . import umath
-from . import _internal # for freeze programs
+
+# Check that multiarray,umath are pure python modules wrapping
+# _multiarray_umath and not either of the old c-extension modules
+if not (hasattr(multiarray, '_multiarray_umath') and
+ hasattr(umath, '_multiarray_umath')):
+ import sys
+ path = sys.modules['numpy'].__path__
+ msg = ("Something is wrong with the numpy installation. "
+ "While importing we detected an older version of "
+ "numpy in {}. One method of fixing this is to repeatedly uninstall "
+ "numpy until none is found, then reinstall this version.")
+ raise ImportError(msg.format(path))
+
from . import numerictypes as nt
multiarray.set_typeDict(nt.sctypeDict)
from . import numeric
@@ -83,6 +95,11 @@ from .numeric import absolute as abs
# do this after everything else, to minimize the chance of this misleadingly
# appearing in an import-time traceback
from . import _add_newdocs
+# add these for module-freeze analysis (like PyInstaller)
+from . import _dtype_ctypes
+from . import _internal
+from . import _dtype
+from . import _methods
__all__ = ['char', 'rec', 'memmap']
__all__ += numeric.__all__
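
The check above amounts to the following Python-level sanity test (a minimal
sketch, assuming a healthy 1.16 installation where both pure-Python wrapper
modules re-export the new single C extension):

    from numpy.core import multiarray, umath
    # Both wrappers must expose the compiled _multiarray_umath module; a
    # stale binary multiarray.so or umath.so shadowing them would not.
    assert hasattr(multiarray, '_multiarray_umath')
    assert hasattr(umath, '_multiarray_umath')
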
diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py
index 27a3deeda..1d3bb5584 100644
--- a/numpy/core/_internal.py
+++ b/numpy/core/_internal.py
@@ -830,6 +830,13 @@ def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
.format(ufunc, method, args_string, types_string))
+def array_function_errmsg_formatter(public_api, types):
+ """ Format the error message for when __array_ufunc__ gives up. """
+ func_name = '{}.{}'.format(public_api.__module__, public_api.__name__)
+ return ("no implementation found for '{}' on types that implement "
+ '__array_function__: {}'.format(func_name, list(types)))
+
+
def _ufunc_doc_signature_formatter(ufunc):
"""
Builds a signature string which resembles PEP 457
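
The new formatter builds the TypeError text raised when every
__array_function__ implementation returns NotImplemented. An illustrative
call (the `my_func` stand-in below is hypothetical, not part of NumPy):

    from numpy.core._internal import array_function_errmsg_formatter

    def my_func():  # hypothetical public API function
        pass
    my_func.__module__ = 'mypkg'

    array_function_errmsg_formatter(my_func, [list])
    # "no implementation found for 'mypkg.my_func' on types that implement
    #  __array_function__: [<class 'list'>]"
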
diff --git a/numpy/core/_methods.py b/numpy/core/_methods.py
index baeab6383..33f6d01a8 100644
--- a/numpy/core/_methods.py
+++ b/numpy/core/_methods.py
@@ -154,15 +154,3 @@ def _ptp(a, axis=None, out=None, keepdims=False):
umr_minimum(a, axis, None, None, keepdims),
out
)
-
-_NDARRAY_ARRAY_FUNCTION = mu.ndarray.__array_function__
-
-def _array_function(self, func, types, args, kwargs):
- # TODO: rewrite this in C
- # Cannot handle items that have __array_function__ other than our own.
- for t in types:
- if not issubclass(t, mu.ndarray) and hasattr(t, '__array_function__'):
- return NotImplemented
-
- # The regular implementation can handle this, so we call it directly.
- return func.__wrapped__(*args, **kwargs)
diff --git a/numpy/core/code_generators/genapi.py b/numpy/core/code_generators/genapi.py
index 1d2cd25c8..4aca2373c 100644
--- a/numpy/core/code_generators/genapi.py
+++ b/numpy/core/code_generators/genapi.py
@@ -19,6 +19,7 @@ __docformat__ = 'restructuredtext'
# The files under src/ that are scanned for API functions
API_FILES = [join('multiarray', 'alloc.c'),
+ join('multiarray', 'arrayfunction_override.c'),
join('multiarray', 'array_assign_array.c'),
join('multiarray', 'array_assign_scalar.c'),
join('multiarray', 'arrayobject.c'),
diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py
index f5ee02c42..0fac9b05e 100644
--- a/numpy/core/code_generators/generate_umath.py
+++ b/numpy/core/code_generators/generate_umath.py
@@ -315,7 +315,7 @@ defdict = {
TD(intfltcmplx),
[TypeDescription('m', FullTypeDescr, 'mq', 'm'),
TypeDescription('m', FullTypeDescr, 'md', 'm'),
- #TypeDescription('m', FullTypeDescr, 'mm', 'd'),
+ TypeDescription('m', FullTypeDescr, 'mm', 'q'),
],
TD(O, f='PyNumber_FloorDivide'),
),
@@ -802,8 +802,9 @@ defdict = {
'divmod':
Ufunc(2, 2, None,
docstrings.get('numpy.core.umath.divmod'),
- None,
+ 'PyUFunc_DivmodTypeResolver',
TD(intflt),
+ [TypeDescription('m', FullTypeDescr, 'mm', 'qm')],
# TD(O, f='PyNumber_Divmod'), # gh-9730
),
'hypot':
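
The new 'mm->q' and 'mm->qm' signatures give timedelta64 a true floor
division and divmod. A sketch of the resulting 1.16 behavior:

    import numpy as np

    a = np.timedelta64(7, 's')
    b = np.timedelta64(2, 's')
    a // b           # 3, an integer count (the 'mm->q' loop)
    np.divmod(a, b)  # (3, numpy.timedelta64(1,'s')) -- the 'mm->qm' loop
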
diff --git a/numpy/core/multiarray.py b/numpy/core/multiarray.py
index df0ed2df4..790896922 100644
--- a/numpy/core/multiarray.py
+++ b/numpy/core/multiarray.py
@@ -40,6 +40,10 @@ __all__ = [
'tracemalloc_domain', 'typeinfo', 'unpackbits', 'unravel_index', 'vdot',
'where', 'zeros']
+# For backward compatibility, make sure pickle imports these functions from here
+_reconstruct.__module__ = 'numpy.core.multiarray'
+scalar.__module__ = 'numpy.core.multiarray'
+
arange.__module__ = 'numpy'
array.__module__ = 'numpy'
@@ -211,9 +215,11 @@ def concatenate(arrays, axis=None, out=None):
fill_value=999999)
"""
- for array in arrays:
- yield array
- yield out
+ if out is not None:
+ # optimize for the typical case where only arrays is provided
+ arrays = list(arrays)
+ arrays.append(out)
+ return arrays
@array_function_from_c_func_and_dispatcher(_multiarray_umath.inner)
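
A dispatcher only has to return the arguments whose types should be scanned
for __array_function__; returning a concrete sequence rather than yielding
suits the PySequence_Fast call on the C side. Schematically (an illustrative
sketch, not the exact NumPy source):

    def _binary_dispatcher(a, b, out=None):
        # `out`, when given, must participate in dispatch too.
        if out is None:
            return (a, b)
        return (a, b, out)
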
diff --git a/numpy/core/overrides.py b/numpy/core/overrides.py
index 0979858a1..c55174ecd 100644
--- a/numpy/core/overrides.py
+++ b/numpy/core/overrides.py
@@ -1,73 +1,23 @@
-"""Preliminary implementation of NEP-18
-
-TODO: rewrite this in C for performance.
-"""
+"""Implementation of __array_function__ overrides from NEP-18."""
import collections
import functools
import os
-from numpy.core._multiarray_umath import add_docstring, ndarray
+from numpy.core._multiarray_umath import (
+ add_docstring, implement_array_function, _get_implementing_args)
from numpy.compat._inspect import getargspec
-_NDARRAY_ARRAY_FUNCTION = ndarray.__array_function__
-_NDARRAY_ONLY = [ndarray]
-
ENABLE_ARRAY_FUNCTION = bool(
int(os.environ.get('NUMPY_EXPERIMENTAL_ARRAY_FUNCTION', 0)))
-def get_overloaded_types_and_args(relevant_args):
- """Returns a list of arguments on which to call __array_function__.
-
- Parameters
- ----------
- relevant_args : iterable of array-like
- Iterable of array-like arguments to check for __array_function__
- methods.
-
- Returns
- -------
- overloaded_types : collection of types
- Types of arguments from relevant_args with __array_function__ methods.
- overloaded_args : list
- Arguments from relevant_args on which to call __array_function__
- methods, in the order in which they should be called.
+add_docstring(
+ implement_array_function,
"""
- # Runtime is O(num_arguments * num_unique_types)
- overloaded_types = []
- overloaded_args = []
- for arg in relevant_args:
- arg_type = type(arg)
- # We only collect arguments if they have a unique type, which ensures
- # reasonable performance even with a long list of possibly overloaded
- # arguments.
- if (arg_type not in overloaded_types and
- hasattr(arg_type, '__array_function__')):
-
- # Create lists explicitly for the first type (usually the only one
- # done) to avoid setting up the iterator for overloaded_args.
- if overloaded_types:
- overloaded_types.append(arg_type)
- # By default, insert argument at the end, but if it is
- # subclass of another argument, insert it before that argument.
- # This ensures "subclasses before superclasses".
- index = len(overloaded_args)
- for i, old_arg in enumerate(overloaded_args):
- if issubclass(arg_type, type(old_arg)):
- index = i
- break
- overloaded_args.insert(index, arg)
- else:
- overloaded_types = [arg_type]
- overloaded_args = [arg]
-
- return overloaded_types, overloaded_args
-
-
-def array_function_implementation_or_override(
- implementation, public_api, relevant_args, args, kwargs):
- """Implement a function with checks for __array_function__ overrides.
+ Implement a function with checks for __array_function__ overrides.
+
+ All arguments are required, and can only be passed by position.
Arguments
---------
@@ -82,41 +32,37 @@ def array_function_implementation_or_override(
Iterable of arguments to check for __array_function__ methods.
args : tuple
Arbitrary positional arguments originally passed into ``public_api``.
- kwargs : tuple
+ kwargs : dict
Arbitrary keyword arguments originally passed into ``public_api``.
Returns
-------
- Result from calling `implementation()` or an `__array_function__`
+ Result from calling ``implementation()`` or an ``__array_function__``
method, as appropriate.
Raises
------
TypeError : if no implementation is found.
+ """)
+
+
+# exposed for testing purposes; used internally by implement_array_function
+add_docstring(
+ _get_implementing_args,
"""
- # Check for __array_function__ methods.
- types, overloaded_args = get_overloaded_types_and_args(relevant_args)
- # Short-cut for common cases: no overload or only ndarray overload
- # (directly or with subclasses that do not override __array_function__).
- if (not overloaded_args or types == _NDARRAY_ONLY or
- all(type(arg).__array_function__ is _NDARRAY_ARRAY_FUNCTION
- for arg in overloaded_args)):
- return implementation(*args, **kwargs)
-
- # Call overrides
- for overloaded_arg in overloaded_args:
- # Use `public_api` instead of `implemenation` so __array_function__
- # implementations can do equality/identity comparisons.
- result = overloaded_arg.__array_function__(
- public_api, types, args, kwargs)
-
- if result is not NotImplemented:
- return result
-
- func_name = '{}.{}'.format(public_api.__module__, public_api.__name__)
- raise TypeError("no implementation found for '{}' on types that implement "
- '__array_function__: {}'
- .format(func_name, list(map(type, overloaded_args))))
+ Collect arguments on which to call __array_function__.
+
+ Parameters
+ ----------
+ relevant_args : iterable of array-like
+ Iterable of possibly array-like arguments to check for
+ __array_function__ methods.
+
+ Returns
+ -------
+ Sequence of arguments with __array_function__ methods, in the order in
+ which they should be called.
+ """)
ArgSpec = collections.namedtuple('ArgSpec', 'args varargs keywords defaults')
@@ -215,7 +161,7 @@ def array_function_dispatch(dispatcher, module=None, verify=True,
@functools.wraps(implementation)
def public_api(*args, **kwargs):
relevant_args = dispatcher(*args, **kwargs)
- return array_function_implementation_or_override(
+ return implement_array_function(
implementation, public_api, relevant_args, args, kwargs)
if module is not None:
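
End to end, this machinery lets a foreign type intercept NumPy functions. A
minimal sketch (in 1.16 the protocol is opt-in, so
NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=1 must be set in the environment before
numpy is imported):

    import numpy as np

    class MyArray(object):
        def __array_function__(self, func, types, args, kwargs):
            if func is np.concatenate:
                return 'intercepted concatenate'
            return NotImplemented

    np.concatenate([MyArray(), MyArray()])  # -> 'intercepted concatenate'
    np.stack([MyArray()])  # -> TypeError: no implementation found ...
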
diff --git a/numpy/core/records.py b/numpy/core/records.py
index 86a43306a..5898bb163 100644
--- a/numpy/core/records.py
+++ b/numpy/core/records.py
@@ -711,7 +711,7 @@ def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None,
a string"""
if dtype is None and formats is None:
- raise ValueError("Must have dtype= or formats=")
+ raise TypeError("fromstring() needs a 'dtype' or 'formats' argument")
if dtype is not None:
descr = sb.dtype(dtype)
@@ -758,6 +758,9 @@ def fromfile(fd, dtype=None, shape=None, offset=0, formats=None,
>>> r.shape
(10,)
"""
+
+ if dtype is None and formats is None:
+ raise TypeError("fromfile() needs a 'dtype' or 'formats' argument")
if (shape is None or shape == 0):
shape = (-1,)
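
Both helpers now fail fast with a TypeError when neither dtype nor formats
is supplied. For example (a sketch):

    import numpy as np

    np.core.records.fromstring(b'\x01\x00\x00\x00', formats='i4')  # ok
    np.core.records.fromstring(b'\x01\x00\x00\x00')
    # TypeError: fromstring() needs a 'dtype' or 'formats' argument
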
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index 467b590ac..00a9ffe69 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -775,6 +775,7 @@ def configuration(parent_package='',top_path=None):
multiarray_deps = [
join('src', 'multiarray', 'arrayobject.h'),
join('src', 'multiarray', 'arraytypes.h'),
+ join('src', 'multiarray', 'arrayfunction_override.h'),
join('src', 'multiarray', 'buffer.h'),
join('src', 'multiarray', 'calculation.h'),
join('src', 'multiarray', 'common.h'),
@@ -827,6 +828,7 @@ def configuration(parent_package='',top_path=None):
join('src', 'multiarray', 'arraytypes.c.src'),
join('src', 'multiarray', 'array_assign_scalar.c'),
join('src', 'multiarray', 'array_assign_array.c'),
+ join('src', 'multiarray', 'arrayfunction_override.c'),
join('src', 'multiarray', 'buffer.c'),
join('src', 'multiarray', 'calculation.c'),
join('src', 'multiarray', 'compiled_base.c'),
@@ -960,6 +962,14 @@ def configuration(parent_package='',top_path=None):
config.add_extension('_operand_flag_tests',
sources=[join('src', 'umath', '_operand_flag_tests.c.src')])
+ #######################################################################
+ # _multiarray_module_test module #
+ #######################################################################
+
+ config.add_extension('_multiarray_module_test',
+ sources=[join('src', 'multiarray',
+ '_multiarray_module_test.c')])
+
config.add_data_dir('tests')
config.add_data_dir('tests/data')
diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py
index a529d2ad7..d20afd8be 100644
--- a/numpy/core/shape_base.py
+++ b/numpy/core/shape_base.py
@@ -342,10 +342,11 @@ def hstack(tup):
def _stack_dispatcher(arrays, axis=None, out=None):
arrays = _arrays_for_stack_dispatcher(arrays, stacklevel=6)
- for a in arrays:
- yield a
if out is not None:
- yield out
+ # optimize for the typical case where only arrays is provided
+ arrays = list(arrays)
+ arrays.append(out)
+ return arrays
@array_function_dispatch(_stack_dispatcher)
diff --git a/numpy/core/src/common/get_attr_string.h b/numpy/core/src/common/get_attr_string.h
index bec87c5ed..d458d9550 100644
--- a/numpy/core/src/common/get_attr_string.h
+++ b/numpy/core/src/common/get_attr_string.h
@@ -103,7 +103,6 @@ PyArray_LookupSpecial(PyObject *obj, char *name)
if (_is_basic_python_type(tp)) {
return NULL;
}
-
return maybe_get_attr((PyObject *)tp, name);
}
diff --git a/numpy/core/src/multiarray/_multiarray_module_test.c b/numpy/core/src/multiarray/_multiarray_module_test.c
new file mode 100644
index 000000000..8dc017279
--- /dev/null
+++ b/numpy/core/src/multiarray/_multiarray_module_test.c
@@ -0,0 +1,129 @@
+#include "Python.h"
+
+/*
+ * This is a dummy module. It will be used to ruin the import of multiarray
+ * during testing. It exports two entry points, one to make the build happy,
+ * and a multiarray one for the actual test. The content of the module is
+ * irrelevant to the test.
+ *
+ * The code is from
+ * https://docs.python.org/3/howto/cporting.html
+ * or
+ * https://github.com/python/cpython/blob/v3.7.0/Doc/howto/cporting.rst
+ */
+
+#if defined _WIN32 || defined __CYGWIN__ || defined __MINGW32__
+ #if defined __GNUC__ || defined __clang__
+ #define DLL_PUBLIC __attribute__ ((dllexport))
+ #else
+ #define DLL_PUBLIC __declspec(dllexport)
+ #endif
+#elif defined __GNUC__ || defined __clang__
+ #define DLL_PUBLIC __attribute__ ((visibility ("default")))
+#else
+ /* Enhancement: error now instead ? */
+ #define DLL_PUBLIC
+#endif
+
+struct module_state {
+ PyObject *error;
+};
+
+#if PY_MAJOR_VERSION >= 3
+#define GETSTATE(m) ((struct module_state*)PyModule_GetState(m))
+#else
+#define GETSTATE(m) (&_state)
+static struct module_state _state;
+#endif
+
+static PyObject *
+error_out(PyObject *m) {
+ struct module_state *st = GETSTATE(m);
+ PyErr_SetString(st->error, "something bad happened");
+ return NULL;
+}
+
+static PyMethodDef multiarray_methods[] = {
+ {"error_out", (PyCFunction)error_out, METH_NOARGS, NULL},
+ {NULL, NULL}
+};
+
+#if PY_MAJOR_VERSION >= 3
+
+static int multiarray_traverse(PyObject *m, visitproc visit, void *arg) {
+ Py_VISIT(GETSTATE(m)->error);
+ return 0;
+}
+
+static int multiarray_clear(PyObject *m) {
+ Py_CLEAR(GETSTATE(m)->error);
+ return 0;
+}
+
+
+static struct PyModuleDef moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "multiarray",
+ NULL,
+ sizeof(struct module_state),
+ multiarray_methods,
+ NULL,
+ multiarray_traverse,
+ multiarray_clear,
+ NULL
+};
+
+#define INITERROR return NULL
+
+DLL_PUBLIC PyObject *
+PyInit_multiarray(void)
+
+#else
+#define INITERROR return
+
+void
+DLL_PUBLIC initmultiarray(void)
+#endif
+{
+#if PY_MAJOR_VERSION >= 3
+ PyObject *module = PyModule_Create(&moduledef);
+#else
+ PyObject *module = Py_InitModule("multiarray", multiarray_methods);
+#endif
+ struct module_state *st;
+ if (module == NULL)
+ INITERROR;
+ st = GETSTATE(module);
+
+ st->error = PyErr_NewException("multiarray.Error", NULL, NULL);
+ if (st->error == NULL) {
+ Py_DECREF(module);
+ INITERROR;
+ }
+
+#if PY_MAJOR_VERSION >= 3
+ return module;
+#endif
+}
+
+/*
+ * Define a dummy entry point to make MSVC happy
+ * Python's build system will export this function automatically
+ */
+#if PY_MAJOR_VERSION >= 3
+
+PyObject *
+PyInit__multiarray_module_test(void)
+{
+ return PyInit_multiarray();
+}
+
+#else
+
+void
+init_multiarray_module_test(void)
+{
+ initmultiarray();
+}
+
+#endif
diff --git a/numpy/core/src/multiarray/_multiarray_tests.c.src b/numpy/core/src/multiarray/_multiarray_tests.c.src
index 2a8275572..c26bd16ac 100644
--- a/numpy/core/src/multiarray/_multiarray_tests.c.src
+++ b/numpy/core/src/multiarray/_multiarray_tests.c.src
@@ -1871,11 +1871,14 @@ printf_float_g(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
static PyObject *
getset_numericops(PyObject* NPY_UNUSED(self), PyObject* NPY_UNUSED(args))
{
- PyObject * ops = PyArray_GetNumericOps();
+ PyObject *ret;
+ PyObject *ops = PyArray_GetNumericOps();
if (ops == NULL) {
return NULL;
}
- return PyLong_FromLong(PyArray_SetNumericOps(ops));
+ ret = PyLong_FromLong(PyArray_SetNumericOps(ops));
+ Py_DECREF(ops);
+ return ret;
}
static PyMethodDef Multiarray_TestsMethods[] = {
diff --git a/numpy/core/src/multiarray/arrayfunction_override.c b/numpy/core/src/multiarray/arrayfunction_override.c
new file mode 100644
index 000000000..e62b32ab2
--- /dev/null
+++ b/numpy/core/src/multiarray/arrayfunction_override.c
@@ -0,0 +1,376 @@
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+#define _MULTIARRAYMODULE
+
+#include "npy_pycompat.h"
+#include "get_attr_string.h"
+#include "npy_import.h"
+#include "multiarraymodule.h"
+
+
+/* Return the ndarray.__array_function__ method. */
+static PyObject *
+get_ndarray_array_function(void)
+{
+ PyObject* method = PyObject_GetAttrString((PyObject *)&PyArray_Type,
+ "__array_function__");
+ assert(method != NULL);
+ return method;
+}
+
+
+/*
+ * Get an object's __array_function__ method in the fastest way possible.
+ * Never raises an exception. Returns NULL if the method doesn't exist.
+ */
+static PyObject *
+get_array_function(PyObject *obj)
+{
+ static PyObject *ndarray_array_function = NULL;
+
+ if (ndarray_array_function == NULL) {
+ ndarray_array_function = get_ndarray_array_function();
+ }
+
+ /* Fast return for ndarray */
+ if (PyArray_CheckExact(obj)) {
+ Py_INCREF(ndarray_array_function);
+ return ndarray_array_function;
+ }
+
+ return PyArray_LookupSpecial(obj, "__array_function__");
+}
+
+
+/*
+ * Like list.insert(), but for C arrays of PyObject*. Skips error checking.
+ */
+static void
+pyobject_array_insert(PyObject **array, int length, int index, PyObject *item)
+{
+ int j;
+
+ for (j = length; j > index; j--) {
+ array[j] = array[j - 1];
+ }
+ array[index] = item;
+}
+
+
+/*
+ * Collects arguments with __array_function__ and their corresponding methods
+ * in the order in which they should be tried (i.e., skipping redundant types).
+ * `relevant_args` is expected to have been produced by PySequence_Fast.
+ * Returns the number of arguments, or -1 on failure.
+ */
+static int
+get_implementing_args_and_methods(PyObject *relevant_args,
+ PyObject **implementing_args,
+ PyObject **methods)
+{
+ int num_implementing_args = 0;
+ Py_ssize_t i;
+ int j;
+
+ PyObject **items = PySequence_Fast_ITEMS(relevant_args);
+ Py_ssize_t length = PySequence_Fast_GET_SIZE(relevant_args);
+
+ for (i = 0; i < length; i++) {
+ int new_class = 1;
+ PyObject *argument = items[i];
+
+ /* Have we seen this type before? */
+ for (j = 0; j < num_implementing_args; j++) {
+ if (Py_TYPE(argument) == Py_TYPE(implementing_args[j])) {
+ new_class = 0;
+ break;
+ }
+ }
+ if (new_class) {
+ PyObject *method = get_array_function(argument);
+
+ if (method != NULL) {
+ int arg_index;
+
+ if (num_implementing_args >= NPY_MAXARGS) {
+ PyErr_Format(
+ PyExc_TypeError,
+ "maximum number (%d) of distinct argument types " \
+ "implementing __array_function__ exceeded",
+ NPY_MAXARGS);
+ Py_DECREF(method);
+ goto fail;
+ }
+
+ /* "subclasses before superclasses, otherwise left to right" */
+ arg_index = num_implementing_args;
+ for (j = 0; j < num_implementing_args; j++) {
+ PyObject *other_type;
+ other_type = (PyObject *)Py_TYPE(implementing_args[j]);
+ if (PyObject_IsInstance(argument, other_type)) {
+ arg_index = j;
+ break;
+ }
+ }
+ Py_INCREF(argument);
+ pyobject_array_insert(implementing_args, num_implementing_args,
+ arg_index, argument);
+ pyobject_array_insert(methods, num_implementing_args,
+ arg_index, method);
+ ++num_implementing_args;
+ }
+ }
+ }
+ return num_implementing_args;
+
+fail:
+ for (j = 0; j < num_implementing_args; j++) {
+ Py_DECREF(implementing_args[j]);
+ Py_DECREF(methods[j]);
+ }
+ return -1;
+}
+
+
+/*
+ * Is this object ndarray.__array_function__?
+ */
+static int
+is_default_array_function(PyObject *obj)
+{
+ static PyObject *ndarray_array_function = NULL;
+
+ if (ndarray_array_function == NULL) {
+ ndarray_array_function = get_ndarray_array_function();
+ }
+ return obj == ndarray_array_function;
+}
+
+
+/*
+ * Core implementation of ndarray.__array_function__. This is exposed
+ * separately so we can avoid the overhead of a Python method call from
+ * within `implement_array_function`.
+ */
+NPY_NO_EXPORT PyObject *
+array_function_method_impl(PyObject *func, PyObject *types, PyObject *args,
+ PyObject *kwargs)
+{
+ Py_ssize_t j;
+ PyObject *implementation, *result;
+
+ PyObject **items = PySequence_Fast_ITEMS(types);
+ Py_ssize_t length = PySequence_Fast_GET_SIZE(types);
+
+ for (j = 0; j < length; j++) {
+ int is_subclass = PyObject_IsSubclass(
+ items[j], (PyObject *)&PyArray_Type);
+ if (is_subclass == -1) {
+ return NULL;
+ }
+ if (!is_subclass) {
+ Py_INCREF(Py_NotImplemented);
+ return Py_NotImplemented;
+ }
+ }
+
+ implementation = PyObject_GetAttr(func, npy_ma_str_wrapped);
+ if (implementation == NULL) {
+ return NULL;
+ }
+ result = PyObject_Call(implementation, args, kwargs);
+ Py_DECREF(implementation);
+ return result;
+}
+
+
+/*
+ * Calls __array_function__ on the provided argument, with a fast-path for
+ * ndarray.
+ */
+static PyObject *
+call_array_function(PyObject* argument, PyObject* method,
+ PyObject* public_api, PyObject* types,
+ PyObject* args, PyObject* kwargs)
+{
+ if (is_default_array_function(method)) {
+ return array_function_method_impl(public_api, types, args, kwargs);
+ }
+ else {
+ return PyObject_CallFunctionObjArgs(
+ method, argument, public_api, types, args, kwargs, NULL);
+ }
+}
+
+
+/*
+ * Implements the __array_function__ protocol for a function, as described in
+ * in NEP-18. See numpy.core.overrides for a full docstring.
+ */
+NPY_NO_EXPORT PyObject *
+array_implement_array_function(
+ PyObject *NPY_UNUSED(dummy), PyObject *positional_args)
+{
+ PyObject *implementation, *public_api, *relevant_args, *args, *kwargs;
+
+ PyObject *types = NULL;
+ PyObject *implementing_args[NPY_MAXARGS];
+ PyObject *array_function_methods[NPY_MAXARGS];
+
+ int j, any_overrides;
+ int num_implementing_args = 0;
+ PyObject *result = NULL;
+
+ static PyObject *errmsg_formatter = NULL;
+
+ if (!PyArg_UnpackTuple(
+ positional_args, "implement_array_function", 5, 5,
+ &implementation, &public_api, &relevant_args, &args, &kwargs)) {
+ return NULL;
+ }
+
+ relevant_args = PySequence_Fast(
+ relevant_args,
+ "dispatcher for __array_function__ did not return an iterable");
+ if (relevant_args == NULL) {
+ return NULL;
+ }
+
+ /* Collect __array_function__ implementations */
+ num_implementing_args = get_implementing_args_and_methods(
+ relevant_args, implementing_args, array_function_methods);
+ if (num_implementing_args == -1) {
+ goto cleanup;
+ }
+
+ /*
+ * Handle the typical case of no overrides. This is merely an optimization
+ * if some arguments are ndarray objects, but is also necessary if no
+ * arguments implement __array_function__ at all (e.g., if they are all
+ * built-in types).
+ */
+ any_overrides = 0;
+ for (j = 0; j < num_implementing_args; j++) {
+ if (!is_default_array_function(array_function_methods[j])) {
+ any_overrides = 1;
+ break;
+ }
+ }
+ if (!any_overrides) {
+ result = PyObject_Call(implementation, args, kwargs);
+ goto cleanup;
+ }
+
+ /*
+ * Create a Python object for types.
+ * We use a tuple, because it's the fastest Python collection to create
+ * and has the bonus of being immutable.
+ */
+ types = PyTuple_New(num_implementing_args);
+ if (types == NULL) {
+ goto cleanup;
+ }
+ for (j = 0; j < num_implementing_args; j++) {
+ PyObject *arg_type = (PyObject *)Py_TYPE(implementing_args[j]);
+ Py_INCREF(arg_type);
+ PyTuple_SET_ITEM(types, j, arg_type);
+ }
+
+ /* Call __array_function__ methods */
+ for (j = 0; j < num_implementing_args; j++) {
+ PyObject *argument = implementing_args[j];
+ PyObject *method = array_function_methods[j];
+
+ /*
+ * We use `public_api` instead of `implementation` here so
+ * __array_function__ implementations can do equality/identity
+ * comparisons.
+ */
+ result = call_array_function(
+ argument, method, public_api, types, args, kwargs);
+
+ if (result == Py_NotImplemented) {
+ /* Try the next one */
+ Py_DECREF(result);
+ result = NULL;
+ }
+ else {
+ /* Either a good result, or an exception was raised. */
+ goto cleanup;
+ }
+ }
+
+ /* No acceptable override found, raise TypeError. */
+ npy_cache_import("numpy.core._internal",
+ "array_function_errmsg_formatter",
+ &errmsg_formatter);
+ if (errmsg_formatter != NULL) {
+ PyObject *errmsg = PyObject_CallFunctionObjArgs(
+ errmsg_formatter, public_api, types, NULL);
+ if (errmsg != NULL) {
+ PyErr_SetObject(PyExc_TypeError, errmsg);
+ Py_DECREF(errmsg);
+ }
+ }
+
+cleanup:
+ for (j = 0; j < num_implementing_args; j++) {
+ Py_DECREF(implementing_args[j]);
+ Py_DECREF(array_function_methods[j]);
+ }
+ Py_XDECREF(types);
+ Py_DECREF(relevant_args);
+ return result;
+}
+
+
+/*
+ * Python wrapper for get_implementing_args_and_methods, for testing purposes.
+ */
+NPY_NO_EXPORT PyObject *
+array__get_implementing_args(
+ PyObject *NPY_UNUSED(dummy), PyObject *positional_args)
+{
+ PyObject *relevant_args;
+ int j;
+ int num_implementing_args = 0;
+ PyObject *implementing_args[NPY_MAXARGS];
+ PyObject *array_function_methods[NPY_MAXARGS];
+ PyObject *result = NULL;
+
+ if (!PyArg_ParseTuple(positional_args, "O:array__get_implementing_args",
+ &relevant_args)) {
+ return NULL;
+ }
+
+ relevant_args = PySequence_Fast(
+ relevant_args,
+ "dispatcher for __array_function__ did not return an iterable");
+ if (relevant_args == NULL) {
+ return NULL;
+ }
+
+ num_implementing_args = get_implementing_args_and_methods(
+ relevant_args, implementing_args, array_function_methods);
+ if (num_implementing_args == -1) {
+ goto cleanup;
+ }
+
+ /* create a Python object for implementing_args */
+ result = PyList_New(num_implementing_args);
+ if (result == NULL) {
+ goto cleanup;
+ }
+ for (j = 0; j < num_implementing_args; j++) {
+ PyObject *argument = implementing_args[j];
+ Py_INCREF(argument);
+ PyList_SET_ITEM(result, j, argument);
+ }
+
+cleanup:
+ for (j = 0; j < num_implementing_args; j++) {
+ Py_DECREF(implementing_args[j]);
+ Py_DECREF(array_function_methods[j]);
+ }
+ Py_DECREF(relevant_args);
+ return result;
+}
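
The collection order ("subclasses before superclasses, otherwise left to
right") is observable through the testing hook exported here. A sketch:

    import numpy as np
    from numpy.core._multiarray_umath import _get_implementing_args

    class A(np.ndarray):
        pass

    class B(A):
        pass

    a = np.arange(3).view(A)
    b = np.arange(3).view(B)
    _get_implementing_args([a, b])  # [b, a] -- the subclass comes first
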
diff --git a/numpy/core/src/multiarray/arrayfunction_override.h b/numpy/core/src/multiarray/arrayfunction_override.h
new file mode 100644
index 000000000..0d224e2b6
--- /dev/null
+++ b/numpy/core/src/multiarray/arrayfunction_override.h
@@ -0,0 +1,16 @@
+#ifndef _NPY_PRIVATE__ARRAYFUNCTION_OVERRIDE_H
+#define _NPY_PRIVATE__ARRAYFUNCTION_OVERRIDE_H
+
+NPY_NO_EXPORT PyObject *
+array_implement_array_function(
+ PyObject *NPY_UNUSED(dummy), PyObject *positional_args);
+
+NPY_NO_EXPORT PyObject *
+array__get_implementing_args(
+ PyObject *NPY_UNUSED(dummy), PyObject *positional_args);
+
+NPY_NO_EXPORT PyObject *
+array_function_method_impl(PyObject *func, PyObject *types, PyObject *args,
+ PyObject *kwargs);
+
+#endif
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index 823ee7115..ca5f5a47b 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -2205,15 +2205,19 @@ static void
VOID_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride,
npy_intp n, int swap, PyArrayObject *arr)
{
+ PyArray_Descr *descr;
+
if (arr == NULL) {
return;
}
+
+ descr = PyArray_DESCR(arr);
+
if (PyArray_HASFIELDS(arr)) {
PyObject *key, *value;
- PyArray_Descr *descr;
+
Py_ssize_t pos = 0;
- descr = PyArray_DESCR(arr);
while (PyDict_Next(descr->fields, &pos, &key, &value)) {
npy_intp offset;
PyArray_Descr * new;
@@ -2236,14 +2240,28 @@ VOID_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride,
((PyArrayObject_fields *)arr)->descr = descr;
return;
}
- if (swap && PyArray_DESCR(arr)->subarray != NULL) {
- PyArray_Descr *descr, *new;
+ if (PyDataType_HASSUBARRAY(descr)) {
+ PyArray_Descr *new;
npy_intp num;
npy_intp i;
int subitemsize;
char *dstptr, *srcptr;
+ /*
+ * In certain cases the subarray copy can be optimized: when byte
+ * swapping is unnecessary and the subarray's data type can simply
+ * be copied (no object, fields, subarray, and not a user dtype).
+ */
+ npy_bool can_optimize_subarray = (!swap &&
+ !PyDataType_HASFIELDS(descr->subarray->base) &&
+ !PyDataType_HASSUBARRAY(descr->subarray->base) &&
+ !PyDataType_REFCHK(descr->subarray->base) &&
+ (descr->subarray->base->type_num < NPY_NTYPES));
+
+ if (can_optimize_subarray) {
+ _basic_copyn(dst, dstride, src, sstride, n, descr->elsize);
+ return;
+ }
- descr = PyArray_DESCR(arr);
new = descr->subarray->base;
/*
* TODO: temporarily modifying the array like this
@@ -2253,6 +2271,10 @@ VOID_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride,
dstptr = dst;
srcptr = src;
subitemsize = new->elsize;
+ if (subitemsize == 0) {
+ /* There cannot be any elements, so return */
+ return;
+ }
num = descr->elsize / subitemsize;
for (i = 0; i < n; i++) {
new->f->copyswapn(dstptr, subitemsize, srcptr,
@@ -2265,22 +2287,26 @@ VOID_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride,
((PyArrayObject_fields *)arr)->descr = descr;
return;
}
- _basic_copyn(dst, dstride, src, sstride, n, PyArray_DESCR(arr)->elsize);
+ /* Must be a naive Void type (e.g. a "V8") so simple copy is sufficient. */
+ _basic_copyn(dst, dstride, src, sstride, n, descr->elsize);
return;
}
static void
VOID_copyswap (char *dst, char *src, int swap, PyArrayObject *arr)
{
+ PyArray_Descr *descr;
+
if (arr == NULL) {
return;
}
+
+ descr = PyArray_DESCR(arr);
+
if (PyArray_HASFIELDS(arr)) {
PyObject *key, *value;
- PyArray_Descr *descr;
Py_ssize_t pos = 0;
- descr = PyArray_DESCR(arr);
while (PyDict_Next(descr->fields, &pos, &key, &value)) {
npy_intp offset;
PyArray_Descr * new;
@@ -2303,28 +2329,45 @@ VOID_copyswap (char *dst, char *src, int swap, PyArrayObject *arr)
((PyArrayObject_fields *)arr)->descr = descr;
return;
}
- if (swap && PyArray_DESCR(arr)->subarray != NULL) {
- PyArray_Descr *descr, *new;
+ if (PyDataType_HASSUBARRAY(descr)) {
+ PyArray_Descr *new;
npy_intp num;
- int itemsize;
+ int subitemsize;
+ /*
+ * In certain cases the subarray copy can be optimized: when byte
+ * swapping is unnecessary and the subarray's data type can simply
+ * be copied (no object, fields, subarray, and not a user dtype).
+ */
+ npy_bool can_optimize_subarray = (!swap &&
+ !PyDataType_HASFIELDS(descr->subarray->base) &&
+ !PyDataType_HASSUBARRAY(descr->subarray->base) &&
+ !PyDataType_REFCHK(descr->subarray->base) &&
+ (descr->subarray->base->type_num < NPY_NTYPES));
+
+ if (can_optimize_subarray) {
+ _basic_copy(dst, src, descr->elsize);
+ return;
+ }
- descr = PyArray_DESCR(arr);
new = descr->subarray->base;
/*
* TODO: temporarily modifying the array like this
* is bad coding style, should be changed.
*/
((PyArrayObject_fields *)arr)->descr = new;
- itemsize = new->elsize;
- num = descr->elsize / itemsize;
- new->f->copyswapn(dst, itemsize, src,
- itemsize, num, swap, arr);
+ subitemsize = new->elsize;
+ if (subitemsize == 0) {
+ /* There cannot be any elements, so return */
+ return;
+ }
+ num = descr->elsize / subitemsize;
+ new->f->copyswapn(dst, subitemsize, src,
+ subitemsize, num, swap, arr);
((PyArrayObject_fields *)arr)->descr = descr;
return;
}
-
- /* copy first if needed */
- _basic_copy(dst, src, PyArray_DESCR(arr)->elsize);
+ /* Must be a naive Void type (e.g. a "V8") so simple copy is sufficient. */
+ _basic_copy(dst, src, descr->elsize);
return;
}
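
For orientation, a dtype that qualifies for the new fast path has a subarray
whose base type is simple (a sketch; the optimization itself is internal and
only visible as reduced copy overhead):

    import numpy as np

    dt = np.dtype(('<i4', (3,)))  # subarray dtype
    dt.base   # dtype('int32') -- no fields, objects, or nested subarray,
              # so an unswapped copy can be a single memcpy per element
    dt.shape  # (3,)
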
diff --git a/numpy/core/src/multiarray/buffer.c b/numpy/core/src/multiarray/buffer.c
index 2f66d7f2f..d8ad80266 100644
--- a/numpy/core/src/multiarray/buffer.c
+++ b/numpy/core/src/multiarray/buffer.c
@@ -509,6 +509,10 @@ _buffer_info_new(PyObject *obj)
PyArray_Descr *descr = NULL;
int err = 0;
+ /*
+ * Note that the buffer info is cached as Python ints, which makes the
+ * allocations appear to valgrind as unreachable lost memory.
+ */
info = malloc(sizeof(_buffer_info_t));
if (info == NULL) {
PyErr_NoMemory();
@@ -579,9 +583,11 @@ _buffer_info_new(PyObject *obj)
err = _buffer_format_string(descr, &fmt, obj, NULL, NULL);
Py_DECREF(descr);
if (err != 0) {
+ free(info->shape);
goto fail;
}
if (_append_char(&fmt, '\0') < 0) {
+ free(info->shape);
goto fail;
}
info->format = fmt.s;
diff --git a/numpy/core/src/multiarray/common.c b/numpy/core/src/multiarray/common.c
index 2e51cee7e..addb67732 100644
--- a/numpy/core/src/multiarray/common.c
+++ b/numpy/core/src/multiarray/common.c
@@ -164,7 +164,7 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
if (string_type == NPY_STRING) {
if ((temp = PyObject_Str(obj)) == NULL) {
- return -1;
+ goto fail;
}
#if defined(NPY_PY3K)
#if PY_VERSION_HEX >= 0x03030000
@@ -182,7 +182,7 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
#else
if ((temp = PyObject_Unicode(obj)) == NULL) {
#endif
- return -1;
+ goto fail;
}
itemsize = PyUnicode_GET_DATA_SIZE(temp);
#ifndef Py_UNICODE_WIDE
@@ -216,7 +216,7 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
if (string_type == NPY_STRING) {
if ((temp = PyObject_Str(obj)) == NULL) {
- return -1;
+ goto fail;
}
#if defined(NPY_PY3K)
#if PY_VERSION_HEX >= 0x03030000
@@ -234,7 +234,7 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
#else
if ((temp = PyObject_Unicode(obj)) == NULL) {
#endif
- return -1;
+ goto fail;
}
itemsize = PyUnicode_GET_DATA_SIZE(temp);
#ifndef Py_UNICODE_WIDE
@@ -511,7 +511,7 @@ promote_types:
PyArray_Descr *res_dtype = PyArray_PromoteTypes(dtype, *out_dtype);
Py_DECREF(dtype);
if (res_dtype == NULL) {
- return -1;
+ goto fail;
}
if (!string_type &&
res_dtype->type_num == NPY_UNICODE &&
diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c
index 10e3478e2..625028bfb 100644
--- a/numpy/core/src/multiarray/compiled_base.c
+++ b/numpy/core/src/multiarray/compiled_base.c
@@ -328,6 +328,7 @@ arr_insert(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
} else {
Py_XDECREF(values);
Py_XDECREF(mask);
+ PyArray_ResolveWritebackIfCopy(array);
Py_XDECREF(array);
Py_RETURN_NONE;
}
@@ -358,6 +359,7 @@ arr_insert(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
fail:
Py_XDECREF(mask);
+ PyArray_ResolveWritebackIfCopy(array);
Py_XDECREF(array);
Py_XDECREF(values);
return NULL;
@@ -1575,6 +1577,7 @@ pack_bits(PyObject *input, int axis)
if (!PyArray_ISBOOL(inp) && !PyArray_ISINTEGER(inp)) {
PyErr_SetString(PyExc_TypeError,
"Expected an input array of integer or boolean data type");
+ Py_DECREF(inp);
goto fail;
}
@@ -1682,6 +1685,7 @@ unpack_bits(PyObject *input, int axis)
if (PyArray_TYPE(inp) != NPY_UBYTE) {
PyErr_SetString(PyExc_TypeError,
"Expected an input array of unsigned byte data type");
+ Py_DECREF(inp);
goto fail;
}
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index a17621946..b9059ba4d 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -1410,6 +1410,7 @@ _array_from_buffer_3118(PyObject *memoryview)
* dimensions, so the array is now 0d.
*/
nd = 0;
+ Py_DECREF(descr);
descr = (PyArray_Descr *)PyObject_CallFunctionObjArgs(
(PyObject *)&PyArrayDescr_Type, Py_TYPE(view->obj), NULL);
if (descr == NULL) {
@@ -2128,12 +2129,15 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags)
*/
/* 2017-Nov-10 1.14 */
- if (DEPRECATE("NPY_ARRAY_UPDATEIFCOPY, NPY_ARRAY_INOUT_ARRAY, and "
- "NPY_ARRAY_INOUT_FARRAY are deprecated, use NPY_WRITEBACKIFCOPY, "
- "NPY_ARRAY_INOUT_ARRAY2, or NPY_ARRAY_INOUT_FARRAY2 respectively "
- "instead, and call PyArray_ResolveWritebackIfCopy before the "
- "array is deallocated, i.e. before the last call to Py_DECREF.") < 0)
+ if (DEPRECATE(
+ "NPY_ARRAY_UPDATEIFCOPY, NPY_ARRAY_INOUT_ARRAY, and "
+ "NPY_ARRAY_INOUT_FARRAY are deprecated, use NPY_WRITEBACKIFCOPY, "
+ "NPY_ARRAY_INOUT_ARRAY2, or NPY_ARRAY_INOUT_FARRAY2 respectively "
+ "instead, and call PyArray_ResolveWritebackIfCopy before the "
+ "array is deallocated, i.e. before the last call to Py_DECREF.") < 0) {
+ Py_DECREF(ret);
return NULL;
+ }
Py_INCREF(arr);
if (PyArray_SetWritebackIfCopyBase(ret, arr) < 0) {
Py_DECREF(ret);
@@ -2160,14 +2164,12 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags)
Py_DECREF(newtype);
if (needview) {
- PyArray_Descr *dtype = PyArray_DESCR(arr);
PyTypeObject *subtype = NULL;
if (flags & NPY_ARRAY_ENSUREARRAY) {
subtype = &PyArray_Type;
}
- Py_INCREF(dtype);
ret = (PyArrayObject *)PyArray_View(arr, NULL, subtype);
if (ret == NULL) {
return NULL;
@@ -2495,6 +2497,11 @@ PyArray_FromInterface(PyObject *origin)
&PyArray_Type, dtype,
n, dims, NULL, data,
dataflags, NULL, base);
+ /*
+ * Ref to dtype was stolen by PyArray_NewFromDescrAndBase
+ * Prevent DECREFing dtype in fail codepath by setting to NULL
+ */
+ dtype = NULL;
if (ret == NULL) {
goto fail;
}
diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c
index a8550d958..54d19d993 100644
--- a/numpy/core/src/multiarray/datetime.c
+++ b/numpy/core/src/multiarray/datetime.c
@@ -3822,18 +3822,26 @@ recursive_find_object_timedelta64_type(PyObject *obj,
* single object using [()], but not by using
* __getitem__(integer) approaches
*/
- PyObject *item, *meth, *args;
+ PyObject *item, *args;
- meth = PyObject_GetAttrString(obj, "__getitem__");
- args = Py_BuildValue("(())");
- item = PyObject_CallObject(meth, args);
+ args = PyTuple_New(0);
+ if (args == NULL) {
+ return 0;
+ }
+ item = PyObject_GetItem(obj, args);
+ Py_DECREF(args);
+ if (item == NULL) {
+ return 0;
+ }
/*
* NOTE: may need other type checks here in the future
* for expanded 0 D datetime array conversions?
*/
if (PyDelta_Check(item)) {
+ Py_DECREF(item);
return delta_checker(meta);
}
+ Py_DECREF(item);
}
}
}
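
The [()] indexing used here is the standard way to extract the single
element of a 0-d array; integer indexing would fail. For example:

    import numpy as np

    x = np.array(5)  # 0-d array
    x[()]            # 5 -- extracts the lone element
    x[0]             # IndexError: too many indices for array
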
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index 3038e4dea..0471a2a3e 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -515,6 +515,7 @@ _convert_from_array_descr(PyObject *obj, int align)
#if defined(NPY_PY3K)
Py_DECREF(name);
#endif
+ Py_DECREF(conv);
goto fail;
}
dtypeflags |= (conv->flags & NPY_FROM_FIELDS);
@@ -837,9 +838,11 @@ _use_inherit(PyArray_Descr *type, PyObject *newobj, int *errflag)
else if (new->elsize != conv->elsize) {
PyErr_SetString(PyExc_ValueError,
"mismatch in size of old and new data-descriptor");
+ Py_DECREF(new);
goto fail;
}
else if (invalid_union_object_dtype(new, conv)) {
+ Py_DECREF(new);
goto fail;
}
@@ -1728,6 +1731,7 @@ PyArray_DescrNew(PyArray_Descr *base)
newdescr->c_metadata = NPY_AUXDATA_CLONE(base->c_metadata);
if (newdescr->c_metadata == NULL) {
PyErr_NoMemory();
+ /* TODO: This seems wrong, as the old fields get decref'd? */
Py_DECREF(newdescr);
return NULL;
}
@@ -3336,12 +3340,15 @@ static PyObject *
_subscript_by_index(PyArray_Descr *self, Py_ssize_t i)
{
PyObject *name = PySequence_GetItem(self->names, i);
+ PyObject *ret;
if (name == NULL) {
PyErr_Format(PyExc_IndexError,
"Field index %zd out of range.", i);
return NULL;
}
- return _subscript_by_name(self, name);
+ ret = _subscript_by_name(self, name);
+ Py_DECREF(name);
+ return ret;
}
static PyObject *
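
_subscript_by_index backs integer indexing of structured dtypes; the fix
releases the temporary name reference. The Python-visible behavior:

    import numpy as np

    dt = np.dtype([('a', 'i4'), ('b', 'f8')])
    dt[0]    # dtype('int32')
    dt['b']  # dtype('float64')
    dt[5]    # IndexError: Field index 5 out of range.
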
diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c
index 63b1ead25..6347d35eb 100644
--- a/numpy/core/src/multiarray/dtype_transfer.c
+++ b/numpy/core/src/multiarray/dtype_transfer.c
@@ -1572,12 +1572,30 @@ get_cast_transfer_function(int aligned,
src_dtype,
&tobuffer, &todata);
-
- /* Get the copy/swap operation to dst */
- PyArray_GetDTypeCopySwapFn(aligned,
- dst_itemsize, dst_stride,
- dst_dtype,
- &frombuffer, &fromdata);
+ if (!PyDataType_REFCHK(dst_dtype)) {
+ /* Copying from buffer is a simple copy/swap operation */
+ PyArray_GetDTypeCopySwapFn(aligned,
+ dst_itemsize, dst_stride,
+ dst_dtype,
+ &frombuffer, &fromdata);
+ }
+ else {
+ /*
+ * Since the buffer is initialized to NULL, need to move the
+ * references in order to DECREF the existing data.
+ */
+ /* Object types cannot be byte swapped */
+ assert(PyDataType_ISNOTSWAPPED(dst_dtype));
+ /* The loop already needs the python api if this is reached */
+ assert(*out_needs_api);
+
+ if (PyArray_GetDTypeTransferFunction(
+ aligned, dst_itemsize, dst_stride,
+ dst_dtype, dst_dtype, 1,
+ &frombuffer, &fromdata, out_needs_api) != NPY_SUCCEED) {
+ return NPY_FAIL;
+ }
+ }
if (frombuffer == NULL || tobuffer == NULL) {
NPY_AUXDATA_FREE(castdata);
@@ -2001,6 +2019,7 @@ typedef struct {
_subarray_broadcast_offsetrun offsetruns;
} _subarray_broadcast_data;
+
/* transfer data free function */
static void _subarray_broadcast_data_free(NpyAuxData *data)
{
diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c
index 7c814e6e6..2fcc2ec22 100644
--- a/numpy/core/src/multiarray/methods.c
+++ b/numpy/core/src/multiarray/methods.c
@@ -8,6 +8,7 @@
#include "numpy/arrayobject.h"
#include "numpy/arrayscalars.h"
+#include "arrayfunction_override.h"
#include "npy_config.h"
#include "npy_pycompat.h"
#include "npy_import.h"
@@ -1088,13 +1089,29 @@ cleanup:
return result;
}
-
static PyObject *
-array_function(PyArrayObject *self, PyObject *args, PyObject *kwds)
+array_function(PyArrayObject *self, PyObject *c_args, PyObject *c_kwds)
{
- NPY_FORWARD_NDARRAY_METHOD("_array_function");
-}
+ PyObject *func, *types, *args, *kwargs, *result;
+ static char *kwlist[] = {"func", "types", "args", "kwargs", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(
+ c_args, c_kwds, "OOOO:__array_function__", kwlist,
+ &func, &types, &args, &kwargs)) {
+ return NULL;
+ }
+ types = PySequence_Fast(
+ types,
+ "types argument to ndarray.__array_function__ must be iterable");
+ if (types == NULL) {
+ return NULL;
+ }
+
+ result = array_function_method_impl(func, types, args, kwargs);
+ Py_DECREF(types);
+ return result;
+}
static PyObject *
array_copy(PyArrayObject *self, PyObject *args, PyObject *kwds)
@@ -1364,6 +1381,7 @@ array_argsort(PyArrayObject *self, PyObject *args, PyObject *kwds)
return NULL;
}
newd = PyArray_DescrNew(saved);
+ Py_DECREF(newd->names);
newd->names = new_name;
((PyArrayObject_fields *)self)->descr = newd;
}
@@ -1418,6 +1436,7 @@ array_argpartition(PyArrayObject *self, PyObject *args, PyObject *kwds)
return NULL;
}
newd = PyArray_DescrNew(saved);
+ Py_DECREF(newd->names);
newd->names = new_name;
((PyArrayObject_fields *)self)->descr = newd;
}
@@ -1521,7 +1540,6 @@ array_deepcopy(PyArrayObject *self, PyObject *args)
copy = PyImport_ImportModule("copy");
if (copy == NULL) {
Py_DECREF(copied_array);
- Py_DECREF(copy);
return NULL;
}
deepcopy = PyObject_GetAttrString(copy, "deepcopy");
@@ -1704,129 +1722,150 @@ array_reduce(PyArrayObject *self, PyObject *NPY_UNUSED(args))
}
static PyObject *
-array_reduce_ex(PyArrayObject *self, PyObject *args)
+array_reduce_ex_regular(PyArrayObject *self, int protocol)
{
- int protocol;
- PyObject *ret = NULL, *numeric_mod = NULL, *from_buffer_func = NULL;
- PyObject *buffer_tuple = NULL, *pickle_module = NULL, *pickle_class = NULL;
- PyObject *class_args = NULL, *class_args_tuple = NULL, *unused = NULL;
PyObject *subclass_array_reduce = NULL;
+ PyObject *ret;
+
+ /* We do not call array_reduce directly but instead lookup and call
+ * the __reduce__ method to make sure that it's possible to customize
+ * pickling in sub-classes. */
+ subclass_array_reduce = PyObject_GetAttrString((PyObject *)self,
+ "__reduce__");
+ if (subclass_array_reduce == NULL) {
+ return NULL;
+ }
+ ret = PyObject_CallObject(subclass_array_reduce, NULL);
+ Py_DECREF(subclass_array_reduce);
+ return ret;
+}
+
+static PyObject *
+array_reduce_ex_picklebuffer(PyArrayObject *self, int protocol)
+{
+ PyObject *numeric_mod = NULL, *from_buffer_func = NULL;
+ PyObject *pickle_module = NULL, *picklebuf_class = NULL;
+ PyObject *picklebuf_args = NULL;
PyObject *buffer = NULL, *transposed_array = NULL;
PyArray_Descr *descr = NULL;
char order;
- if (PyArg_ParseTuple(args, "i", &protocol)){
- descr = PyArray_DESCR(self);
- if ((protocol < 5) ||
- (!PyArray_IS_C_CONTIGUOUS((PyArrayObject*)self) &&
- !PyArray_IS_F_CONTIGUOUS((PyArrayObject*)self)) ||
- PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT) ||
- (PyType_IsSubtype(((PyObject*)self)->ob_type, &PyArray_Type) &&
- ((PyObject*)self)->ob_type != &PyArray_Type) ||
- PyDataType_ISUNSIZED(descr)) {
- /* The PickleBuffer class from version 5 of the pickle protocol
- * can only be used for arrays backed by a contiguous data buffer.
- * For all other cases we fallback to the generic array_reduce
- * method that involves using a temporary bytes allocation. However
- * we do not call array_reduce directly but instead lookup and call
- * the __reduce__ method to make sure that it's possible customize
- * pickling in sub-classes. */
- subclass_array_reduce = PyObject_GetAttrString((PyObject *)self,
- "__reduce__");
- return PyObject_CallObject(subclass_array_reduce, unused);
- }
- else if (protocol == 5){
- ret = PyTuple_New(2);
-
- if (ret == NULL) {
- return NULL;
- }
+ descr = PyArray_DESCR(self);
- /* if the python version is below 3.8, the pickle module does not provide
- * built-in support for protocol 5. We try importing the pickle5
- * backport instead */
+ /* if the python version is below 3.8, the pickle module does not provide
+ * built-in support for protocol 5. We try importing the pickle5
+ * backport instead */
#if PY_VERSION_HEX >= 0x03080000
- pickle_module = PyImport_ImportModule("pickle");
-#elif PY_VERSION_HEX < 0x03080000 && PY_VERSION_HEX >= 0x03060000
- pickle_module = PyImport_ImportModule("pickle5");
- if (pickle_module == NULL){
- /* for protocol 5, raise a clear ImportError if pickle5 is not found
- */
- PyErr_SetString(PyExc_ImportError, "Using pickle protocol 5 "
- "requires the pickle5 module for python versions >=3.6 "
- "and <3.8");
- return NULL;
- }
+ /* we expect protocol 5 to be available in Python 3.8 */
+ pickle_module = PyImport_ImportModule("pickle");
+#elif PY_VERSION_HEX >= 0x03060000
+ pickle_module = PyImport_ImportModule("pickle5");
+ if (pickle_module == NULL) {
+ /* for protocol 5, raise a clear ImportError if pickle5 is not found
+ */
+ PyErr_SetString(PyExc_ImportError, "Using pickle protocol 5 "
+ "requires the pickle5 module for Python >=3.6 and <3.8");
+ return NULL;
+ }
#else
- PyErr_SetString(PyExc_ValueError, "pickle protocol 5 is not available "
- "for python versions < 3.6");
- return NULL;
+ PyErr_SetString(PyExc_ValueError, "pickle protocol 5 is not available "
+ "for Python < 3.6");
+ return NULL;
#endif
- if (pickle_module == NULL){
- return NULL;
- }
-
- pickle_class = PyObject_GetAttrString(pickle_module,
- "PickleBuffer");
+ if (pickle_module == NULL){
+ return NULL;
+ }
+ picklebuf_class = PyObject_GetAttrString(pickle_module, "PickleBuffer");
+ Py_DECREF(pickle_module);
+ if (picklebuf_class == NULL) {
+ return NULL;
+ }
- class_args_tuple = PyTuple_New(1);
- if (!PyArray_IS_C_CONTIGUOUS((PyArrayObject*)self) &&
- PyArray_IS_F_CONTIGUOUS((PyArrayObject*)self)){
+ /* Construct a PickleBuffer of the array */
- /* if the array if Fortran-contiguous and not C-contiguous,
- * the PickleBuffer instance will hold a view on the transpose
- * of the initial array, that is C-contiguous. */
- order = 'F';
- transposed_array = PyArray_Transpose((PyArrayObject*)self, NULL);
- PyTuple_SET_ITEM(class_args_tuple, 0, transposed_array);
- }
- else {
- order = 'C';
- PyTuple_SET_ITEM(class_args_tuple, 0, (PyObject *)self);
- Py_INCREF(self);
- }
+ if (!PyArray_IS_C_CONTIGUOUS((PyArrayObject*) self) &&
+ PyArray_IS_F_CONTIGUOUS((PyArrayObject*) self)) {
+ /* if the array is Fortran-contiguous and not C-contiguous,
+ * the PickleBuffer instance will hold a view on the transpose
+ * of the initial array, that is C-contiguous. */
+ order = 'F';
+ transposed_array = PyArray_Transpose((PyArrayObject*)self, NULL);
+ picklebuf_args = Py_BuildValue("(N)", transposed_array);
+ }
+ else {
+ order = 'C';
+ picklebuf_args = Py_BuildValue("(O)", self);
+ }
+ if (picklebuf_args == NULL) {
+ Py_DECREF(picklebuf_class);
+ return NULL;
+ }
- class_args = Py_BuildValue("O", class_args_tuple);
+ buffer = PyObject_CallObject(picklebuf_class, picklebuf_args);
+ Py_DECREF(picklebuf_class);
+ Py_DECREF(picklebuf_args);
+ if (buffer == NULL) {
+ /* Some arrays may refuse to export a buffer, in which case
+ * just fall back on regular __reduce_ex__ implementation
+ * (gh-12745).
+ */
+ PyErr_Clear();
+ return array_reduce_ex_regular(self, protocol);
+ }
- buffer = PyObject_CallObject(pickle_class, class_args);
+ /* Get the _frombuffer() function for reconstruction */
- numeric_mod = PyImport_ImportModule("numpy.core.numeric");
- if (numeric_mod == NULL) {
- Py_DECREF(ret);
- return NULL;
- }
- from_buffer_func = PyObject_GetAttrString(numeric_mod,
- "_frombuffer");
- Py_DECREF(numeric_mod);
+ numeric_mod = PyImport_ImportModule("numpy.core.numeric");
+ if (numeric_mod == NULL) {
+ Py_DECREF(buffer);
+ return NULL;
+ }
+ from_buffer_func = PyObject_GetAttrString(numeric_mod,
+ "_frombuffer");
+ Py_DECREF(numeric_mod);
+ if (from_buffer_func == NULL) {
+ Py_DECREF(buffer);
+ return NULL;
+ }
- Py_INCREF(descr);
+ return Py_BuildValue("N(NONN)",
+ from_buffer_func, buffer, (PyObject *)descr,
+ PyObject_GetAttrString((PyObject *)self, "shape"),
+ PyUnicode_FromStringAndSize(&order, 1));
+}
- buffer_tuple = PyTuple_New(4);
- PyTuple_SET_ITEM(buffer_tuple, 0, buffer);
- PyTuple_SET_ITEM(buffer_tuple, 1, (PyObject *)descr);
- PyTuple_SET_ITEM(buffer_tuple, 2,
- PyObject_GetAttrString((PyObject *)self,
- "shape"));
- PyTuple_SET_ITEM(buffer_tuple, 3,
- PyUnicode_FromStringAndSize(&order,
- (Py_ssize_t)1));
+static PyObject *
+array_reduce_ex(PyArrayObject *self, PyObject *args)
+{
+ int protocol;
+ PyArray_Descr *descr = NULL;
- PyTuple_SET_ITEM(ret, 0, from_buffer_func);
- PyTuple_SET_ITEM(ret, 1, buffer_tuple);
+ if (!PyArg_ParseTuple(args, "i", &protocol)) {
+ return NULL;
+ }
- return ret;
- }
- else {
- PyErr_Format(PyExc_ValueError,
- "cannot call __reduce_ex__ with protocol >= %d",
- 5);
- return NULL;
- }
+ descr = PyArray_DESCR(self);
+ if ((protocol < 5) ||
+ (!PyArray_IS_C_CONTIGUOUS((PyArrayObject*)self) &&
+ !PyArray_IS_F_CONTIGUOUS((PyArrayObject*)self)) ||
+ PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT) ||
+ (PyType_IsSubtype(((PyObject*)self)->ob_type, &PyArray_Type) &&
+ ((PyObject*)self)->ob_type != &PyArray_Type) ||
+ PyDataType_ISUNSIZED(descr)) {
+ /* The PickleBuffer class from version 5 of the pickle protocol
+ * can only be used for arrays backed by a contiguous data buffer.
+ * In all other cases we fall back to the generic array_reduce
+ * method, which involves a temporary bytes allocation. */
+ return array_reduce_ex_regular(self, protocol);
+ }
+ else if (protocol == 5) {
+ return array_reduce_ex_picklebuffer(self, protocol);
}
else {
+ PyErr_Format(PyExc_ValueError,
+ "__reduce_ex__ called with protocol > 5");
return NULL;
}
-
}
static PyObject *
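For context, a minimal sketch of what the protocol-5 path above enables (assuming Python >= 3.8 or the pickle5 backport, and numpy built with this patch): a contiguous array can be pickled with its data exported out-of-band via PickleBuffer instead of being copied into the byte stream.

    import pickle
    import numpy as np

    arr = np.arange(10.0)  # C-contiguous, no object fields
    buffers = []
    # buffer_callback collects the out-of-band PickleBuffer objects
    # produced by the array_reduce_ex_picklebuffer path above.
    data = pickle.dumps(arr, protocol=5, buffer_callback=buffers.append)
    restored = pickle.loads(data, buffers=buffers)
    assert (restored == arr).all()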
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index 8135769d9..166533b3f 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -34,6 +34,7 @@
NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0;
/* Internal APIs */
+#include "arrayfunction_override.h"
#include "arraytypes.h"
#include "arrayobject.h"
#include "hashdescr.h"
@@ -3254,6 +3255,7 @@ array_datetime_data(PyObject *NPY_UNUSED(dummy), PyObject *args)
}
meta = get_datetime_metadata_from_dtype(dtype);
+ Py_DECREF(dtype);
if (meta == NULL) {
return NULL;
}
@@ -3618,6 +3620,7 @@ _vec_string_with_args(PyArrayObject* char_array, PyArray_Descr* type,
if (nargs == -1 || nargs > NPY_MAXARGS) {
PyErr_Format(PyExc_ValueError,
"len(args) must be < %d", NPY_MAXARGS - 1);
+ Py_DECREF(type);
goto err;
}
@@ -3625,6 +3628,7 @@ _vec_string_with_args(PyArrayObject* char_array, PyArray_Descr* type,
for (i = 1; i < nargs; i++) {
PyObject* item = PySequence_GetItem(args, i-1);
if (item == NULL) {
+ Py_DECREF(type);
goto err;
}
broadcast_args[i] = item;
@@ -3633,6 +3637,7 @@ _vec_string_with_args(PyArrayObject* char_array, PyArray_Descr* type,
in_iter = (PyArrayMultiIterObject*)PyArray_MultiIterFromObjects
(broadcast_args, nargs, 0);
if (in_iter == NULL) {
+ Py_DECREF(type);
goto err;
}
n = in_iter->numiter;
@@ -3713,6 +3718,7 @@ _vec_string_no_args(PyArrayObject* char_array,
in_iter = (PyArrayIterObject*)PyArray_IterNew((PyObject*)char_array);
if (in_iter == NULL) {
+ Py_DECREF(type);
goto err;
}
@@ -3769,7 +3775,7 @@ static PyObject *
_vec_string(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
{
PyArrayObject* char_array = NULL;
- PyArray_Descr *type = NULL;
+ PyArray_Descr *type;
PyObject* method_name;
PyObject* args_seq = NULL;
@@ -3806,6 +3812,7 @@ _vec_string(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
result = _vec_string_with_args(char_array, type, method, args_seq);
}
else {
+ Py_DECREF(type);
PyErr_SetString(PyExc_TypeError,
"'args' must be a sequence of arguments");
goto err;
@@ -4062,6 +4069,9 @@ normalize_axis_index(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds)
}
static struct PyMethodDef array_module_methods[] = {
+ {"_get_implementing_args",
+ (PyCFunction)array__get_implementing_args,
+ METH_VARARGS, NULL},
{"_get_ndarray_c_version",
(PyCFunction)array__get_ndarray_c_version,
METH_VARARGS|METH_KEYWORDS, NULL},
@@ -4224,6 +4234,9 @@ static struct PyMethodDef array_module_methods[] = {
METH_VARARGS | METH_KEYWORDS, NULL},
{"_monotonicity", (PyCFunction)arr__monotonicity,
METH_VARARGS | METH_KEYWORDS, NULL},
+ {"implement_array_function",
+ (PyCFunction)array_implement_array_function,
+ METH_VARARGS, NULL},
{"interp", (PyCFunction)arr_interp,
METH_VARARGS | METH_KEYWORDS, NULL},
{"interp_complex", (PyCFunction)arr_interp_complex,
@@ -4476,6 +4489,7 @@ NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_wrap = NULL;
NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_finalize = NULL;
NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_buffer = NULL;
NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_ufunc = NULL;
+NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_wrapped = NULL;
NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_order = NULL;
NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_copy = NULL;
NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_dtype = NULL;
@@ -4492,6 +4506,7 @@ intern_strings(void)
npy_ma_str_array_finalize = PyUString_InternFromString("__array_finalize__");
npy_ma_str_buffer = PyUString_InternFromString("__buffer__");
npy_ma_str_ufunc = PyUString_InternFromString("__array_ufunc__");
+ npy_ma_str_wrapped = PyUString_InternFromString("__wrapped__");
npy_ma_str_order = PyUString_InternFromString("order");
npy_ma_str_copy = PyUString_InternFromString("copy");
npy_ma_str_dtype = PyUString_InternFromString("dtype");
@@ -4501,7 +4516,7 @@ intern_strings(void)
return npy_ma_str_array && npy_ma_str_array_prepare &&
npy_ma_str_array_wrap && npy_ma_str_array_finalize &&
- npy_ma_str_buffer && npy_ma_str_ufunc &&
+ npy_ma_str_buffer && npy_ma_str_ufunc && npy_ma_str_wrapped &&
npy_ma_str_order && npy_ma_str_copy && npy_ma_str_dtype &&
npy_ma_str_ndmin && npy_ma_str_axis1 && npy_ma_str_axis2;
}
diff --git a/numpy/core/src/multiarray/multiarraymodule.h b/numpy/core/src/multiarray/multiarraymodule.h
index 3de68c549..60a3965c9 100644
--- a/numpy/core/src/multiarray/multiarraymodule.h
+++ b/numpy/core/src/multiarray/multiarraymodule.h
@@ -7,6 +7,7 @@ NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_wrap;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_finalize;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_buffer;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_ufunc;
+NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_wrapped;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_order;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_copy;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_dtype;
diff --git a/numpy/core/src/multiarray/nditer_constr.c b/numpy/core/src/multiarray/nditer_constr.c
index 90cff4077..18a2cc84f 100644
--- a/numpy/core/src/multiarray/nditer_constr.c
+++ b/numpy/core/src/multiarray/nditer_constr.c
@@ -1248,9 +1248,9 @@ npyiter_prepare_operands(int nop, PyArrayObject **op_in,
return 1;
fail_nop:
- iop = nop;
+ iop = nop - 1;
fail_iop:
- for (i = 0; i < iop; ++i) {
+ for (i = 0; i < iop+1; ++i) {
Py_XDECREF(op[i]);
Py_XDECREF(op_dtype[i]);
}
@@ -3175,6 +3175,7 @@ npyiter_allocate_transfer_functions(NpyIter *iter)
&stransfer,
&transferdata,
&needs_api) != NPY_SUCCEED) {
+ iop -= 1; /* This one cannot be cleaned up yet. */
goto fail;
}
readtransferfn[iop] = stransfer;
@@ -3268,7 +3269,7 @@ npyiter_allocate_transfer_functions(NpyIter *iter)
return 1;
fail:
- for (i = 0; i < iop; ++i) {
+ for (i = 0; i < iop+1; ++i) {
if (readtransferdata[iop] != NULL) {
NPY_AUXDATA_FREE(readtransferdata[iop]);
readtransferdata[iop] = NULL;
diff --git a/numpy/core/src/multiarray/nditer_pywrap.c b/numpy/core/src/multiarray/nditer_pywrap.c
index 5a9f3c5fa..30a81e0ca 100644
--- a/numpy/core/src/multiarray/nditer_pywrap.c
+++ b/numpy/core/src/multiarray/nditer_pywrap.c
@@ -2355,6 +2355,8 @@ npyiter_close(NewNpyArrayIterObject *self)
}
ret = NpyIter_Deallocate(iter);
self->iter = NULL;
+ Py_XDECREF(self->nested_child);
+ self->nested_child = NULL;
if (ret < 0) {
return NULL;
}
diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c
index d153a8a64..420501ce2 100644
--- a/numpy/core/src/multiarray/number.c
+++ b/numpy/core/src/multiarray/number.c
@@ -599,15 +599,16 @@ array_positive(PyArrayObject *m1)
PyErr_Restore(exc, val, tb);
return NULL;
}
+ Py_XDECREF(exc);
+ Py_XDECREF(val);
+ Py_XDECREF(tb);
+
/* 2018-06-28, 1.16.0 */
if (DEPRECATE("Applying '+' to a non-numerical array is "
"ill-defined. Returning a copy, but in the future "
"this will error.") < 0) {
return NULL;
}
- Py_XDECREF(exc);
- Py_XDECREF(val);
- Py_XDECREF(tb);
value = PyArray_Return((PyArrayObject *)PyArray_Copy(m1));
}
return value;
diff --git a/numpy/core/src/multiarray/refcount.c b/numpy/core/src/multiarray/refcount.c
index 4b018b056..b8230c81a 100644
--- a/numpy/core/src/multiarray/refcount.c
+++ b/numpy/core/src/multiarray/refcount.c
@@ -19,8 +19,12 @@
static void
_fillobject(char *optr, PyObject *obj, PyArray_Descr *dtype);
-/* Incref all objects found at this record */
+
/*NUMPY_API
+ * XINCREF all objects in a single array item. This is complicated for
+ * structured datatypes where the position of objects needs to be extracted.
+ * The function is executed recursively for each nested field or subarray
+ * dtype, such as `np.dtype([("field1", "O"), ("field2", "f,O", (3,2))])`.
*/
NPY_NO_EXPORT void
PyArray_Item_INCREF(char *data, PyArray_Descr *descr)
@@ -51,11 +55,37 @@ PyArray_Item_INCREF(char *data, PyArray_Descr *descr)
PyArray_Item_INCREF(data + offset, new);
}
}
+ else if (PyDataType_HASSUBARRAY(descr)) {
+ int size, i, inner_elsize;
+
+ inner_elsize = descr->subarray->base->elsize;
+ if (inner_elsize == 0) {
+ /* There cannot be any elements, so return */
+ return;
+ }
+ /* Subarrays are always contiguous in memory */
+ size = descr->elsize / inner_elsize;
+
+ for (i = 0; i < size; i++){
+ /* Recursively increment the reference count of subarray elements */
+ PyArray_Item_INCREF(data + i * inner_elsize,
+ descr->subarray->base);
+ }
+ }
+ else {
+ /* This path should not be reachable. */
+ assert(0);
+ }
return;
}
-/* XDECREF all objects found at this record */
+
/*NUMPY_API
+ *
+ * XDECREF all objects in a single array item. This is complicated for
+ * structured datatypes where the position of objects needs to be extracted.
+ * The function is executed recursively for each nested field or subarray
+ * dtype, such as `np.dtype([("field1", "O"), ("field2", "f,O", (3,2))])`.
*/
NPY_NO_EXPORT void
PyArray_Item_XDECREF(char *data, PyArray_Descr *descr)
@@ -87,6 +117,27 @@ PyArray_Item_XDECREF(char *data, PyArray_Descr *descr)
PyArray_Item_XDECREF(data + offset, new);
}
}
+ else if (PyDataType_HASSUBARRAY(descr)) {
+ int size, i, inner_elsize;
+
+ inner_elsize = descr->subarray->base->elsize;
+ if (inner_elsize == 0) {
+ /* There cannot be any elements, so return */
+ return;
+ }
+ /* Subarrays are always contiguous in memory */
+ size = descr->elsize / inner_elsize;
+
+ for (i = 0; i < size; i++){
+ /* Recursively decrement the reference count of subarray elements */
+ PyArray_Item_XDECREF(data + i * inner_elsize,
+ descr->subarray->base);
+ }
+ }
+ else {
+ /* This path should not be reachable. */
+ assert(0);
+ }
return;
}
@@ -258,6 +309,10 @@ _fillobject(char *optr, PyObject *obj, PyArray_Descr *dtype)
Py_XDECREF(arr);
}
}
+ if (dtype->type_num == NPY_OBJECT) {
+ Py_XINCREF(obj);
+ NPY_COPY_PYOBJECT_PTR(optr, &obj);
+ }
else if (PyDataType_HASFIELDS(dtype)) {
PyObject *key, *value, *title = NULL;
PyArray_Descr *new;
@@ -274,15 +329,26 @@ _fillobject(char *optr, PyObject *obj, PyArray_Descr *dtype)
_fillobject(optr + offset, obj, new);
}
}
- else {
- npy_intp i;
- npy_intp nsize = dtype->elsize / sizeof(obj);
+ else if (PyDataType_HASSUBARRAY(dtype)) {
+ int size, i, inner_elsize;
- for (i = 0; i < nsize; i++) {
- Py_XINCREF(obj);
- NPY_COPY_PYOBJECT_PTR(optr, &obj);
- optr += sizeof(obj);
+ inner_elsize = dtype->subarray->base->elsize;
+ if (inner_elsize == 0) {
+ /* There cannot be any elements, so return */
+ return;
+ }
+ /* Subarrays are always contiguous in memory */
+ size = dtype->elsize / inner_elsize;
+
+ /* Call _fillobject on each item recursively. */
+ for (i = 0; i < size; i++){
+ _fillobject(optr, obj, dtype->subarray->base);
+ optr += inner_elsize;
}
- return;
}
+ else {
+ /* This path should not be reachable. */
+ assert(0);
+ }
+ return;
}
diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index 2f71c8ae9..52de31289 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -2599,6 +2599,8 @@ NPY_NO_EXPORT PyTypeObject PyGenericArrType_Type = {
static void
void_dealloc(PyVoidScalarObject *v)
{
+ _dealloc_cached_buffer_info((PyObject *)v);
+
if (v->flags & NPY_ARRAY_OWNDATA) {
npy_free_cache(v->obval, Py_SIZE(v));
}
diff --git a/numpy/core/src/multiarray/usertypes.c b/numpy/core/src/multiarray/usertypes.c
index 8e8090002..2e8fb514f 100644
--- a/numpy/core/src/multiarray/usertypes.c
+++ b/numpy/core/src/multiarray/usertypes.c
@@ -40,19 +40,27 @@ maintainer email: oliphant.travis@ieee.org
NPY_NO_EXPORT PyArray_Descr **userdescrs=NULL;
-static int *
-_append_new(int *types, int insert)
+static int
+_append_new(int **p_types, int insert)
{
int n = 0;
int *newtypes;
+ int *types = *p_types;
while (types[n] != NPY_NOTYPE) {
n++;
}
newtypes = (int *)realloc(types, (n + 2)*sizeof(int));
+ if (newtypes == NULL) {
+ PyErr_NoMemory();
+ return -1;
+ }
newtypes[n] = insert;
newtypes[n + 1] = NPY_NOTYPE;
- return newtypes;
+
+ /* Replace the passed-in pointer */
+ *p_types = newtypes;
+ return 0;
}
static npy_bool
@@ -247,10 +255,13 @@ PyArray_RegisterCanCast(PyArray_Descr *descr, int totype,
*/
if (descr->f->cancastto == NULL) {
descr->f->cancastto = (int *)malloc(1*sizeof(int));
+ if (descr->f->cancastto == NULL) {
+ PyErr_NoMemory();
+ return -1;
+ }
descr->f->cancastto[0] = NPY_NOTYPE;
}
- descr->f->cancastto = _append_new(descr->f->cancastto,
- totype);
+ return _append_new(&descr->f->cancastto, totype);
}
else {
/* register with cancastscalarkindto */
@@ -258,6 +269,10 @@ PyArray_RegisterCanCast(PyArray_Descr *descr, int totype,
int i;
descr->f->cancastscalarkindto =
(int **)malloc(NPY_NSCALARKINDS* sizeof(int*));
+ if (descr->f->cancastscalarkindto == NULL) {
+ PyErr_NoMemory();
+ return -1;
+ }
for (i = 0; i < NPY_NSCALARKINDS; i++) {
descr->f->cancastscalarkindto[i] = NULL;
}
@@ -265,11 +280,13 @@ PyArray_RegisterCanCast(PyArray_Descr *descr, int totype,
if (descr->f->cancastscalarkindto[scalar] == NULL) {
descr->f->cancastscalarkindto[scalar] =
(int *)malloc(1*sizeof(int));
+ if (descr->f->cancastscalarkindto[scalar] == NULL) {
+ PyErr_NoMemory();
+ return -1;
+ }
descr->f->cancastscalarkindto[scalar][0] =
NPY_NOTYPE;
}
- descr->f->cancastscalarkindto[scalar] =
- _append_new(descr->f->cancastscalarkindto[scalar], totype);
+ return _append_new(&descr->f->cancastscalarkindto[scalar], totype);
}
- return 0;
}
diff --git a/numpy/core/src/npymath/halffloat.c b/numpy/core/src/npymath/halffloat.c
index c2bd28d60..84af86009 100644
--- a/numpy/core/src/npymath/halffloat.c
+++ b/numpy/core/src/npymath/halffloat.c
@@ -301,15 +301,23 @@ npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f)
npy_set_floatstatus_underflow();
}
#endif
+ /*
+ * Usually the significand is shifted by 13. For subnormals an
+ * additional shift needs to occur. This shift is one for the largest
+ * exponent giving a subnormal `f_exp = 0x38000000 >> 23 = 112`, which
+ * offsets the new first bit. At most the shift can be 1+10 bits.
+ */
f_sig >>= (113 - f_exp);
/* Handle rounding by adding 1 to the bit beyond half precision */
#if NPY_HALF_ROUND_TIES_TO_EVEN
/*
* If the last bit in the half significand is 0 (already even), and
* the remaining bit pattern is 1000...0, then we do not add one
- * to the bit after the half significand. In all other cases, we do.
+ * to the bit after the half significand. However, the (113 - f_exp)
+ * shift can lose up to 11 bits, so the || also checks them in the original float.
+ * In all other cases, we can just add one.
*/
- if ((f_sig&0x00003fffu) != 0x00001000u) {
+ if (((f_sig&0x00003fffu) != 0x00001000u) || (f&0x000007ffu)) {
f_sig += 0x00001000u;
}
#else
@@ -416,7 +424,16 @@ npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d)
npy_set_floatstatus_underflow();
}
#endif
- d_sig >>= (1009 - d_exp);
+ /*
+ * Unlike floats, doubles have enough room to shift left to align
+ * the subnormal significand, so none of the trailing bits are lost.
+ * The smallest possible exponent giving a subnormal is:
+ * `d_exp = 0x3e60000000000000 >> 52 = 998`. All larger subnormals are
+ * shifted with respect to it. This adds a shift of 10+1 bits to the
+ * final right shift when compared to the one in the normal branch.
+ */
+ assert(d_exp - 998 >= 0);
+ d_sig <<= (d_exp - 998);
/* Handle rounding by adding 1 to the bit beyond half precision */
#if NPY_HALF_ROUND_TIES_TO_EVEN
/*
@@ -424,13 +441,13 @@ npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d)
* the remaining bit pattern is 1000...0, then we do not add one
* to the bit after the half significand. In all other cases, we do.
*/
- if ((d_sig&0x000007ffffffffffULL) != 0x0000020000000000ULL) {
- d_sig += 0x0000020000000000ULL;
+ if ((d_sig&0x003fffffffffffffULL) != 0x0010000000000000ULL) {
+ d_sig += 0x0010000000000000ULL;
}
#else
- d_sig += 0x0000020000000000ULL;
+ d_sig += 0x0010000000000000ULL;
#endif
- h_sig = (npy_uint16) (d_sig >> 42);
+ h_sig = (npy_uint16) (d_sig >> 53);
/*
* If the rounding causes a bit to spill into h_exp, it will
* increment h_exp from zero to one and h_sig will be zero.
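A quick Python check of the denormal rounding fixed above (a sketch; the values assume IEEE-754 round-to-even): 2**-25 is the exact midpoint between 0 and the smallest float16 subnormal 2**-24, so it ties to even (zero), while the next representable double above it must round up — previously the lost low bits made it round to zero as well (gh-12721).

    import numpy as np

    tie = np.float64(2.0 ** -25)              # exact halfway point
    assert np.float16(tie) == 0.0             # ties-to-even -> 0
    up = np.nextafter(tie, np.float64(1.0))   # smallest upward nudge
    assert np.float16(up) == 2.0 ** -24       # rounds up to the subnormal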
diff --git a/numpy/core/src/umath/_struct_ufunc_tests.c.src b/numpy/core/src/umath/_struct_ufunc_tests.c.src
index b831d5c2a..5c6e235e0 100644
--- a/numpy/core/src/umath/_struct_ufunc_tests.c.src
+++ b/numpy/core/src/umath/_struct_ufunc_tests.c.src
@@ -114,6 +114,7 @@ PyMODINIT_FUNC init_struct_ufunc_tests(void)
dtypes,
NULL);
+ Py_DECREF(dtype);
d = PyModule_GetDict(m);
PyDict_SetItemString(d, "add_triplet", add_triplet);
diff --git a/numpy/core/src/umath/_umath_tests.c.src b/numpy/core/src/umath/_umath_tests.c.src
index 8cb74f177..6c3bcce71 100644
--- a/numpy/core/src/umath/_umath_tests.c.src
+++ b/numpy/core/src/umath/_umath_tests.c.src
@@ -564,7 +564,7 @@ UMath_Tests_test_signature(PyObject *NPY_UNUSED(dummy), PyObject *args)
core_dim_sizes = Py_None;
}
Py_DECREF(f);
- return Py_BuildValue("iOOOO", core_enabled, core_num_dims,
+ return Py_BuildValue("iNNNN", core_enabled, core_num_dims,
core_dim_ixs, core_dim_flags, core_dim_sizes);
fail:
diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index ae3ece77b..975a5e6b8 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -1603,7 +1603,7 @@ TIMEDELTA_mm_m_remainder(char **args, npy_intp *dimensions, npy_intp *steps, voi
else {
if (in2 == 0) {
npy_set_floatstatus_divbyzero();
- *((npy_timedelta *)op1) = 0;
+ *((npy_timedelta *)op1) = NPY_DATETIME_NAT;
}
else {
/* handle mixed case the way Python does */
@@ -1619,6 +1619,62 @@ TIMEDELTA_mm_m_remainder(char **args, npy_intp *dimensions, npy_intp *steps, voi
}
}
+NPY_NO_EXPORT void
+TIMEDELTA_mm_q_floor_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+{
+ BINARY_LOOP {
+ const npy_timedelta in1 = *(npy_timedelta *)ip1;
+ const npy_timedelta in2 = *(npy_timedelta *)ip2;
+ if (in1 == NPY_DATETIME_NAT || in2 == NPY_DATETIME_NAT) {
+ npy_set_floatstatus_invalid();
+ *((npy_int64 *)op1) = 0;
+ }
+ else if (in2 == 0) {
+ npy_set_floatstatus_divbyzero();
+ *((npy_int64 *)op1) = 0;
+ }
+ else {
+ if (((in1 > 0) != (in2 > 0)) && (in1 % in2 != 0)) {
+ *((npy_int64 *)op1) = in1/in2 - 1;
+ }
+ else {
+ *((npy_int64 *)op1) = in1/in2;
+ }
+ }
+ }
+}
+
+NPY_NO_EXPORT void
+TIMEDELTA_mm_qm_divmod(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+{
+ BINARY_LOOP_TWO_OUT {
+ const npy_timedelta in1 = *(npy_timedelta *)ip1;
+ const npy_timedelta in2 = *(npy_timedelta *)ip2;
+ if (in1 == NPY_DATETIME_NAT || in2 == NPY_DATETIME_NAT) {
+ npy_set_floatstatus_invalid();
+ *((npy_int64 *)op1) = 0;
+ *((npy_timedelta *)op2) = NPY_DATETIME_NAT;
+ }
+ else if (in2 == 0) {
+ npy_set_floatstatus_divbyzero();
+ *((npy_int64 *)op1) = 0;
+ *((npy_timedelta *)op2) = NPY_DATETIME_NAT;
+ }
+ else {
+ const npy_int64 quo = in1 / in2;
+ const npy_timedelta rem = in1 % in2;
+ if ((in1 > 0) == (in2 > 0) || rem == 0) {
+ *((npy_int64 *)op1) = quo;
+ *((npy_timedelta *)op2) = rem;
+ }
+ else {
+ *((npy_int64 *)op1) = quo - 1;
+ *((npy_timedelta *)op2) = rem + in2;
+ }
+ }
+ }
+}
+
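The new loops give timedelta64 the same floor semantics as Python integers; a minimal sketch of the resulting behavior:

    import numpy as np

    a = np.timedelta64(7, 's')
    b = np.timedelta64(-4, 's')
    print(a // b)        # -2: quotient rounds toward negative infinity
    print(divmod(a, b))  # (-2, numpy.timedelta64(-1,'s'))
    # NaT operands or a zero divisor emit a RuntimeWarning and produce
    # 0 for the quotient (and NaT for the divmod remainder).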
/*
*****************************************************************************
** FLOAT LOOPS **
diff --git a/numpy/core/src/umath/loops.h.src b/numpy/core/src/umath/loops.h.src
index 9b6327308..5264a6533 100644
--- a/numpy/core/src/umath/loops.h.src
+++ b/numpy/core/src/umath/loops.h.src
@@ -474,8 +474,14 @@ NPY_NO_EXPORT void
TIMEDELTA_mm_d_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
+TIMEDELTA_mm_q_floor_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+
+NPY_NO_EXPORT void
TIMEDELTA_mm_m_remainder(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+NPY_NO_EXPORT void
+TIMEDELTA_mm_qm_divmod(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+
/* Special case equivalents to above functions */
#define TIMEDELTA_mq_m_true_divide TIMEDELTA_mq_m_divide
diff --git a/numpy/core/src/umath/reduction.c b/numpy/core/src/umath/reduction.c
index 6d04ce372..791d3693f 100644
--- a/numpy/core/src/umath/reduction.c
+++ b/numpy/core/src/umath/reduction.c
@@ -186,7 +186,6 @@ conform_reduce_result(int ndim, npy_bool *axis_flags,
return NULL;
}
- Py_INCREF(ret);
if (PyArray_SetWritebackIfCopyBase(ret_copy, (PyArrayObject *)ret) < 0) {
Py_DECREF(ret);
Py_DECREF(ret_copy);
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index a2df58698..1ab48bb90 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -3063,6 +3063,8 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
Py_XDECREF(axis);
Py_XDECREF(full_args.in);
Py_XDECREF(full_args.out);
+ PyArray_free(remap_axis_memory);
+ PyArray_free(remap_axis);
NPY_UF_DBG_PRINT1("Returning code %d\n", retval);
diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c
index ec60d9cfd..c2d81fc5d 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.c
+++ b/numpy/core/src/umath/ufunc_type_resolution.c
@@ -1114,7 +1114,16 @@ PyUFunc_DivisionTypeResolver(PyUFuncObject *ufunc,
}
out_dtypes[1] = out_dtypes[0];
Py_INCREF(out_dtypes[1]);
+
+ /*
+ * TODO: split function into truediv and floordiv resolvers
+ */
+ if (strcmp(ufunc->name, "floor_divide") == 0) {
+ out_dtypes[2] = PyArray_DescrFromType(NPY_LONGLONG);
+ }
+ else {
out_dtypes[2] = PyArray_DescrFromType(NPY_DOUBLE);
+ }
if (out_dtypes[2] == NULL) {
Py_DECREF(out_dtypes[0]);
out_dtypes[0] = NULL;
@@ -2247,3 +2256,52 @@ type_tuple_type_resolver(PyUFuncObject *self,
return -1;
}
+
+NPY_NO_EXPORT int
+PyUFunc_DivmodTypeResolver(PyUFuncObject *ufunc,
+ NPY_CASTING casting,
+ PyArrayObject **operands,
+ PyObject *type_tup,
+ PyArray_Descr **out_dtypes)
+{
+ int type_num1, type_num2;
+ int i;
+
+ type_num1 = PyArray_DESCR(operands[0])->type_num;
+ type_num2 = PyArray_DESCR(operands[1])->type_num;
+
+ /* Use the default when datetime and timedelta are not involved */
+ if (!PyTypeNum_ISDATETIME(type_num1) && !PyTypeNum_ISDATETIME(type_num2)) {
+ return PyUFunc_DefaultTypeResolver(ufunc, casting, operands,
+ type_tup, out_dtypes);
+ }
+ if (type_num1 == NPY_TIMEDELTA) {
+ if (type_num2 == NPY_TIMEDELTA) {
+ out_dtypes[0] = PyArray_PromoteTypes(PyArray_DESCR(operands[0]),
+ PyArray_DESCR(operands[1]));
+ out_dtypes[1] = out_dtypes[0];
+ Py_INCREF(out_dtypes[1]);
+ out_dtypes[2] = PyArray_DescrFromType(NPY_LONGLONG);
+ Py_INCREF(out_dtypes[2]);
+ out_dtypes[3] = out_dtypes[0];
+ Py_INCREF(out_dtypes[3]);
+ }
+ else {
+ return raise_binary_type_reso_error(ufunc, operands);
+ }
+ }
+ else {
+ return raise_binary_type_reso_error(ufunc, operands);
+ }
+
+ /* Check against the casting rules */
+ if (PyUFunc_ValidateCasting(ufunc, casting, operands, out_dtypes) < 0) {
+ for (i = 0; i < 4; ++i) {
+ Py_DECREF(out_dtypes[i]);
+ out_dtypes[i] = NULL;
+ }
+ return -1;
+ }
+
+ return 0;
+}
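The resolver above pins down the output dtypes: the quotient of a timedelta64 divmod is int64 (NPY_LONGLONG), while the remainder keeps the promoted timedelta unit. A sketch:

    import numpy as np

    q, r = np.divmod(np.timedelta64(1, 'm'), np.timedelta64(31, 's'))
    print(q.dtype, r.dtype)  # int64 timedelta64[s]
    print(q, r)              # 1 29 seconds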
diff --git a/numpy/core/src/umath/ufunc_type_resolution.h b/numpy/core/src/umath/ufunc_type_resolution.h
index 2f37af753..78313b1ef 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.h
+++ b/numpy/core/src/umath/ufunc_type_resolution.h
@@ -99,6 +99,13 @@ PyUFunc_RemainderTypeResolver(PyUFuncObject *ufunc,
PyObject *type_tup,
PyArray_Descr **out_dtypes);
+NPY_NO_EXPORT int
+PyUFunc_DivmodTypeResolver(PyUFuncObject *ufunc,
+ NPY_CASTING casting,
+ PyArrayObject **operands,
+ PyObject *type_tup,
+ PyArray_Descr **out_dtypes);
+
/*
* Does a linear search for the best inner loop of the ufunc.
*
diff --git a/numpy/core/src/umath/umathmodule.c b/numpy/core/src/umath/umathmodule.c
index 5de19fec2..b334a89da 100644
--- a/numpy/core/src/umath/umathmodule.c
+++ b/numpy/core/src/umath/umathmodule.c
@@ -174,11 +174,17 @@ add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args)
char *docstr, *newdocstr;
#if defined(NPY_PY3K)
+ PyObject *tmp;
+
if (!PyArg_ParseTuple(args, "O!O!:_add_newdoc_ufunc", &PyUFunc_Type, &ufunc,
&PyUnicode_Type, &str)) {
return NULL;
}
- docstr = PyBytes_AS_STRING(PyUnicode_AsUTF8String(str));
+ tmp = PyUnicode_AsUTF8String(str);
+ if (tmp == NULL) {
+ return NULL;
+ }
+ docstr = PyBytes_AS_STRING(tmp);
#else
if (!PyArg_ParseTuple(args, "O!O!:_add_newdoc_ufunc", &PyUFunc_Type, &ufunc,
&PyString_Type, &str)) {
@@ -190,6 +196,9 @@ add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args)
if (NULL != ufunc->doc) {
PyErr_SetString(PyExc_ValueError,
"Cannot change docstring of ufunc with non-NULL docstring");
+#if defined(NPY_PY3K)
+ Py_DECREF(tmp);
+#endif
return NULL;
}
@@ -203,6 +212,9 @@ add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args)
strcpy(newdocstr, docstr);
ufunc->doc = newdocstr;
+#if defined(NPY_PY3K)
+ Py_DECREF(tmp);
+#endif
Py_RETURN_NONE;
}
diff --git a/numpy/core/tests/test_arrayprint.py b/numpy/core/tests/test_arrayprint.py
index 7a858d2e2..f2b8fdca7 100644
--- a/numpy/core/tests/test_arrayprint.py
+++ b/numpy/core/tests/test_arrayprint.py
@@ -90,6 +90,7 @@ class TestArrayRepr(object):
assert_equal(repr(x),
'sub(sub(sub(..., dtype=object), dtype=object), dtype=object)')
assert_equal(str(x), '...')
+ x[()] = 0 # resolve circular references for garbage collector
# nested 0d-subclass-object
x = sub(None)
@@ -124,11 +125,13 @@ class TestArrayRepr(object):
arr0d[()] = arr0d
assert_equal(repr(arr0d),
'array(array(..., dtype=object), dtype=object)')
+ arr0d[()] = 0 # resolve recursion for garbage collector
arr1d = np.array([None, None])
arr1d[1] = arr1d
assert_equal(repr(arr1d),
'array([None, array(..., dtype=object)], dtype=object)')
+ arr1d[1] = 0 # resolve recursion for garbage collector
first = np.array(None)
second = np.array(None)
@@ -136,6 +139,7 @@ class TestArrayRepr(object):
second[()] = first
assert_equal(repr(first),
'array(array(array(..., dtype=object), dtype=object), dtype=object)')
+ first[()] = 0 # resolve circular references for garbage collector
def test_containing_list(self):
# printing square brackets directly would be ambiguous
diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index b2ce0402a..9832b4275 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -1081,6 +1081,133 @@ class TestDateTime(object):
check(np.timedelta64(0), f, nat)
check(nat, f, nat)
+ @pytest.mark.parametrize("op1, op2, exp", [
+ # m8 same units round down
+ (np.timedelta64(7, 's'),
+ np.timedelta64(4, 's'),
+ 1),
+ # m8 same units round down with negative
+ (np.timedelta64(7, 's'),
+ np.timedelta64(-4, 's'),
+ -2),
+ # m8 same units negative no round down
+ (np.timedelta64(8, 's'),
+ np.timedelta64(-4, 's'),
+ -2),
+ # m8 different units
+ (np.timedelta64(1, 'm'),
+ np.timedelta64(31, 's'),
+ 1),
+ # m8 generic units
+ (np.timedelta64(1890),
+ np.timedelta64(31),
+ 60),
+ # Y // M works
+ (np.timedelta64(2, 'Y'),
+ np.timedelta64('13', 'M'),
+ 1),
+ # handle 1D arrays
+ (np.array([1, 2, 3], dtype='m8'),
+ np.array([2], dtype='m8'),
+ np.array([0, 1, 1], dtype=np.int64)),
+ ])
+ def test_timedelta_floor_divide(self, op1, op2, exp):
+ assert_equal(op1 // op2, exp)
+
+ @pytest.mark.parametrize("op1, op2", [
+ # div by 0
+ (np.timedelta64(10, 'us'),
+ np.timedelta64(0, 'us')),
+ # div with NaT
+ (np.timedelta64('NaT'),
+ np.timedelta64(50, 'us')),
+ # special case for int64 min
+ # in integer floor division
+ (np.timedelta64(np.iinfo(np.int64).min),
+ np.timedelta64(-1)),
+ ])
+ def test_timedelta_floor_div_warnings(self, op1, op2):
+ with assert_warns(RuntimeWarning):
+ actual = op1 // op2
+ assert_equal(actual, 0)
+ assert_equal(actual.dtype, np.int64)
+
+ @pytest.mark.parametrize("val1, val2", [
+ # the smallest integer that can't be represented
+ # exactly in a double should be preserved if we avoid
+ # casting to double in floordiv operation
+ (9007199254740993, 1),
+ # stress the alternate floordiv code path where
+ # operand signs don't match and remainder isn't 0
+ (9007199254740999, -2),
+ ])
+ def test_timedelta_floor_div_precision(self, val1, val2):
+ op1 = np.timedelta64(val1)
+ op2 = np.timedelta64(val2)
+ actual = op1 // op2
+ # Python reference integer floor
+ expected = val1 // val2
+ assert_equal(actual, expected)
+
+ @pytest.mark.parametrize("val1, val2", [
+ # years and months sometimes can't be unambiguously
+ # divided for floor division operation
+ (np.timedelta64(7, 'Y'),
+ np.timedelta64(3, 's')),
+ (np.timedelta64(7, 'M'),
+ np.timedelta64(1, 'D')),
+ ])
+ def test_timedelta_floor_div_error(self, val1, val2):
+ with assert_raises_regex(TypeError, "common metadata divisor"):
+ val1 // val2
+
+ @pytest.mark.parametrize("op1, op2", [
+ # reuse the test cases from floordiv
+ (np.timedelta64(7, 's'),
+ np.timedelta64(4, 's')),
+ # m8 same units round down with negative
+ (np.timedelta64(7, 's'),
+ np.timedelta64(-4, 's')),
+ # m8 same units negative no round down
+ (np.timedelta64(8, 's'),
+ np.timedelta64(-4, 's')),
+ # m8 different units
+ (np.timedelta64(1, 'm'),
+ np.timedelta64(31, 's')),
+ # m8 generic units
+ (np.timedelta64(1890),
+ np.timedelta64(31)),
+ # Y // M works
+ (np.timedelta64(2, 'Y'),
+ np.timedelta64('13', 'M')),
+ # handle 1D arrays
+ (np.array([1, 2, 3], dtype='m8'),
+ np.array([2], dtype='m8')),
+ ])
+ def test_timedelta_divmod(self, op1, op2):
+ expected = (op1 // op2, op1 % op2)
+ assert_equal(divmod(op1, op2), expected)
+
+ @pytest.mark.parametrize("op1, op2", [
+ # reuse cases from floordiv
+ # div by 0
+ (np.timedelta64(10, 'us'),
+ np.timedelta64(0, 'us')),
+ # div with NaT
+ (np.timedelta64('NaT'),
+ np.timedelta64(50, 'us')),
+ # special case for int64 min
+ # in integer floor division
+ (np.timedelta64(np.iinfo(np.int64).min),
+ np.timedelta64(-1)),
+ ])
+ def test_timedelta_divmod_warnings(self, op1, op2):
+ with assert_warns(RuntimeWarning):
+ expected = (op1 // op2, op1 % op2)
+ with assert_warns(RuntimeWarning):
+ actual = divmod(op1, op2)
+ assert_equal(actual, expected)
+
def test_datetime_divide(self):
for dta, tda, tdb, tdc, tdd in \
[
@@ -1111,8 +1238,6 @@ class TestDateTime(object):
assert_equal(tda / tdd, 60.0)
assert_equal(tdd / tda, 1.0 / 60.0)
- # m8 // m8
- assert_raises(TypeError, np.floor_divide, tda, tdb)
# int / m8
assert_raises(TypeError, np.divide, 2, tdb)
# float / m8
@@ -1680,7 +1805,7 @@ class TestDateTime(object):
def test_timedelta_modulus_div_by_zero(self):
with assert_warns(RuntimeWarning):
actual = np.timedelta64(10, 's') % np.timedelta64(0, 's')
- assert_equal(actual, np.timedelta64(0, 's'))
+ assert_equal(actual, np.timedelta64('NaT'))
@pytest.mark.parametrize("val1, val2", [
# cases where one operand is not
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index c55751e3c..8f371197c 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -4,10 +4,12 @@ import sys
import operator
import pytest
import ctypes
+import gc
import numpy as np
from numpy.core._rational_tests import rational
-from numpy.testing import assert_, assert_equal, assert_raises
+from numpy.testing import (
+ assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT)
from numpy.core.numeric import pickle
def assert_dtype_equal(a, b):
@@ -446,6 +448,173 @@ class TestSubarray(object):
assert_equal(t1.alignment, t2.alignment)
+def iter_struct_object_dtypes():
+ """
+ Iterates over a few complex dtypes and object patterns which
+ fill the array with a given object (defaults to a singleton).
+
+ Yields
+ ------
+ dtype : dtype
+ pattern : tuple
+ Structured tuple for use with `np.array`.
+ count : int
+ Number of objects stored in the dtype.
+ singleton : object
+ A singleton object. The returned pattern is constructed so that
+ all objects inside the datatype are set to the singleton.
+ """
+ obj = object()
+
+ dt = np.dtype([('b', 'O', (2, 3))])
+ p = ([[obj] * 3] * 2,)
+ yield pytest.param(dt, p, 6, obj, id="<subarray>")
+
+ dt = np.dtype([('a', 'i4'), ('b', 'O', (2, 3))])
+ p = (0, [[obj] * 3] * 2)
+ yield pytest.param(dt, p, 6, obj, id="<subarray in field>")
+
+ dt = np.dtype([('a', 'i4'),
+ ('b', [('ba', 'O'), ('bb', 'i1')], (2, 3))])
+ p = (0, [[(obj, 0)] * 3] * 2)
+ yield pytest.param(dt, p, 6, obj, id="<structured subarray 1>")
+
+ dt = np.dtype([('a', 'i4'),
+ ('b', [('ba', 'O'), ('bb', 'O')], (2, 3))])
+ p = (0, [[(obj, obj)] * 3] * 2)
+ yield pytest.param(dt, p, 12, obj, id="<structured subarray 2>")
+
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+class TestStructuredObjectRefcounting:
+ """These tests cover various uses of complicated structured types which
+ include objects and thus require reference counting.
+ """
+ @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
+ iter_struct_object_dtypes())
+ @pytest.mark.parametrize(["creation_func", "creation_obj"], [
+ pytest.param(np.empty, None,
+ # None is probably used for too many things
+ marks=pytest.mark.skip("unreliable due to python's behaviour")),
+ (np.ones, 1),
+ (np.zeros, 0)])
+ def test_structured_object_create_delete(self, dt, pat, count, singleton,
+ creation_func, creation_obj):
+ """Structured object reference counting in creation and deletion"""
+ # The test assumes that 0, 1, and None are singletons.
+ gc.collect()
+ before = sys.getrefcount(creation_obj)
+ arr = creation_func(3, dt)
+
+ now = sys.getrefcount(creation_obj)
+ assert now - before == count * 3
+ del arr
+ now = sys.getrefcount(creation_obj)
+ assert now == before
+
+ @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
+ iter_struct_object_dtypes())
+ def test_structured_object_item_setting(self, dt, pat, count, singleton):
+ """Structured object reference counting for simple item setting"""
+ one = 1
+
+ gc.collect()
+ before = sys.getrefcount(singleton)
+ arr = np.array([pat] * 3, dt)
+ assert sys.getrefcount(singleton) - before == count * 3
+ # Fill with `1` and check that it was replaced correctly:
+ before2 = sys.getrefcount(one)
+ arr[...] = one
+ after2 = sys.getrefcount(one)
+ assert after2 - before2 == count * 3
+ del arr
+ gc.collect()
+ assert sys.getrefcount(one) == before2
+ assert sys.getrefcount(singleton) == before
+
+ @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
+ iter_struct_object_dtypes())
+ @pytest.mark.parametrize(
+ ['shape', 'index', 'items_changed'],
+ [((3,), ([0, 2],), 2),
+ ((3, 2), ([0, 2], slice(None)), 4),
+ ((3, 2), ([0, 2], [1]), 2),
+ ((3,), ([True, False, True]), 2)])
+ def test_structured_object_indexing(self, shape, index, items_changed,
+ dt, pat, count, singleton):
+ """Structured object reference counting for advanced indexing."""
+ zero = 0
+ one = 1
+
+ arr = np.zeros(shape, dt)
+
+ gc.collect()
+ before_zero = sys.getrefcount(zero)
+ before_one = sys.getrefcount(one)
+ # Test item getting:
+ part = arr[index]
+ after_zero = sys.getrefcount(zero)
+ assert after_zero - before_zero == count * items_changed
+ del part
+ # Test item setting:
+ arr[index] = one
+ gc.collect()
+ after_zero = sys.getrefcount(zero)
+ after_one = sys.getrefcount(one)
+ assert before_zero - after_zero == count * items_changed
+ assert after_one - before_one == count * items_changed
+
+ @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
+ iter_struct_object_dtypes())
+ def test_structured_object_take_and_repeat(self, dt, pat, count, singleton):
+ """Structured object reference counting for specialized functions.
+ The older functions such as take and repeat use different code paths
+ than item setting (at the time of writing).
+ """
+ indices = [0, 1]
+
+ arr = np.array([pat] * 3, dt)
+ gc.collect()
+ before = sys.getrefcount(singleton)
+ res = arr.take(indices)
+ after = sys.getrefcount(singleton)
+ assert after - before == count * 2
+ new = res.repeat(10)
+ gc.collect()
+ after_repeat = sys.getrefcount(singleton)
+ assert after_repeat - after == count * 2 * 10
+
+
+class TestStructuredDtypeSparseFields(object):
+ """Tests subarray fields which contain sparse dtypes so that
+ not all memory is used by the dtype work. Such dtype's should
+ leave the underlying memory unchanged.
+ """
+ dtype = np.dtype([('a', {'names':['aa', 'ab'], 'formats':['f', 'f'],
+ 'offsets':[0, 4]}, (2, 3))])
+ sparse_dtype = np.dtype([('a', {'names':['ab'], 'formats':['f'],
+ 'offsets':[4]}, (2, 3))])
+
+ @pytest.mark.xfail(reason="inaccessible data is changed see gh-12686.")
+ @pytest.mark.valgrind_error(reason="reads from uninitialized buffers.")
+ def test_sparse_field_assignment(self):
+ arr = np.zeros(3, self.dtype)
+ sparse_arr = arr.view(self.sparse_dtype)
+
+ sparse_arr[...] = np.finfo(np.float32).max
+ # dtype is reduced when accessing the field, so shape is (3, 2, 3):
+ assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3)))
+
+ def test_sparse_field_assignment_fancy(self):
+ # Fancy assignment goes to the copyswap function for complex types:
+ arr = np.zeros(3, self.dtype)
+ sparse_arr = arr.view(self.sparse_dtype)
+
+ sparse_arr[[0, 1, 2]] = np.finfo(np.float32).max
+ # dtype is reduced when accessing the field, so shape is (3, 2, 3):
+ assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3)))
+
+
class TestMonsterType(object):
"""Test deeply nested subtypes."""
diff --git a/numpy/core/tests/test_half.py b/numpy/core/tests/test_half.py
index b28c933db..770712501 100644
--- a/numpy/core/tests/test_half.py
+++ b/numpy/core/tests/test_half.py
@@ -69,6 +69,85 @@ class TestHalf(object):
j = np.array(i_f16, dtype=int)
assert_equal(i_int, j)
+ @pytest.mark.parametrize("offset", [None, "up", "down"])
+ @pytest.mark.parametrize("shift", [None, "up", "down"])
+ @pytest.mark.parametrize("float_t", [np.float32, np.float64])
+ def test_half_conversion_rounding(self, float_t, shift, offset):
+ # Assumes that round to even is used during casting.
+ max_pattern = np.float16(np.finfo(np.float16).max).view(np.uint16)
+
+ # Test all (positive) finite numbers; denormals are the most
+ # interesting cases:
+ f16s_patterns = np.arange(0, max_pattern+1, dtype=np.uint16)
+ f16s_float = f16s_patterns.view(np.float16).astype(float_t)
+
+ # Shift the values by half a bit up or down (or do not shift):
+ if shift == "up":
+ f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[1:]
+ elif shift == "down":
+ f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[:-1]
+ else:
+ f16s_float = f16s_float[1:-1]
+
+ # Increase the float by a minimal value:
+ if offset == "up":
+ f16s_float = np.nextafter(f16s_float, float_t(1e50))
+ elif offset == "down":
+ f16s_float = np.nextafter(f16s_float, float_t(-1e50))
+
+ # Convert back to float16 and its bit pattern:
+ res_patterns = f16s_float.astype(np.float16).view(np.uint16)
+
+ # The above calculation tries the original values, or the exact
+ # mid points between the float16 values. It then further offsets them
+ # by as little as possible. If no offset occurs, "round to even"
+ # logic is necessary; an arbitrarily small offset should always
+ # cause normal up/down rounding.
+
+ # Calculate the expected pattern:
+ cmp_patterns = f16s_patterns[1:-1].copy()
+
+ if shift == "down" and offset != "up":
+ shift_pattern = -1
+ elif shift == "up" and offset != "down":
+ shift_pattern = 1
+ else:
+ # There cannot be a shift: either shift is None, so all rounding
+ # goes back to the original value, or the offset cancels the shift.
+ shift_pattern = 0
+
+ # If rounding occurs, is it normal rounding or round to even?
+ if offset is None:
+ # Round to even occurs, modify only non-even, cast to allow + (-1)
+ cmp_patterns[0::2].view(np.int16)[...] += shift_pattern
+ else:
+ cmp_patterns.view(np.int16)[...] += shift_pattern
+
+ assert_equal(res_patterns, cmp_patterns)
+
+ @pytest.mark.parametrize(["float_t", "uint_t", "bits"],
+ [(np.float32, np.uint32, 23),
+ (np.float64, np.uint64, 52)])
+ def test_half_conversion_denormal_round_even(self, float_t, uint_t, bits):
+ # Test specifically that all bits are considered when deciding
+ # whether round to even should occur (i.e. no bits are lost at the
+ # end). Compare also gh-12721. The most bits can be lost for the
+ # smallest denormal:
+ smallest_value = np.uint16(1).view(np.float16).astype(float_t)
+ assert smallest_value == 2**-24
+
+ # Will be rounded to zero based on round to even rule:
+ rounded_to_zero = smallest_value / float_t(2)
+ assert rounded_to_zero.astype(np.float16) == 0
+
+ # The significand will be all 0 for the float_t; test that we do
+ # not lose the lower bits of these:
+ for i in range(bits):
+ # slightly increasing the value should make it round up:
+ larger_pattern = rounded_to_zero.view(uint_t) | uint_t(1 << i)
+ larger_value = larger_pattern.view(float_t)
+ assert larger_value.astype(np.float16) == smallest_value
+
def test_nans_infs(self):
with np.errstate(all='ignore'):
# Check some of the ufuncs
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 06cabe2cb..8a196308c 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -3769,10 +3769,16 @@ class TestPickling(object):
('c', float)])
]
+ refs = [weakref.ref(a) for a in DATA]
for a in DATA:
assert_equal(
a, pickle.loads(pickle.dumps(a, protocol=proto)),
err_msg="%r" % a)
+ del a, DATA, carray
+ gc.collect()
+ # check for reference leaks (gh-12793)
+ for ref in refs:
+ assert ref() is None
def _loads(self, obj):
if sys.version_info[0] >= 3:
@@ -7009,12 +7015,11 @@ class TestArrayAttributeDeletion(object):
assert_raises(AttributeError, delattr, a, s)
-def test_array_interface():
- # Test scalar coercion within the array interface
+class TestArrayInterface():
class Foo(object):
def __init__(self, value):
self.value = value
- self.iface = {'typestr': '=f8'}
+ self.iface = {'typestr': 'f8'}
def __float__(self):
return float(self.value)
@@ -7023,22 +7028,39 @@ def test_array_interface():
def __array_interface__(self):
return self.iface
+
f = Foo(0.5)
- assert_equal(np.array(f), 0.5)
- assert_equal(np.array([f]), [0.5])
- assert_equal(np.array([f, f]), [0.5, 0.5])
- assert_equal(np.array(f).dtype, np.dtype('=f8'))
- # Test various shape definitions
- f.iface['shape'] = ()
- assert_equal(np.array(f), 0.5)
- f.iface['shape'] = None
- assert_raises(TypeError, np.array, f)
- f.iface['shape'] = (1, 1)
- assert_equal(np.array(f), [[0.5]])
- f.iface['shape'] = (2,)
- assert_raises(ValueError, np.array, f)
-
- # test scalar with no shape
+
+ @pytest.mark.parametrize('val, iface, expected', [
+ (f, {}, 0.5),
+ ([f], {}, [0.5]),
+ ([f, f], {}, [0.5, 0.5]),
+ (f, {'shape': ()}, 0.5),
+ (f, {'shape': None}, TypeError),
+ (f, {'shape': (1, 1)}, [[0.5]]),
+ (f, {'shape': (2,)}, ValueError),
+ (f, {'strides': ()}, 0.5),
+ (f, {'strides': (2,)}, ValueError),
+ (f, {'strides': 16}, TypeError),
+ ])
+ def test_scalar_interface(self, val, iface, expected):
+ # Test scalar coercion within the array interface
+ self.f.iface = {'typestr': 'f8'}
+ self.f.iface.update(iface)
+ if HAS_REFCOUNT:
+ pre_cnt = sys.getrefcount(np.dtype('f8'))
+ if isinstance(expected, type):
+ assert_raises(expected, np.array, val)
+ else:
+ result = np.array(val)
+ assert_equal(np.array(val), expected)
+ assert result.dtype == 'f8'
+ del result
+ if HAS_REFCOUNT:
+ post_cnt = sys.getrefcount(np.dtype('f8'))
+ assert_equal(pre_cnt, post_cnt)
+
+def test_interface_no_shape():
class ArrayLike(object):
array = np.array(1)
__array_interface__ = array.__array_interface__
@@ -7211,6 +7233,7 @@ class TestConversion(object):
except NameError:
Error = RuntimeError # python < 3.5
assert_raises(Error, bool, self_containing) # previously stack overflow
+ self_containing[0] = None # resolve circular reference
def test_to_int_scalar(self):
# gh-9972 means that these aren't always the same
@@ -7712,6 +7735,8 @@ class TestWritebackIfCopy(object):
# uses arr_insert
np.place(a, a>2, [44, 55])
assert_equal(a, np.array([[0, 44], [1, 55], [2, 44]]))
+ # hit one of the failing paths
+ assert_raises(ValueError, np.place, a, a>20, [])
def test_put_noncontiguous(self):
a = np.arange(6).reshape(2,3).T # force non-c-contiguous
@@ -8068,3 +8093,43 @@ def test_getfield():
pytest.raises(ValueError, a.getfield, 'uint8', -1)
pytest.raises(ValueError, a.getfield, 'uint8', 16)
pytest.raises(ValueError, a.getfield, 'uint64', 0)
+
+def test_multiarray_module():
+ # gh-12736
+ # numpy 1.16 replaced the multiarray and umath c-extension modules with
+ # a single _multiarray_umath one. For backward compatibility, it added a
+ # pure-python multiarray.py and umath.py shim so people can still do
+ # from numpy.core.multiarray import something-public-api
+ # It turns out pip can leave old pieces of previous versions of numpy
+ # around when installing a newer version. If the old c-extension modules
+ # are found, they will be given precedence over the new pure-python ones.
+ #
+ # This test copies a multiarray c-extension in parallel with the pure-
+ # python one, and starts another python interpreter to load multiarray.
+ # The expectation is that import will fail.
+ import subprocess, shutil
+ core_dir = os.path.dirname(np.core.multiarray.__file__)
+ cextension = np.core._multiarray_umath.__file__
+ testfile = cextension.replace('_multiarray_umath', '_multiarray_module_test')
+ badfile = cextension.replace('_multiarray_umath', 'multiarray')
+ assert not os.path.exists(badfile), '%s exists, this numpy ' \
+ 'installation is faulty' % badfile
+ try:
+ shutil.copy(testfile, badfile)
+ p = subprocess.Popen([sys.executable, '-c', 'import numpy' ],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+ env=os.environ.copy())
+ stdout, stderr = p.communicate()
+ r = p.wait()
+ #print(stdout.decode())
+ #print(stderr.decode())
+ assert r != 0
+ assert b'ImportError' in stderr
+ finally:
+ if os.path.exists(badfile):
+ try:
+ # can this fail?
+ os.remove(badfile)
+ except:
+ print("Could not remove %s, remove it by hand" % badfile)
+ raise
diff --git a/numpy/core/tests/test_overrides.py b/numpy/core/tests/test_overrides.py
index 62b2a3e53..8f1c16539 100644
--- a/numpy/core/tests/test_overrides.py
+++ b/numpy/core/tests/test_overrides.py
@@ -7,7 +7,7 @@ import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_raises_regex)
from numpy.core.overrides import (
- get_overloaded_types_and_args, array_function_dispatch,
+ _get_implementing_args, array_function_dispatch,
verify_matching_signatures, ENABLE_ARRAY_FUNCTION)
from numpy.core.numeric import pickle
import pytest
@@ -18,11 +18,6 @@ requires_array_function = pytest.mark.skipif(
reason="__array_function__ dispatch not enabled.")
-def _get_overloaded_args(relevant_args):
- types, args = get_overloaded_types_and_args(relevant_args)
- return args
-
-
def _return_not_implemented(self, *args, **kwargs):
return NotImplemented
@@ -41,26 +36,21 @@ def dispatched_two_arg(array1, array2):
@requires_array_function
-class TestGetOverloadedTypesAndArgs(object):
+class TestGetImplementingArgs(object):
def test_ndarray(self):
array = np.array(1)
- types, args = get_overloaded_types_and_args([array])
- assert_equal(set(types), {np.ndarray})
+ args = _get_implementing_args([array])
assert_equal(list(args), [array])
- types, args = get_overloaded_types_and_args([array, array])
- assert_equal(len(types), 1)
- assert_equal(set(types), {np.ndarray})
+ args = _get_implementing_args([array, array])
assert_equal(list(args), [array])
- types, args = get_overloaded_types_and_args([array, 1])
- assert_equal(set(types), {np.ndarray})
+ args = _get_implementing_args([array, 1])
assert_equal(list(args), [array])
- types, args = get_overloaded_types_and_args([1, array])
- assert_equal(set(types), {np.ndarray})
+ args = _get_implementing_args([1, array])
assert_equal(list(args), [array])
def test_ndarray_subclasses(self):
@@ -75,17 +65,14 @@ class TestGetOverloadedTypesAndArgs(object):
override_sub = np.array(1).view(OverrideSub)
no_override_sub = np.array(1).view(NoOverrideSub)
- types, args = get_overloaded_types_and_args([array, override_sub])
- assert_equal(set(types), {np.ndarray, OverrideSub})
+ args = _get_implementing_args([array, override_sub])
assert_equal(list(args), [override_sub, array])
- types, args = get_overloaded_types_and_args([array, no_override_sub])
- assert_equal(set(types), {np.ndarray, NoOverrideSub})
+ args = _get_implementing_args([array, no_override_sub])
assert_equal(list(args), [no_override_sub, array])
- types, args = get_overloaded_types_and_args(
+ args = _get_implementing_args(
[override_sub, no_override_sub])
- assert_equal(set(types), {OverrideSub, NoOverrideSub})
assert_equal(list(args), [override_sub, no_override_sub])
def test_ndarray_and_duck_array(self):
@@ -96,12 +83,10 @@ class TestGetOverloadedTypesAndArgs(object):
array = np.array(1)
other = Other()
- types, args = get_overloaded_types_and_args([other, array])
- assert_equal(set(types), {np.ndarray, Other})
+ args = _get_implementing_args([other, array])
assert_equal(list(args), [other, array])
- types, args = get_overloaded_types_and_args([array, other])
- assert_equal(set(types), {np.ndarray, Other})
+ args = _get_implementing_args([array, other])
assert_equal(list(args), [array, other])
def test_ndarray_subclass_and_duck_array(self):
@@ -116,9 +101,9 @@ class TestGetOverloadedTypesAndArgs(object):
subarray = np.array(1).view(OverrideSub)
other = Other()
- assert_equal(_get_overloaded_args([array, subarray, other]),
+ assert_equal(_get_implementing_args([array, subarray, other]),
[subarray, array, other])
- assert_equal(_get_overloaded_args([array, other, subarray]),
+ assert_equal(_get_implementing_args([array, other, subarray]),
[subarray, array, other])
def test_many_duck_arrays(self):
@@ -140,15 +125,26 @@ class TestGetOverloadedTypesAndArgs(object):
c = C()
d = D()
- assert_equal(_get_overloaded_args([1]), [])
- assert_equal(_get_overloaded_args([a]), [a])
- assert_equal(_get_overloaded_args([a, 1]), [a])
- assert_equal(_get_overloaded_args([a, a, a]), [a])
- assert_equal(_get_overloaded_args([a, d, a]), [a, d])
- assert_equal(_get_overloaded_args([a, b]), [b, a])
- assert_equal(_get_overloaded_args([b, a]), [b, a])
- assert_equal(_get_overloaded_args([a, b, c]), [b, c, a])
- assert_equal(_get_overloaded_args([a, c, b]), [c, b, a])
+ assert_equal(_get_implementing_args([1]), [])
+ assert_equal(_get_implementing_args([a]), [a])
+ assert_equal(_get_implementing_args([a, 1]), [a])
+ assert_equal(_get_implementing_args([a, a, a]), [a])
+ assert_equal(_get_implementing_args([a, d, a]), [a, d])
+ assert_equal(_get_implementing_args([a, b]), [b, a])
+ assert_equal(_get_implementing_args([b, a]), [b, a])
+ assert_equal(_get_implementing_args([a, b, c]), [b, c, a])
+ assert_equal(_get_implementing_args([a, c, b]), [c, b, a])
+
+ def test_too_many_duck_arrays(self):
+ namespace = dict(__array_function__=_return_not_implemented)
+ types = [type('A' + str(i), (object,), namespace) for i in range(33)]
+ relevant_args = [t() for t in types]
+
+ actual = _get_implementing_args(relevant_args[:32])
+ assert_equal(actual, relevant_args[:32])
+
+ with assert_raises_regex(TypeError, 'distinct argument types'):
+ _get_implementing_args(relevant_args)
@requires_array_function
@@ -201,6 +197,14 @@ class TestNDArrayArrayFunction(object):
result = np.concatenate((array, override_sub))
assert_equal(result, expected.view(OverrideSub))
+ def test_no_wrapper(self):
+ array = np.array(1)
+ func = dispatched_one_arg.__wrapped__
+ with assert_raises_regex(AttributeError, '__wrapped__'):
+ array.__array_function__(func=func,
+ types=(np.ndarray,),
+ args=(array,), kwargs={})
+
@requires_array_function
class TestArrayFunctionDispatch(object):
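For readers unfamiliar with the protocol exercised by these tests, a minimal duck array sketch (NEP 18; in 1.16 the dispatch must be enabled via the NUMPY_EXPERIMENTAL_ARRAY_FUNCTION environment variable, and the class name here is illustrative):

    import numpy as np

    class MyDuck(object):
        def __array_function__(self, func, types, args, kwargs):
            # A real implementation would dispatch on `func` and return
            # NotImplemented for anything it does not handle.
            return "handled {}".format(func.__name__)

    # With dispatch enabled, np.concatenate defers to the duck array
    # instead of coercing it:
    #   np.concatenate([MyDuck()])  ->  'handled concatenate'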
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index 2421a1161..472a83696 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -46,7 +46,7 @@ class TestRegression(object):
assert_array_equal(a, b)
def test_typeNA(self):
- # Issue gh-515
+ # Issue gh-515
with suppress_warnings() as sup:
sup.filter(np.VisibleDeprecationWarning)
assert_equal(np.typeNA[np.int64], 'Int64')
@@ -2411,7 +2411,41 @@ class TestRegression(object):
if HAS_REFCOUNT:
assert_(base <= sys.getrefcount(s))
+ @pytest.mark.parametrize('val', [
+ # arrays and scalars
+ np.ones((10, 10), dtype='int32'),
+ np.uint64(10),
+ ])
+ @pytest.mark.parametrize('protocol',
+ range(2, pickle.HIGHEST_PROTOCOL + 1)
+ )
+ def test_pickle_module(self, protocol, val):
+ # gh-12837
+ s = pickle.dumps(val, protocol)
+ assert b'_multiarray_umath' not in s
+ if protocol == 5 and len(val.shape) > 0:
+ # unpickling ndarray goes through _frombuffer for protocol 5
+ assert b'numpy.core.numeric' in s
+ else:
+ assert b'numpy.core.multiarray' in s
+
def test_object_casting_errors(self):
# gh-11993
arr = np.array(['AAAAA', 18465886.0, 18465886.0], dtype=object)
assert_raises(TypeError, arr.astype, 'c8')
+
+ def test_eff1d_casting(self):
+ # gh-12711
+ x = np.array([1, 2, 4, 7, 0], dtype=np.int16)
+ res = np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
+ assert_equal(res, [-99, 1, 2, 3, -7, 88, 99])
+ assert_raises(ValueError, np.ediff1d, x, to_begin=(1<<20))
+ assert_raises(ValueError, np.ediff1d, x, to_end=(1<<20))
+
+ def test_pickle_datetime64_array(self):
+ # gh-12745 (would fail with pickle5 installed)
+ d = np.datetime64('2015-07-04 12:59:59.50', 'ns')
+ arr = np.array([d])
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ dumped = pickle.dumps(arr, protocol=proto)
+ assert_equal(pickle.loads(dumped), arr)
diff --git a/numpy/core/tests/test_shape_base.py b/numpy/core/tests/test_shape_base.py
index ef5c118ec..b996321c2 100644
--- a/numpy/core/tests/test_shape_base.py
+++ b/numpy/core/tests/test_shape_base.py
@@ -373,6 +373,10 @@ def test_stack():
# empty arrays
assert_(stack([[], [], []]).shape == (3, 0))
assert_(stack([[], [], []], axis=1).shape == (0, 3))
+ # out
+ out = np.zeros_like(r1)
+ np.stack((a, b), out=out)
+ assert_array_equal(out, r1)
# edge cases
assert_raises_regex(ValueError, 'need at least one array', stack, [])
assert_raises_regex(ValueError, 'must have the same shape',
diff --git a/numpy/ctypeslib.py b/numpy/ctypeslib.py
index 9eefbc9f4..78aa59ddc 100644
--- a/numpy/ctypeslib.py
+++ b/numpy/ctypeslib.py
@@ -346,27 +346,157 @@ def ndpointer(dtype=None, ndim=None, shape=None, flags=None):
return klass
-def _get_typecodes():
- """ Return a dictionary mapping __array_interface__ formats to ctypes types """
- ct = ctypes
- simple_types = [
- ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong,
- ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong,
- ct.c_float, ct.c_double,
- ]
+if ctypes is not None:
+ def _ctype_ndarray(element_type, shape):
+ """ Create an ndarray of the given element type and shape """
+ for dim in shape[::-1]:
+ element_type = dim * element_type
+ # prevent the type name from including np.ctypeslib
+ element_type.__module__ = None
+ return element_type
- return {_dtype(ctype).str: ctype for ctype in simple_types}
+ def _get_scalar_type_map():
+ """
+ Return a dictionary mapping native endian scalar dtype to ctypes types
+ """
+ ct = ctypes
+ simple_types = [
+ ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong,
+ ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong,
+ ct.c_float, ct.c_double,
+ ct.c_bool,
+ ]
+ return {_dtype(ctype): ctype for ctype in simple_types}
-def _ctype_ndarray(element_type, shape):
- """ Create an ndarray of the given element type and shape """
- for dim in shape[::-1]:
- element_type = element_type * dim
- return element_type
+ _scalar_type_map = _get_scalar_type_map()
+
+
+ def _ctype_from_dtype_scalar(dtype):
+ # swapping twice ensures that `=` is promoted to <, >, or |
+ dtype_with_endian = dtype.newbyteorder('S').newbyteorder('S')
+ dtype_native = dtype.newbyteorder('=')
+ try:
+ ctype = _scalar_type_map[dtype_native]
+ except KeyError:
+ raise NotImplementedError(
+ "Converting {!r} to a ctypes type".format(dtype)
+ )
+
+ if dtype_with_endian.byteorder == '>':
+ ctype = ctype.__ctype_be__
+ elif dtype_with_endian.byteorder == '<':
+ ctype = ctype.__ctype_le__
+
+ return ctype
+
+
+ def _ctype_from_dtype_subarray(dtype):
+ element_dtype, shape = dtype.subdtype
+ ctype = _ctype_from_dtype(element_dtype)
+ return _ctype_ndarray(ctype, shape)
+
+
+ def _ctype_from_dtype_structured(dtype):
+ # extract offsets of each field
+ field_data = []
+ for name in dtype.names:
+ field_dtype, offset = dtype.fields[name][:2]
+ field_data.append((offset, name, _ctype_from_dtype(field_dtype)))
+
+ # ctypes doesn't care about field order
+ field_data = sorted(field_data, key=lambda f: f[0])
+
+ if len(field_data) > 1 and all(offset == 0 for offset, name, ctype in field_data):
+ # union, if multiple fields all at address 0
+ size = 0
+ _fields_ = []
+ for offset, name, ctype in field_data:
+ _fields_.append((name, ctype))
+ size = max(size, ctypes.sizeof(ctype))
+
+ # pad to the right size
+ if dtype.itemsize != size:
+ _fields_.append(('', ctypes.c_char * dtype.itemsize))
+
+ # we inserted manual padding, so always set `_pack_` to 1
+ return type('union', (ctypes.Union,), dict(
+ _fields_=_fields_,
+ _pack_=1,
+ __module__=None,
+ ))
+ else:
+ last_offset = 0
+ _fields_ = []
+ for offset, name, ctype in field_data:
+ padding = offset - last_offset
+ if padding < 0:
+ raise NotImplementedError("Overlapping fields")
+ if padding > 0:
+ _fields_.append(('', ctypes.c_char * padding))
+
+ _fields_.append((name, ctype))
+ last_offset = offset + ctypes.sizeof(ctype)
+
+ padding = dtype.itemsize - last_offset
+ if padding > 0:
+ _fields_.append(('', ctypes.c_char * padding))
+
+ # we inserted manual padding, so always set `_pack_` to 1
+ return type('struct', (ctypes.Structure,), dict(
+ _fields_=_fields_,
+ _pack_=1,
+ __module__=None,
+ ))
+
+
+ def _ctype_from_dtype(dtype):
+ if dtype.fields is not None:
+ return _ctype_from_dtype_structured(dtype)
+ elif dtype.subdtype is not None:
+ return _ctype_from_dtype_subarray(dtype)
+ else:
+ return _ctype_from_dtype_scalar(dtype)
+
+
+ def as_ctypes_type(dtype):
+ """
+ Convert a dtype into a ctypes type.
+
+ Parameters
+ ----------
+ dtype : dtype
+ The dtype to convert
+
+ Returns
+ -------
+ ctypes
+ A ctypes scalar, union, array, or struct type
+
+ Raises
+ ------
+ NotImplementedError
+ If the conversion is not possible
+
+ Notes
+ -----
+ This function does not losslessly round-trip in either direction.
+
+ ``np.dtype(as_ctypes_type(dt))`` will:
+ - insert padding fields
+ - reorder fields to be sorted by offset
+ - discard field titles
+
+ ``as_ctypes_type(np.dtype(ctype))`` will:
+ - discard the class names of ``Structure``s and ``Union``s
+ - convert single-element ``Union``s into single-element ``Structure``s
+ - insert padding fields
+
+ """
+ return _ctype_from_dtype(_dtype(dtype))
-if ctypes is not None:
- _typecodes = _get_typecodes()
def as_array(obj, shape=None):
"""
@@ -388,6 +518,7 @@ if ctypes is not None:
return array(obj, copy=False)
+
def as_ctypes(obj):
"""Create and return a ctypes object from a numpy array. Actually
anything that exposes the __array_interface__ is accepted."""
@@ -399,7 +530,8 @@ if ctypes is not None:
addr, readonly = ai["data"]
if readonly:
raise TypeError("readonly arrays unsupported")
- tp = _ctype_ndarray(_typecodes[ai["typestr"]], ai["shape"])
- result = tp.from_address(addr)
+
+ dtype = _dtype((ai["typestr"], ai["shape"]))
+ result = as_ctypes_type(dtype).from_address(addr)
result.__keep = obj
return result
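
A usage sketch for the new ``as_ctypes_type`` helper; the dtype is chosen only
to show the alignment padding mentioned in the Notes:

    import ctypes
    import numpy as np

    dt = np.dtype([('a', np.uint16), ('b', np.uint32)], align=True)
    ct = np.ctypeslib.as_ctypes_type(dt)
    assert issubclass(ct, ctypes.Structure)
    # an unnamed 2-byte padding field is inserted before 'b'
    assert ctypes.sizeof(ct) == dt.itemsize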
diff --git a/numpy/distutils/fcompiler/__init__.py b/numpy/distutils/fcompiler/__init__.py
index 12b32832e..001dea5ec 100644
--- a/numpy/distutils/fcompiler/__init__.py
+++ b/numpy/distutils/fcompiler/__init__.py
@@ -466,10 +466,8 @@ class FCompiler(CCompiler):
noarch = self.distutils_vars.get('noarch', noopt)
debug = self.distutils_vars.get('debug', False)
- f77 = shlex.split(self.command_vars.compiler_f77,
- posix=(os.name == 'posix'))
- f90 = shlex.split(self.command_vars.compiler_f90,
- posix=(os.name == 'posix'))
+ f77 = self.command_vars.compiler_f77
+ f90 = self.command_vars.compiler_f90
f77flags = []
f90flags = []
@@ -477,8 +475,10 @@ class FCompiler(CCompiler):
fixflags = []
if f77:
+ f77 = shlex.split(f77, posix=(os.name == 'posix'))
f77flags = self.flag_vars.f77
if f90:
+ f90 = shlex.split(f90, posix=(os.name == 'posix'))
f90flags = self.flag_vars.f90
freeflags = self.flag_vars.free
# XXX Assuming that free format is the default for the f90 compiler.
@@ -490,8 +490,8 @@ class FCompiler(CCompiler):
# environment variable has been customized by CI or a user
# should perhaps eventually be more thoroughly tested and more
# robustly handled
- fix = shlex.split(fix, posix=(os.name == 'posix'))
if fix:
+ fix = shlex.split(fix, posix=(os.name == 'posix'))
fixflags = self.flag_vars.fix + f90flags
oflags, aflags, dflags = [], [], []
diff --git a/numpy/distutils/fcompiler/absoft.py b/numpy/distutils/fcompiler/absoft.py
index 2c3edfe02..d14fee0e1 100644
--- a/numpy/distutils/fcompiler/absoft.py
+++ b/numpy/distutils/fcompiler/absoft.py
@@ -66,7 +66,7 @@ class AbsoftFCompiler(FCompiler):
def library_dir_option(self, dir):
if os.name=='nt':
- return ['-link', '/PATH:"%s"' % (dir)]
+ return ['-link', '/PATH:%s' % (dir)]
return "-L" + dir
def library_option(self, lib):
diff --git a/numpy/distutils/fcompiler/environment.py b/numpy/distutils/fcompiler/environment.py
index 489784580..4238f35cb 100644
--- a/numpy/distutils/fcompiler/environment.py
+++ b/numpy/distutils/fcompiler/environment.py
@@ -1,6 +1,7 @@
from __future__ import division, absolute_import, print_function
import os
+import warnings
from distutils.dist import Distribution
__metaclass__ = type
@@ -54,8 +55,18 @@ class EnvironmentConfig(object):
if envvar is not None:
envvar_contents = os.environ.get(envvar)
if envvar_contents is not None:
- if var and append and os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '0') == '1':
- var = var + [envvar_contents]
+ if var and append:
+ if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '0') == '1':
+ var = var + [envvar_contents]
+ else:
+ var = envvar_contents
+ if 'NPY_DISTUTILS_APPEND_FLAGS' not in os.environ:
+ msg = "{} is used as is, not appended ".format(envvar) + \
+ "to flags already defined " + \
+ "by numpy.distutils! Use NPY_DISTUTILS_APPEND_FLAGS=1 " + \
+ "to obtain appending behavior instead (this " + \
+ "behavior will become default in a future release)."
+ warnings.warn(msg, UserWarning, stacklevel=3)
else:
var = envvar_contents
if confvar is not None and self._conf:
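
Restating the decision logic above as a standalone sketch (the helper name
``_resolve_flags`` is hypothetical, not part of numpy.distutils; the caller's
``append`` flag is assumed true here):

    import os

    def _resolve_flags(var, envvar_contents):
        # An environment variable replaces existing flags unless
        # NPY_DISTUTILS_APPEND_FLAGS=1 is set, in which case it appends;
        # the replace path warns when the variable is unset.
        if var and os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '0') == '1':
            return var + [envvar_contents]
        return envvar_contents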
diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py
index 81769e562..965c67041 100644
--- a/numpy/distutils/fcompiler/gnu.py
+++ b/numpy/distutils/fcompiler/gnu.py
@@ -269,8 +269,11 @@ class GnuFCompiler(FCompiler):
# Linux/Solaris/Unix support RPATH, Windows and AIX do not
raise NotImplementedError
+ # TODO: could use -Xlinker here, if it's supported
+ assert "," not in dir
+
sep = ',' if sys.platform == 'darwin' else '='
- return '-Wl,-rpath%s"%s"' % (sep, dir)
+ return '-Wl,-rpath%s%s' % (sep, dir)
class Gnu95FCompiler(GnuFCompiler):
diff --git a/numpy/distutils/fcompiler/intel.py b/numpy/distutils/fcompiler/intel.py
index 217eac8fb..51f681274 100644
--- a/numpy/distutils/fcompiler/intel.py
+++ b/numpy/distutils/fcompiler/intel.py
@@ -23,7 +23,10 @@ class BaseIntelFCompiler(FCompiler):
f + '.f', '-o', f + '.o']
def runtime_library_dir_option(self, dir):
- return '-Wl,-rpath="%s"' % dir
+ # TODO: could use -Xlinker here, if it's supported
+ assert "," not in dir
+
+ return '-Wl,-rpath=%s' % dir
class IntelFCompiler(BaseIntelFCompiler):
diff --git a/numpy/distutils/fcompiler/pg.py b/numpy/distutils/fcompiler/pg.py
index 99071800a..9c51947fd 100644
--- a/numpy/distutils/fcompiler/pg.py
+++ b/numpy/distutils/fcompiler/pg.py
@@ -33,7 +33,7 @@ class PGroupFCompiler(FCompiler):
'compiler_f77': ["pgfortran"],
'compiler_fix': ["pgfortran", "-Mfixed"],
'compiler_f90': ["pgfortran"],
- 'linker_so': ["pgfortran", "-shared", "-fpic"],
+ 'linker_so': ["pgfortran"],
'archiver': ["ar", "-cr"],
'ranlib': ["ranlib"]
}
@@ -56,8 +56,12 @@ class PGroupFCompiler(FCompiler):
def get_flags_linker_so(self):
return ["-dynamic", '-undefined', 'dynamic_lookup']
+ else:
+ def get_flags_linker_so(self):
+ return ["-shared", '-fpic']
+
def runtime_library_dir_option(self, dir):
- return '-R"%s"' % dir
+ return '-R%s' % dir
if sys.version_info >= (3, 5):
diff --git a/numpy/distutils/fcompiler/sun.py b/numpy/distutils/fcompiler/sun.py
index d477d3308..561ea854f 100644
--- a/numpy/distutils/fcompiler/sun.py
+++ b/numpy/distutils/fcompiler/sun.py
@@ -44,7 +44,7 @@ class SunFCompiler(FCompiler):
return opt
def runtime_library_dir_option(self, dir):
- return '-R"%s"' % dir
+ return '-R%s' % dir
if __name__ == '__main__':
from distutils import log
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index cd63cc849..befe53b3d 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -164,6 +164,17 @@ _bits = {'32bit': 32, '64bit': 64}
platform_bits = _bits[platform.architecture()[0]]
+def _c_string_literal(s):
+ """
+ Convert a python string into a literal suitable for inclusion into C code
+ """
+ # only these three characters are forbidden in C strings
+ s = s.replace('\\', r'\\')
+ s = s.replace('"', r'\"')
+ s = s.replace('\n', r'\n')
+ return '"{}"'.format(s)
+
+
def libpaths(paths, bits):
"""Return a list of library paths valid on 32 or 64 bit systems.
@@ -1497,7 +1508,7 @@ Make sure that -lgfortran is used for C++ extensions.
atlas_version = os.environ.get('ATLAS_VERSION', None)
if atlas_version:
dict_append(info, define_macros=[(
- 'ATLAS_INFO', '"\\"%s\\""' % atlas_version)
+ 'ATLAS_INFO', _c_string_literal(atlas_version))
])
else:
dict_append(info, define_macros=[('NO_ATLAS_INFO', -1)])
@@ -1518,7 +1529,7 @@ Make sure that -lgfortran is used for C++ extensions.
dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)])
else:
dict_append(info, define_macros=[(
- 'ATLAS_INFO', '"\\"%s\\""' % atlas_version)
+ 'ATLAS_INFO', _c_string_literal(atlas_version))
])
result = _cached_atlas_version[key] = atlas_version, info
return result
@@ -2063,7 +2074,7 @@ class _numpy_info(system_info):
if vrs is None:
continue
macros = [(self.modulename.upper() + '_VERSION',
- '"\\"%s\\""' % (vrs)),
+ _c_string_literal(vrs)),
(self.modulename.upper(), None)]
break
dict_append(info, define_macros=macros)
@@ -2268,7 +2279,7 @@ class _pkg_config_info(system_info):
version = self.get_config_output(config_exe, self.version_flag)
if version:
macros.append((self.__class__.__name__.split('.')[-1].upper(),
- '"\\"%s\\""' % (version)))
+ _c_string_literal(version)))
if self.version_macro_name:
macros.append((self.version_macro_name + '_%s'
% (version.replace('.', '_')), None))
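
A quick check of the escaping performed by the new helper (note that
``_c_string_literal`` is a private function, imported here only to illustrate
its behavior):

    from numpy.distutils.system_info import _c_string_literal

    # backslash, double quote, and newline are the three escaped characters
    assert _c_string_literal('1.2.3 "beta"\n') == '"1.2.3 \\"beta\\"\\n"'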
diff --git a/numpy/distutils/tests/test_fcompiler.py b/numpy/distutils/tests/test_fcompiler.py
index 95e44b051..ba19a97ea 100644
--- a/numpy/distutils/tests/test_fcompiler.py
+++ b/numpy/distutils/tests/test_fcompiler.py
@@ -1,6 +1,8 @@
from __future__ import division, absolute_import, print_function
-from numpy.testing import assert_
+import pytest
+
+from numpy.testing import assert_, suppress_warnings
import numpy.distutils.fcompiler
customizable_flags = [
@@ -25,6 +27,7 @@ def test_fcompiler_flags(monkeypatch):
monkeypatch.setenv(envvar, new_flag)
new_flags = getattr(flag_vars, opt)
+
monkeypatch.delenv(envvar)
assert_(new_flags == [new_flag])
@@ -33,12 +36,46 @@ def test_fcompiler_flags(monkeypatch):
for opt, envvar in customizable_flags:
new_flag = '-dummy-{}-flag'.format(opt)
prev_flags = getattr(flag_vars, opt)
-
monkeypatch.setenv(envvar, new_flag)
new_flags = getattr(flag_vars, opt)
+
monkeypatch.delenv(envvar)
if prev_flags is None:
assert_(new_flags == [new_flag])
else:
assert_(new_flags == prev_flags + [new_flag])
+
+def test_fcompiler_flags_append_warning(monkeypatch):
+ # Test that the warning about the future change in append behavior
+ # is triggered. Need to use a real compiler instance so that we have
+ # non-empty flags to start with (otherwise the "if var and append" check
+ # will always be false).
+ try:
+ with suppress_warnings() as sup:
+ sup.record()
+ fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95')
+ fc.customize()
+ except numpy.distutils.fcompiler.CompilerNotFound:
+ pytest.skip("gfortran not found, so can't execute this test")
+
+ # Ensure NPY_DISTUTILS_APPEND_FLAGS is not defined
+ monkeypatch.delenv('NPY_DISTUTILS_APPEND_FLAGS', raising=False)
+
+ for opt, envvar in customizable_flags:
+ new_flag = '-dummy-{}-flag'.format(opt)
+ with suppress_warnings() as sup:
+ sup.record()
+ prev_flags = getattr(fc.flag_vars, opt)
+
+ monkeypatch.setenv(envvar, new_flag)
+ with suppress_warnings() as sup:
+ sup.record()
+ new_flags = getattr(fc.flag_vars, opt)
+ if prev_flags:
+ # Check that warning was issued
+ assert len(sup.log) == 1
+
+ monkeypatch.delenv(envvar)
+ assert_(new_flags == [new_flag])
+
diff --git a/numpy/f2py/__init__.py b/numpy/f2py/__init__.py
index 23a4b7c41..d146739bb 100644
--- a/numpy/f2py/__init__.py
+++ b/numpy/f2py/__init__.py
@@ -28,12 +28,16 @@ def compile(source,
extension='.f'
):
"""
- Build extension module from processing source with f2py.
+ Build extension module from a Fortran 77 source string with f2py.
Parameters
----------
- source : str
+ source : str or bytes
Fortran source of module / subroutine to compile
+
+ .. versionchanged:: 1.16.0
+ Accept str as well as bytes
+
modulename : str, optional
The name of the compiled python module
extra_args : str or list, optional
@@ -55,6 +59,16 @@ def compile(source,
.. versionadded:: 1.11.0
+ Returns
+ -------
+ result : int
+ 0 on success
+
+ Examples
+ --------
+ .. include:: compile_session.dat
+ :literal:
+
"""
import tempfile
import shlex
@@ -67,9 +81,11 @@ def compile(source,
else:
fname = source_fn
+ if not isinstance(source, str):
+ source = str(source, 'utf-8')
try:
with open(fname, 'w') as f:
- f.write(str(source))
+ f.write(source)
args = ['-c', '-m', modulename, f.name]
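
A hedged usage sketch of the str/bytes acceptance; compiling requires a
working Fortran compiler on PATH, so treat this as illustrative only:

    import numpy.f2py

    fsource = '''
          subroutine hi()
          print *, "hello"
          end
    '''
    # str and bytes sources are both accepted since 1.16.0
    ret = numpy.f2py.compile(fsource, modulename='hi', extension='.f')
    assert ret == 0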
diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py
index 8750ed0b3..47223151f 100755
--- a/numpy/f2py/f2py2e.py
+++ b/numpy/f2py/f2py2e.py
@@ -396,8 +396,25 @@ def dict_append(d_out, d_in):
def run_main(comline_list):
- """Run f2py as if string.join(comline_list,' ') is used as a command line.
- In case of using -h flag, return None.
+ """
+ Equivalent to running::
+
+ f2py <args>
+
+ where ``<args> = ' '.join(comline_list)``, but in Python. Unless
+ ``-h`` is used, this function returns a dictionary containing
+ information on generated modules and their dependencies on source
+ files. For example, the command ``f2py -m scalar scalar.f`` can be
+ executed from Python as shown in the Examples section below.
+
+ You cannot build extension modules with this function, that is,
+ using ``-c`` is not allowed. Use the ``compile`` command instead.
+
+ Examples
+ --------
+ .. include:: run_main_session.dat
+ :literal:
+
"""
crackfortran.reset_global_f2py_vars()
f2pydir = os.path.dirname(os.path.abspath(cfuncs.__file__))
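
A sketch of invoking ``run_main`` from Python (``scalar.f`` is a placeholder
for an existing Fortran source file):

    from numpy.f2py import f2py2e

    # generates wrapper sources and returns module/dependency info;
    # building with -c is not allowed through this entry point
    r = f2py2e.run_main(['-m', 'scalar', 'scalar.f'])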
diff --git a/numpy/f2py/tests/test_compile_function.py b/numpy/f2py/tests/test_compile_function.py
index 74e0804e2..36abf05f9 100644
--- a/numpy/f2py/tests/test_compile_function.py
+++ b/numpy/f2py/tests/test_compile_function.py
@@ -106,3 +106,20 @@ def test_f2py_init_compile_bad_cmd():
assert_equal(ret_val, 127)
finally:
sys.executable = temp
+
+
+@pytest.mark.parametrize('fsource',
+ ['program test_f2py\nend program test_f2py',
+ b'program test_f2py\nend program test_f2py',])
+def test_compile_from_strings(tmpdir, fsource):
+ # Make sure we can compile both str and bytes sources (gh-12796)
+ cwd = os.getcwd()
+ try:
+ os.chdir(str(tmpdir))
+ ret_val = numpy.f2py.compile(
+ fsource,
+ modulename='test_compile_from_strings',
+ extension='.f90')
+ assert_equal(ret_val, 0)
+ finally:
+ os.chdir(cwd)
diff --git a/numpy/lib/_datasource.py b/numpy/lib/_datasource.py
index 30237b76f..463266e28 100644
--- a/numpy/lib/_datasource.py
+++ b/numpy/lib/_datasource.py
@@ -545,6 +545,11 @@ class DataSource(object):
is accessible if it exists in either location.
"""
+
+ # First test for local path
+ if os.path.exists(path):
+ return True
+
# We import this here because importing urllib2 is slow and
# a significant fraction of numpy's total import time.
if sys.version_info[0] >= 3:
@@ -554,10 +559,6 @@ class DataSource(object):
from urllib2 import urlopen
from urllib2 import URLError
- # Test local path
- if os.path.exists(path):
- return True
-
# Test cached url
upath = self.abspath(path)
if os.path.exists(upath):
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py
index fd64ecbd6..3356904ab 100644
--- a/numpy/lib/arraysetops.py
+++ b/numpy/lib/arraysetops.py
@@ -94,8 +94,7 @@ def ediff1d(ary, to_end=None, to_begin=None):
# force a 1d array
ary = np.asanyarray(ary).ravel()
- # we have unit tests enforcing
- # propagation of the dtype of input
+ # enforce propagation of the dtype of input
# ary to returned result
dtype_req = ary.dtype
@@ -106,23 +105,22 @@ def ediff1d(ary, to_end=None, to_begin=None):
if to_begin is None:
l_begin = 0
else:
- to_begin = np.asanyarray(to_begin)
- if not np.can_cast(to_begin, dtype_req):
- raise TypeError("dtype of to_begin must be compatible "
- "with input ary")
-
- to_begin = to_begin.ravel()
+ _to_begin = np.asanyarray(to_begin, dtype=dtype_req)
+ if not np.all(_to_begin == to_begin):
+ raise ValueError("cannot convert 'to_begin' to array with dtype "
+ "'%r' as required for input ary" % dtype_req)
+ to_begin = _to_begin.ravel()
l_begin = len(to_begin)
if to_end is None:
l_end = 0
else:
- to_end = np.asanyarray(to_end)
- if not np.can_cast(to_end, dtype_req):
- raise TypeError("dtype of to_end must be compatible "
- "with input ary")
-
- to_end = to_end.ravel()
+ _to_end = np.asanyarray(to_end, dtype=dtype_req)
+ # check that casting has not overflowed
+ if not np.all(_to_end == to_end):
+ raise ValueError("cannot convert 'to_end' to array with dtype "
+ "'%r' as required for input ary" % dtype_req)
+ to_end = _to_end.ravel()
l_end = len(to_end)
# do the calculation in place and copy to_begin and to_end
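
The new casting contract in a nutshell (values chosen so that the second call
overflows int16):

    import numpy as np

    x = np.array([1, 2, 4, 7, 0], dtype=np.int16)
    np.ediff1d(x, to_begin=-99)          # fine: -99 is representable in int16
    try:
        np.ediff1d(x, to_begin=1 << 20)  # 1 << 20 does not fit in int16
    except ValueError:
        pass                             # raised instead of silently wrapping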
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 5f87c8b2c..d9ce3f8a4 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -2590,6 +2590,7 @@ def blackman(M):
Examples
--------
+ >>> import matplotlib.pyplot as plt
>>> np.blackman(12)
array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01,
4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
@@ -2807,6 +2808,7 @@ def hanning(M):
Plot the window and its frequency response:
+ >>> import matplotlib.pyplot as plt
>>> from numpy.fft import fft, fftshift
>>> window = np.hanning(51)
>>> plt.plot(window)
@@ -2906,6 +2908,7 @@ def hamming(M):
Plot the window and the frequency response:
+ >>> import matplotlib.pyplot as plt
>>> from numpy.fft import fft, fftshift
>>> window = np.hamming(51)
>>> plt.plot(window)
@@ -3180,6 +3183,7 @@ def kaiser(M, beta):
Examples
--------
+ >>> import matplotlib.pyplot as plt
>>> np.kaiser(12, 14)
array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02,
2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
@@ -3273,6 +3277,7 @@ def sinc(x):
Examples
--------
+ >>> import matplotlib.pyplot as plt
>>> x = np.linspace(-4, 4, 41)
>>> np.sinc(x)
array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02,
diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py
index a17fc66e5..93d4b279f 100644
--- a/numpy/lib/tests/test_arraysetops.py
+++ b/numpy/lib/tests/test_arraysetops.py
@@ -136,8 +136,8 @@ class TestSetOps(object):
np.nan),
# should fail because attempting
# to downcast to smaller int type:
- (np.array([1, 2, 3], dtype=np.int32),
- np.array([5, 7, 2], dtype=np.int64),
+ (np.array([1, 2, 3], dtype=np.int16),
+ np.array([5, 1<<20, 2], dtype=np.int32),
None),
# should fail because attempting to cast
# two special floating point values
@@ -152,8 +152,8 @@ class TestSetOps(object):
# specifically, raise an appropriate
# Exception when attempting to append or
# prepend with an incompatible type
- msg = 'must be compatible'
- with assert_raises_regex(TypeError, msg):
+ msg = 'cannot convert'
+ with assert_raises_regex(ValueError, msg):
ediff1d(ary=ary,
to_end=append,
to_begin=prepend)
diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py
index 27d848608..5c840b111 100644
--- a/numpy/lib/twodim_base.py
+++ b/numpy/lib/twodim_base.py
@@ -644,7 +644,7 @@ def histogram2d(x, y, bins=10, range=None, normed=None, weights=None,
Examples
--------
- >>> import matplotlib as mpl
+ >>> from matplotlib.image import NonUniformImage
>>> import matplotlib.pyplot as plt
Construct a 2-D histogram with variable bin width. First define the bin
@@ -679,7 +679,7 @@ def histogram2d(x, y, bins=10, range=None, normed=None, weights=None,
>>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',
... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])
- >>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
+ >>> im = NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = (xedges[:-1] + xedges[1:]) / 2
>>> ycenters = (yedges[:-1] + yedges[1:]) / 2
>>> im.set_data(xcenters, ycenters, H)
diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx
index 21bc73e54..c0561137b 100644
--- a/numpy/random/mtrand/mtrand.pyx
+++ b/numpy/random/mtrand/mtrand.pyx
@@ -1139,9 +1139,12 @@ cdef class RandomState:
raise ValueError("'p' must be 1-dimensional")
if p.size != pop_size:
raise ValueError("'a' and 'p' must have same size")
+ p_sum = kahan_sum(pix, d)
+ if np.isnan(p_sum):
+ raise ValueError("probabilities contain NaN")
if np.logical_or.reduce(p < 0):
raise ValueError("probabilities are not non-negative")
- if abs(kahan_sum(pix, d) - 1.) > atol:
+ if abs(p_sum - 1.) > atol:
raise ValueError("probabilities do not sum to 1")
shape = size
@@ -2166,6 +2169,7 @@ cdef class RandomState:
>>> NF = np.histogram(nc_vals, bins=50, density=True)
>>> c_vals = np.random.f(dfnum, dfden, 1000000)
>>> F = np.histogram(c_vals, bins=50, density=True)
+ >>> import matplotlib.pyplot as plt
>>> plt.plot(F[1][1:], F[0])
>>> plt.plot(NF[1][1:], NF[0])
>>> plt.show()
@@ -2443,6 +2447,7 @@ cdef class RandomState:
--------
Draw samples and plot the distribution:
+ >>> import matplotlib.pyplot as plt
>>> s = np.random.standard_cauchy(1000000)
>>> s = s[(s>-25) & (s<25)] # truncate distribution so it plots well
>>> plt.hist(s, bins=100)
@@ -3279,6 +3284,7 @@ cdef class RandomState:
>>> loc, scale = 10, 1
>>> s = np.random.logistic(loc, scale, 10000)
+ >>> import matplotlib.pyplot as plt
>>> count, bins, ignored = plt.hist(s, bins=50)
# plot against distribution
@@ -3479,6 +3485,7 @@ cdef class RandomState:
--------
Draw values from the distribution and plot the histogram
+ >>> from matplotlib.pyplot import hist
>>> values = hist(np.random.rayleigh(3, 100000), bins=200, density=True)
Wave heights tend to follow a Rayleigh distribution. If the mean wave
@@ -4233,6 +4240,7 @@ cdef class RandomState:
>>> ngood, nbad, nsamp = 100, 2, 10
# number of good, number of bad, and number of samples
>>> s = np.random.hypergeometric(ngood, nbad, nsamp, 1000)
+ >>> from matplotlib.pyplot import hist
>>> hist(s)
# note that it is very unlikely to grab both bad items
@@ -4342,6 +4350,7 @@ cdef class RandomState:
>>> a = .6
>>> s = np.random.logseries(a, 10000)
+ >>> import matplotlib.pyplot as plt
>>> count, bins, ignored = plt.hist(s)
# plot against distribution
@@ -4710,6 +4719,7 @@ cdef class RandomState:
>>> s = np.random.dirichlet((10, 5, 3), 20).transpose()
+ >>> import matplotlib.pyplot as plt
>>> plt.barh(range(20), s[0])
>>> plt.barh(range(20), s[1], left=s[0], color='g')
>>> plt.barh(range(20), s[2], left=s[0]+s[1], color='r')
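
The new NaN guard in action (a minimal sketch):

    import numpy as np

    try:
        np.random.choice(3, p=[0.5, np.nan, 0.5])
    except ValueError as e:
        # pre-1.16.1 a NaN could slip past the validation checks
        assert 'NaN' in str(e)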
diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py
index d0bb92a73..d4721bc62 100644
--- a/numpy/random/tests/test_random.py
+++ b/numpy/random/tests/test_random.py
@@ -448,6 +448,11 @@ class TestRandomDist(object):
assert_equal(np.random.choice(['a', 'b'], size=(3, 0, 4)).shape, (3, 0, 4))
assert_raises(ValueError, np.random.choice, [], 10)
+ def test_choice_nan_probabilities(self):
+ a = np.array([42, 1, 2])
+ p = [None, None, None]
+ assert_raises(ValueError, np.random.choice, a, p=p)
+
def test_bytes(self):
np.random.seed(self.seed)
actual = np.random.bytes(10)
diff --git a/numpy/testing/_private/nosetester.py b/numpy/testing/_private/nosetester.py
index 1728d9d1f..19569a509 100644
--- a/numpy/testing/_private/nosetester.py
+++ b/numpy/testing/_private/nosetester.py
@@ -92,7 +92,7 @@ def run_module_suite(file_to_run=None, argv=None):
Alternatively, calling::
- >>> run_module_suite(file_to_run="numpy/tests/test_matlib.py")
+ >>> run_module_suite(file_to_run="numpy/tests/test_matlib.py") # doctest: +SKIP
from an interpreter will run all the test routine in 'test_matlib.py'.
"""
diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py
index 55306e499..36f5c3cfe 100644
--- a/numpy/testing/_private/utils.py
+++ b/numpy/testing/_private/utils.py
@@ -318,8 +318,9 @@ def assert_equal(actual, desired, err_msg='', verbose=True):
Examples
--------
>>> np.testing.assert_equal([4,5], [4,6])
- ...
- <type 'exceptions.AssertionError'>:
+ Traceback (most recent call last):
+ ...
+ AssertionError:
Items are not equal:
item=1
ACTUAL: 5
@@ -510,21 +511,24 @@ def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True):
>>> import numpy.testing as npt
>>> npt.assert_almost_equal(2.3333333333333, 2.33333334)
>>> npt.assert_almost_equal(2.3333333333333, 2.33333334, decimal=10)
- ...
- <type 'exceptions.AssertionError'>:
- Items are not equal:
- ACTUAL: 2.3333333333333002
- DESIRED: 2.3333333399999998
+ Traceback (most recent call last):
+ ...
+ AssertionError:
+ Arrays are not almost equal to 10 decimals
+ ACTUAL: 2.3333333333333
+ DESIRED: 2.33333334
>>> npt.assert_almost_equal(np.array([1.0,2.3333333333333]),
... np.array([1.0,2.33333334]), decimal=9)
- ...
- <type 'exceptions.AssertionError'>:
- Arrays are not almost equal
- <BLANKLINE>
- (mismatch 50.0%)
- x: array([ 1. , 2.33333333])
- y: array([ 1. , 2.33333334])
+ Traceback (most recent call last):
+ ...
+ AssertionError:
+ Arrays are not almost equal to 9 decimals
+ Mismatch: 50%
+ Max absolute difference: 6.66669964e-09
+ Max relative difference: 2.85715698e-09
+ x: array([1. , 2.333333333])
+ y: array([1. , 2.33333334])
"""
__tracebackhide__ = True # Hide traceback for py.test
@@ -626,14 +630,15 @@ def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True):
--------
>>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20)
>>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20,
- significant=8)
+ ... significant=8)
>>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20,
- significant=8)
- ...
- <type 'exceptions.AssertionError'>:
+ ... significant=8)
+ Traceback (most recent call last):
+ ...
+ AssertionError:
Items are not equal to 8 significant digits:
- ACTUAL: 1.234567e-021
- DESIRED: 1.2345672000000001e-021
+ ACTUAL: 1.234567e-21
+ DESIRED: 1.2345672e-21
the evaluated condition that raises the exception is
@@ -660,10 +665,10 @@ def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True):
sc_actual = actual/scale
except ZeroDivisionError:
sc_actual = 0.0
- msg = build_err_msg([actual, desired], err_msg,
- header='Items are not equal to %d significant digits:' %
- significant,
- verbose=verbose)
+ msg = build_err_msg(
+ [actual, desired], err_msg,
+ header='Items are not equal to %d significant digits:' % significant,
+ verbose=verbose)
try:
# If one of desired/actual is not finite, handle it specially here:
# check that both are nan if any is a nan, and test for equality
@@ -686,7 +691,7 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
header='', precision=6, equal_nan=True,
equal_inf=True):
__tracebackhide__ = True # Hide traceback for py.test
- from numpy.core import array, isnan, inf, bool_
+ from numpy.core import array, array2string, isnan, inf, bool_, errstate
x = array(x, copy=False, subok=True)
y = array(y, copy=False, subok=True)
@@ -782,15 +787,33 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
reduced = val.ravel()
cond = reduced.all()
reduced = reduced.tolist()
+
# The below comparison is a hack to ensure that fully masked
# results, for which val.ravel().all() returns np.ma.masked,
# do not trigger a failure (np.ma.masked != True evaluates as
# np.ma.masked, which is falsy).
if cond != True:
mismatch = 100.0 * reduced.count(0) / ox.size
- msg = build_err_msg([ox, oy],
- err_msg
- + '\n(mismatch %s%%)' % (mismatch,),
+ remarks = ['Mismatch: {:.3g}%'.format(mismatch)]
+
+ with errstate(invalid='ignore', divide='ignore'):
+ # ignore errors for non-numeric types
+ try:
+ error = abs(x - y)
+ max_abs_error = error.max()
+ remarks.append('Max absolute difference: '
+ + array2string(max_abs_error))
+
+ # note: this definition of relative error matches the one
+ # used by assert_allclose (found in np.isclose)
+ max_rel_error = (error / abs(y)).max()
+ remarks.append('Max relative difference: '
+ + array2string(max_rel_error))
+ except TypeError:
+ pass
+
+ err_msg += '\n' + '\n'.join(remarks)
+ msg = build_err_msg([ox, oy], err_msg,
verbose=verbose, header=header,
names=('x', 'y'), precision=precision)
raise AssertionError(msg)
@@ -850,14 +873,15 @@ def assert_array_equal(x, y, err_msg='', verbose=True):
>>> np.testing.assert_array_equal([1.0,np.pi,np.nan],
... [1, np.sqrt(np.pi)**2, np.nan])
- ...
- <type 'exceptions.ValueError'>:
+ Traceback (most recent call last):
+ ...
AssertionError:
Arrays are not equal
- <BLANKLINE>
- (mismatch 50.0%)
- x: array([ 1. , 3.14159265, NaN])
- y: array([ 1. , 3.14159265, NaN])
+ Mismatch: 33.3%
+ Max absolute difference: 4.4408921e-16
+ Max relative difference: 1.41357986e-16
+ x: array([1. , 3.141593, nan])
+ y: array([1. , 3.141593, nan])
Use `assert_allclose` or one of the nulp (number of floating point values)
functions for these cases instead:
@@ -922,26 +946,29 @@ def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
the first assert does not raise an exception
>>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan],
- [1.0,2.333,np.nan])
+ ... [1.0,2.333,np.nan])
>>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
... [1.0,2.33339,np.nan], decimal=5)
- ...
- <type 'exceptions.AssertionError'>:
+ Traceback (most recent call last):
+ ...
AssertionError:
- Arrays are not almost equal
- <BLANKLINE>
- (mismatch 50.0%)
- x: array([ 1. , 2.33333, NaN])
- y: array([ 1. , 2.33339, NaN])
+ Arrays are not almost equal to 5 decimals
+ Mismatch: 33.3%
+ Max absolute difference: 6.e-05
+ Max relative difference: 2.57136612e-05
+ x: array([1. , 2.33333, nan])
+ y: array([1. , 2.33339, nan])
>>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
... [1.0,2.33333, 5], decimal=5)
- <type 'exceptions.ValueError'>:
- ValueError:
- Arrays are not almost equal
- x: array([ 1. , 2.33333, NaN])
- y: array([ 1. , 2.33333, 5. ])
+ Traceback (most recent call last):
+ ...
+ AssertionError:
+ Arrays are not almost equal to 5 decimals
+ x and y nan location mismatch:
+ x: array([1. , 2.33333, nan])
+ y: array([1. , 2.33333, 5. ])
"""
__tracebackhide__ = True # Hide traceback for py.test
@@ -1022,27 +1049,34 @@ def assert_array_less(x, y, err_msg='', verbose=True):
--------
>>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan])
>>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan])
- ...
- <type 'exceptions.ValueError'>:
+ Traceback (most recent call last):
+ ...
+ AssertionError:
Arrays are not less-ordered
- (mismatch 50.0%)
- x: array([ 1., 1., NaN])
- y: array([ 1., 2., NaN])
+ Mismatch: 33.3%
+ Max absolute difference: 1.
+ Max relative difference: 0.5
+ x: array([ 1., 1., nan])
+ y: array([ 1., 2., nan])
>>> np.testing.assert_array_less([1.0, 4.0], 3)
- ...
- <type 'exceptions.ValueError'>:
+ Traceback (most recent call last):
+ ...
+ AssertionError:
Arrays are not less-ordered
- (mismatch 50.0%)
- x: array([ 1., 4.])
+ Mismatch: 50%
+ Max absolute difference: 2.
+ Max relative difference: 0.66666667
+ x: array([1., 4.])
y: array(3)
>>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4])
- ...
- <type 'exceptions.ValueError'>:
+ Traceback (most recent call last):
+ ...
+ AssertionError:
Arrays are not less-ordered
(shapes (3,), (1,) mismatch)
- x: array([ 1., 2., 3.])
+ x: array([1., 2., 3.])
y: array([4])
"""
@@ -1147,7 +1181,7 @@ def rundocs(filename=None, raise_on_error=True):
argument to the ``test()`` call. For example, to run all tests (including
doctests) for `numpy.lib`:
- >>> np.lib.test(doctests=True) #doctest: +SKIP
+ >>> np.lib.test(doctests=True) # doctest: +SKIP
"""
from numpy.compat import npy_load_module
import doctest
@@ -1329,7 +1363,7 @@ def decorate_methods(cls, decorator, testmatch=None):
return
-def measure(code_str,times=1,label=None):
+def measure(code_str, times=1, label=None):
"""
Return elapsed time for executing code in the namespace of the caller.
@@ -1356,9 +1390,9 @@ def measure(code_str,times=1,label=None):
Examples
--------
- >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)',
- ... times=times)
- >>> print("Time for a single execution : ", etime / times, "s")
+ >>> times = 10
+ >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)', times=times)
+ >>> print("Time for a single execution : ", etime / times, "s") # doctest: +SKIP
Time for a single execution : 0.005 s
"""
@@ -1443,7 +1477,7 @@ def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True,
--------
>>> x = [1e-5, 1e-3, 1e-1]
>>> y = np.arccos(np.cos(x))
- >>> assert_allclose(x, y, rtol=1e-5, atol=0)
+ >>> np.testing.assert_allclose(x, y, rtol=1e-5, atol=0)
"""
__tracebackhide__ = True # Hide traceback for py.test
@@ -1897,7 +1931,8 @@ class clear_and_catch_warnings(warnings.catch_warnings):
Examples
--------
>>> import warnings
- >>> with clear_and_catch_warnings(modules=[np.core.fromnumeric]):
+ >>> with np.testing.clear_and_catch_warnings(
+ ... modules=[np.core.fromnumeric]):
... warnings.simplefilter('always')
... warnings.filterwarnings('ignore', module='np.core.fromnumeric')
... # do something that raises a warning but ignore those in
@@ -1978,25 +2013,28 @@ class suppress_warnings(object):
Examples
--------
- >>> with suppress_warnings() as sup:
- ... sup.filter(DeprecationWarning, "Some text")
- ... sup.filter(module=np.ma.core)
- ... log = sup.record(FutureWarning, "Does this occur?")
- ... command_giving_warnings()
- ... # The FutureWarning was given once, the filtered warnings were
- ... # ignored. All other warnings abide outside settings (may be
- ... # printed/error)
- ... assert_(len(log) == 1)
- ... assert_(len(sup.log) == 1) # also stored in log attribute
-
- Or as a decorator:
-
- >>> sup = suppress_warnings()
- >>> sup.filter(module=np.ma.core) # module must match exact
- >>> @sup
- >>> def some_function():
- ... # do something which causes a warning in np.ma.core
- ... pass
+
+ With a context manager::
+
+ with np.testing.suppress_warnings() as sup:
+ sup.filter(DeprecationWarning, "Some text")
+ sup.filter(module=np.ma.core)
+ log = sup.record(FutureWarning, "Does this occur?")
+ command_giving_warnings()
+ # The FutureWarning was given once, the filtered warnings were
+ # ignored. All other warnings abide outside settings (may be
+ # printed/error)
+ assert_(len(log) == 1)
+ assert_(len(sup.log) == 1) # also stored in log attribute
+
+ Or as a decorator::
+
+ sup = np.testing.suppress_warnings()
+ sup.filter(module=np.ma.core) # module must match exactly
+ @sup
+ def some_function():
+ # do something which causes a warning in np.ma.core
+ pass
"""
def __init__(self, forwarding_rule="always"):
self._entered = False
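
How the two new diagnostics are computed, using the same definition of
relative error as ``np.isclose`` (arrays are illustrative):

    import numpy as np

    x = np.array([1.0, 2.0, 3.00003])
    y = np.array([1.0, 2.0, 3.00004])
    error = abs(x - y)
    print('Max absolute difference:', error.max())
    print('Max relative difference:', (error / abs(y)).max())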
diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py
index 43afafaa8..c376a3852 100644
--- a/numpy/testing/tests/test_utils.py
+++ b/numpy/testing/tests/test_utils.py
@@ -327,24 +327,22 @@ class TestEqual(TestArrayEqual):
self._test_not_equal(x, y)
def test_error_message(self):
- try:
+ with pytest.raises(AssertionError) as exc_info:
self._assert_func(np.array([1, 2]), np.array([[1, 2]]))
- except AssertionError as e:
- msg = str(e)
- msg2 = msg.replace("shapes (2L,), (1L, 2L)", "shapes (2,), (1, 2)")
- msg_reference = textwrap.dedent("""\
+ msg = str(exc_info.value)
+ msg2 = msg.replace("shapes (2L,), (1L, 2L)", "shapes (2,), (1, 2)")
+ msg_reference = textwrap.dedent("""\
- Arrays are not equal
+ Arrays are not equal
- (shapes (2,), (1, 2) mismatch)
- x: array([1, 2])
- y: array([[1, 2]])""")
- try:
- assert_equal(msg, msg_reference)
- except AssertionError:
- assert_equal(msg2, msg_reference)
- else:
- raise AssertionError("Did not raise")
+ (shapes (2,), (1, 2) mismatch)
+ x: array([1, 2])
+ y: array([[1, 2]])""")
+
+ try:
+ assert_equal(msg, msg_reference)
+ except AssertionError:
+ assert_equal(msg2, msg_reference)
class TestArrayAlmostEqual(_GenericTest):
@@ -509,38 +507,53 @@ class TestAlmostEqual(_GenericTest):
x = np.array([1.00000000001, 2.00000000002, 3.00003])
y = np.array([1.00000000002, 2.00000000003, 3.00004])
- # test with a different amount of decimal digits
- # note that we only check for the formatting of the arrays themselves
- b = ('x: array([1.00000000001, 2.00000000002, 3.00003 '
- ' ])\n y: array([1.00000000002, 2.00000000003, 3.00004 ])')
- try:
+ # Test with a different amount of decimal digits
+ with pytest.raises(AssertionError) as exc_info:
self._assert_func(x, y, decimal=12)
- except AssertionError as e:
- # remove anything that's not the array string
- assert_equal(str(e).split('%)\n ')[1], b)
-
- # with the default value of decimal digits, only the 3rd element differs
- # note that we only check for the formatting of the arrays themselves
- b = ('x: array([1. , 2. , 3.00003])\n y: array([1. , '
- '2. , 3.00004])')
- try:
+ msgs = str(exc_info.value).split('\n')
+ assert_equal(msgs[3], 'Mismatch: 100%')
+ assert_equal(msgs[4], 'Max absolute difference: 1.e-05')
+ assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06')
+ assert_equal(
+ msgs[6],
+ ' x: array([1.00000000001, 2.00000000002, 3.00003 ])')
+ assert_equal(
+ msgs[7],
+ ' y: array([1.00000000002, 2.00000000003, 3.00004 ])')
+
+ # With the default value of decimal digits, only the 3rd element
+ # differs. Note that we only check for the formatting of the arrays
+ # themselves.
+ with pytest.raises(AssertionError) as exc_info:
self._assert_func(x, y)
- except AssertionError as e:
- # remove anything that's not the array string
- assert_equal(str(e).split('%)\n ')[1], b)
-
- # Check the error message when input includes inf or nan
+ msgs = str(exc_info.value).split('\n')
+ assert_equal(msgs[3], 'Mismatch: 33.3%')
+ assert_equal(msgs[4], 'Max absolute difference: 1.e-05')
+ assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06')
+ assert_equal(msgs[6], ' x: array([1. , 2. , 3.00003])')
+ assert_equal(msgs[7], ' y: array([1. , 2. , 3.00004])')
+
+ # Check the error message when input includes inf
x = np.array([np.inf, 0])
y = np.array([np.inf, 1])
- try:
+ with pytest.raises(AssertionError) as exc_info:
+ self._assert_func(x, y)
+ msgs = str(exc_info.value).split('\n')
+ assert_equal(msgs[3], 'Mismatch: 50%')
+ assert_equal(msgs[4], 'Max absolute difference: 1.')
+ assert_equal(msgs[5], 'Max relative difference: 1.')
+ assert_equal(msgs[6], ' x: array([inf, 0.])')
+ assert_equal(msgs[7], ' y: array([inf, 1.])')
+
+ # Check the error message when dividing by zero
+ x = np.array([1, 2])
+ y = np.array([0, 0])
+ with pytest.raises(AssertionError) as exc_info:
self._assert_func(x, y)
- except AssertionError as e:
- msgs = str(e).split('\n')
- # assert error percentage is 50%
- assert_equal(msgs[3], '(mismatch 50.0%)')
- # assert output array contains inf
- assert_equal(msgs[4], ' x: array([inf, 0.])')
- assert_equal(msgs[5], ' y: array([inf, 1.])')
+ msgs = str(exc_info.value).split('\n')
+ assert_equal(msgs[3], 'Mismatch: 100%')
+ assert_equal(msgs[4], 'Max absolute difference: 2')
+ assert_equal(msgs[5], 'Max relative difference: inf')
def test_subclass_that_cannot_be_bool(self):
# While we cannot guarantee testing functions will always work for
@@ -829,12 +842,12 @@ class TestAssertAllclose(object):
def test_report_fail_percentage(self):
a = np.array([1, 1, 1, 1])
b = np.array([1, 1, 1, 2])
- try:
+
+ with pytest.raises(AssertionError) as exc_info:
assert_allclose(a, b)
- msg = ''
- except AssertionError as exc:
- msg = exc.args[0]
- assert_("mismatch 25.0%" in msg)
+ msg = str(exc_info.value)
+ assert_('Mismatch: 25%\nMax absolute difference: 1\n'
+ 'Max relative difference: 0.5' in msg)
def test_equal_nan(self):
a = np.array([np.nan])
@@ -1117,12 +1130,10 @@ class TestStringEqual(object):
assert_string_equal("hello", "hello")
assert_string_equal("hello\nmultiline", "hello\nmultiline")
- try:
+ with pytest.raises(AssertionError) as exc_info:
assert_string_equal("foo\nbar", "hello\nbar")
- except AssertionError as exc:
- assert_equal(str(exc), "Differences in strings:\n- foo\n+ hello")
- else:
- raise AssertionError("exception not raised")
+ msg = str(exc_info.value)
+ assert_equal(msg, "Differences in strings:\n- foo\n+ hello")
assert_raises(AssertionError,
lambda: assert_string_equal("foo", "hello"))
diff --git a/numpy/tests/test_ctypeslib.py b/numpy/tests/test_ctypeslib.py
index d389b37a8..521208c36 100644
--- a/numpy/tests/test_ctypeslib.py
+++ b/numpy/tests/test_ctypeslib.py
@@ -273,3 +273,95 @@ class TestAsArray(object):
# check we avoid the segfault
c_arr[0][0][0]
+
+
+@pytest.mark.skipif(ctypes is None,
+ reason="ctypes not available on this python installation")
+class TestAsCtypesType(object):
+ """ Test conversion from dtypes to ctypes types """
+ def test_scalar(self):
+ dt = np.dtype('<u2')
+ ct = np.ctypeslib.as_ctypes_type(dt)
+ assert_equal(ct, ctypes.c_uint16.__ctype_le__)
+
+ dt = np.dtype('>u2')
+ ct = np.ctypeslib.as_ctypes_type(dt)
+ assert_equal(ct, ctypes.c_uint16.__ctype_be__)
+
+ dt = np.dtype('u2')
+ ct = np.ctypeslib.as_ctypes_type(dt)
+ assert_equal(ct, ctypes.c_uint16)
+
+ def test_subarray(self):
+ dt = np.dtype((np.int32, (2, 3)))
+ ct = np.ctypeslib.as_ctypes_type(dt)
+ assert_equal(ct, 2 * (3 * ctypes.c_int32))
+
+ def test_structure(self):
+ dt = np.dtype([
+ ('a', np.uint16),
+ ('b', np.uint32),
+ ])
+
+ ct = np.ctypeslib.as_ctypes_type(dt)
+ assert_(issubclass(ct, ctypes.Structure))
+ assert_equal(ctypes.sizeof(ct), dt.itemsize)
+ assert_equal(ct._fields_, [
+ ('a', ctypes.c_uint16),
+ ('b', ctypes.c_uint32),
+ ])
+
+ def test_structure_aligned(self):
+ dt = np.dtype([
+ ('a', np.uint16),
+ ('b', np.uint32),
+ ], align=True)
+
+ ct = np.ctypeslib.as_ctypes_type(dt)
+ assert_(issubclass(ct, ctypes.Structure))
+ assert_equal(ctypes.sizeof(ct), dt.itemsize)
+ assert_equal(ct._fields_, [
+ ('a', ctypes.c_uint16),
+ ('', ctypes.c_char * 2), # padding
+ ('b', ctypes.c_uint32),
+ ])
+
+ def test_union(self):
+ dt = np.dtype(dict(
+ names=['a', 'b'],
+ offsets=[0, 0],
+ formats=[np.uint16, np.uint32]
+ ))
+
+ ct = np.ctypeslib.as_ctypes_type(dt)
+ assert_(issubclass(ct, ctypes.Union))
+ assert_equal(ctypes.sizeof(ct), dt.itemsize)
+ assert_equal(ct._fields_, [
+ ('a', ctypes.c_uint16),
+ ('b', ctypes.c_uint32),
+ ])
+
+ def test_padded_union(self):
+ dt = np.dtype(dict(
+ names=['a', 'b'],
+ offsets=[0, 0],
+ formats=[np.uint16, np.uint32],
+ itemsize=5,
+ ))
+
+ ct = np.ctypeslib.as_ctypes_type(dt)
+ assert_(issubclass(ct, ctypes.Union))
+ assert_equal(ctypes.sizeof(ct), dt.itemsize)
+ assert_equal(ct._fields_, [
+ ('a', ctypes.c_uint16),
+ ('b', ctypes.c_uint32),
+ ('', ctypes.c_char * 5), # padding
+ ])
+
+ def test_overlapping(self):
+ dt = np.dtype(dict(
+ names=['a', 'b'],
+ offsets=[0, 2],
+ formats=[np.uint32, np.uint32]
+ ))
+ assert_raises(NotImplementedError, np.ctypeslib.as_ctypes_type, dt)
diff --git a/numpy/tests/test_scripts.py b/numpy/tests/test_scripts.py
index 9e27cc6ce..e42dc25f9 100644
--- a/numpy/tests/test_scripts.py
+++ b/numpy/tests/test_scripts.py
@@ -8,7 +8,7 @@ import sys
import os
import pytest
from os.path import join as pathjoin, isfile, dirname
-from subprocess import Popen, PIPE
+import subprocess
import numpy as np
from numpy.compat.py3k import basestring
@@ -17,74 +17,13 @@ from numpy.testing import assert_, assert_equal
is_inplace = isfile(pathjoin(dirname(np.__file__), '..', 'setup.py'))
-def run_command(cmd, check_code=True):
- """ Run command sequence `cmd` returning exit code, stdout, stderr
-
- Parameters
- ----------
- cmd : str or sequence
- string with command name or sequence of strings defining command
- check_code : {True, False}, optional
- If True, raise error for non-zero return code
-
- Returns
- -------
- returncode : int
- return code from execution of `cmd`
- stdout : bytes (python 3) or str (python 2)
- stdout from `cmd`
- stderr : bytes (python 3) or str (python 2)
- stderr from `cmd`
-
- Raises
- ------
- RuntimeError
- If `check_code` is True, and return code !=0
- """
- cmd = [cmd] if isinstance(cmd, basestring) else list(cmd)
- if os.name == 'nt':
- # Quote any arguments with spaces. The quotes delimit the arguments
- # on Windows, and the arguments might be file paths with spaces.
- # On Unix the list elements are each separate arguments.
- cmd = ['"{0}"'.format(c) if ' ' in c else c for c in cmd]
- proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
- stdout, stderr = proc.communicate()
- if proc.poll() is None:
- proc.terminate()
- if check_code and proc.returncode != 0:
- raise RuntimeError('\n'.join(
- ['Command "{0}" failed with',
- 'stdout', '------', '{1}', '',
- 'stderr', '------', '{2}']).format(cmd, stdout, stderr))
- return proc.returncode, stdout, stderr
-
-
-@pytest.mark.skipif(is_inplace, reason="Cannot test f2py command inplace")
-@pytest.mark.xfail(reason="Test is unreliable")
-def test_f2py():
- # test that we can run f2py script
-
- def try_f2py_commands(cmds):
- success = 0
- for f2py_cmd in cmds:
- try:
- code, stdout, stderr = run_command([f2py_cmd, '-v'])
- assert_equal(stdout.strip(), b'2')
- success += 1
- except Exception:
- pass
- return success
-
+def find_f2py_commands():
if sys.platform == 'win32':
- # Only the single 'f2py' script is installed in windows.
exe_dir = dirname(sys.executable)
if exe_dir.endswith('Scripts'): # virtualenv
- f2py_cmds = [os.path.join(exe_dir, 'f2py')]
+ return [os.path.join(exe_dir, 'f2py')]
else:
- f2py_cmds = [os.path.join(exe_dir, "Scripts", 'f2py')]
- success = try_f2py_commands(f2py_cmds)
- msg = "Warning: f2py not found in path"
- assert_(success == 1, msg)
+ return [os.path.join(exe_dir, "Scripts", 'f2py')]
else:
# Three scripts are installed in Unix-like systems:
# 'f2py', 'f2py{major}', and 'f2py{major.minor}'. For example,
@@ -93,7 +32,18 @@ def test_f2py():
version = sys.version_info
major = str(version.major)
minor = str(version.minor)
- f2py_cmds = ('f2py', 'f2py' + major, 'f2py' + major + '.' + minor)
- success = try_f2py_commands(f2py_cmds)
- msg = "Warning: not all of %s, %s, and %s are found in path" % f2py_cmds
- assert_(success == 3, msg)
+ return ['f2py', 'f2py' + major, 'f2py' + major + '.' + minor]
+
+
+@pytest.mark.skipif(is_inplace, reason="Cannot test f2py command inplace")
+@pytest.mark.xfail(reason="Test is unreliable")
+@pytest.mark.parametrize('f2py_cmd', find_f2py_commands())
+def test_f2py(f2py_cmd):
+ # test that we can run f2py script
+ stdout = subprocess.check_output([f2py_cmd, '-v'])
+ assert_equal(stdout.strip(), b'2')
+
+
+def test_pep338():
+ stdout = subprocess.check_output([sys.executable, '-mnumpy.f2py', '-v'])
+ assert_equal(stdout.strip(), b'2')