-rw-r--r--  .appveyor.yml | 9
-rw-r--r--  .circleci/config.yml | 7
-rw-r--r--  .codecov.yml | 29
-rw-r--r--  .github/CODE_OF_CONDUCT.md | 1
-rw-r--r--  .github/CONTRIBUTING.md (renamed from CONTRIBUTING.md) | 6
-rw-r--r--  .github/ISSUE_TEMPLATE.md | 27
-rw-r--r--  .github/PULL_REQUEST_TEMPLATE.md | 7
-rw-r--r--  .gitignore | 7
-rw-r--r--  .lgtm.yml | 18
-rw-r--r--  .mailmap | 26
-rw-r--r--  .travis.yml | 15
-rw-r--r--  LICENSE.txt | 4
-rw-r--r--  MANIFEST.in | 35
-rw-r--r--  README.md | 19
-rw-r--r--  azure-pipelines.yml | 97
-rw-r--r--  benchmarks/README.rst | 8
-rw-r--r--  benchmarks/asv.conf.json | 4
-rw-r--r--  benchmarks/benchmarks/bench_function_base.py | 28
-rw-r--r--  benchmarks/benchmarks/bench_io.py | 181
-rw-r--r--  benchmarks/benchmarks/bench_lib.py | 5
-rw-r--r--  benchmarks/benchmarks/bench_ma.py | 4
-rw-r--r--  benchmarks/benchmarks/bench_overrides.py | 69
-rw-r--r--  benchmarks/benchmarks/bench_reduce.py | 6
-rw-r--r--  benchmarks/benchmarks/bench_shape_base.py | 91
-rw-r--r--  benchmarks/benchmarks/bench_ufunc.py | 59
-rw-r--r--  doc/CAPI.rst.txt | 320
-rw-r--r--  doc/HOWTO_DOCUMENT.rst.txt | 2
-rw-r--r--  doc/HOWTO_RELEASE.rst.txt | 60
-rw-r--r--  doc/Makefile | 2
-rw-r--r--  doc/Py3K.rst.txt | 4
-rw-r--r--  doc/RELEASE_WALKTHROUGH.rst.txt | 101
-rw-r--r--  doc/cdoc/Doxyfile | 19
-rwxr-xr-x  doc/cdoc/numpyfilter.py | 5
-rw-r--r--  doc/changelog/1.16.0-changelog.rst | 616
-rw-r--r--  doc/neps/_static/nep-0000.png | Bin 20813 -> 12925 bytes
-rw-r--r--  doc/neps/_static/nep0013_image1.png | Bin 0 -> 18653 bytes
-rw-r--r--  doc/neps/_static/nep0013_image2.png | Bin 0 -> 6313 bytes
-rw-r--r--  doc/neps/_static/nep0013_image3.png | Bin 0 -> 11704 bytes
-rw-r--r--  doc/neps/conf.py | 11
-rw-r--r--  doc/neps/index.rst.tmpl | 67
-rw-r--r--  doc/neps/nep-0000.rst | 102
-rw-r--r--  doc/neps/nep-0001-npy-format.rst | 12
-rw-r--r--  doc/neps/nep-0002-warnfix.rst | 6
-rw-r--r--  doc/neps/nep-0003-math_config_clean.rst | 6
-rw-r--r--  doc/neps/nep-0004-datetime-proposal3.rst | 12
-rw-r--r--  doc/neps/nep-0005-generalized-ufuncs.rst | 6
-rw-r--r--  doc/neps/nep-0006-newbugtracker.rst | 6
-rw-r--r--  doc/neps/nep-0007-datetime-proposal.rst | 12
-rw-r--r--  doc/neps/nep-0008-groupby_additions.rst | 6
-rw-r--r--  doc/neps/nep-0009-structured_array_extensions.rst | 6
-rw-r--r--  doc/neps/nep-0010-new-iterator-ufunc.rst | 6
-rw-r--r--  doc/neps/nep-0011-deferred-ufunc-evaluation.rst | 6
-rw-r--r--  doc/neps/nep-0012-missing-data.rst | 16
-rw-r--r--  doc/neps/nep-0013-ufunc-overrides.rst | 42
-rw-r--r--  doc/neps/nep-0014-dropping-python2.7-proposal.rst | 8
-rw-r--r--  doc/neps/nep-0015-merge-multiarray-umath.rst | 157
-rw-r--r--  doc/neps/nep-0016-abstract-array.rst | 328
-rw-r--r--  doc/neps/nep-0016-benchmark.py | 48
-rw-r--r--  doc/neps/nep-0017-split-out-maskedarray.rst | 6
-rw-r--r--  doc/neps/nep-0018-array-function-protocol.rst | 775
-rw-r--r--  doc/neps/nep-0019-rng-policy.rst | 263
-rw-r--r--  doc/neps/nep-0020-gufunc-signature-enhancement.rst | 257
-rw-r--r--  doc/neps/nep-0021-advanced-indexing.rst | 661
-rw-r--r--  doc/neps/nep-0022-ndarray-duck-typing-overview.rst | 352
-rw-r--r--  doc/neps/nep-0023-backwards-compatibility.rst | 288
-rw-r--r--  doc/neps/nep-0024-missing-data-2.rst | 210
-rw-r--r--  doc/neps/nep-0025-missing-data-3.rst | 469
-rw-r--r--  doc/neps/nep-0026-missing-data-summary.rst | 730
-rw-r--r--  doc/neps/nep-0027-zero-rank-arrarys.rst | 254
-rw-r--r--  doc/neps/nep-template.rst | 2
-rw-r--r--  doc/neps/roadmap.rst | 115
-rw-r--r--  doc/neps/scope.rst | 46
-rw-r--r--  doc/neps/tools/build_index.py | 4
-rw-r--r--  doc/release/1.13.0-notes.rst | 6
-rw-r--r--  doc/release/1.13.1-notes.rst | 4
-rw-r--r--  doc/release/1.14.0-notes.rst | 8
-rw-r--r--  doc/release/1.14.1-notes.rst | 2
-rw-r--r--  doc/release/1.15.0-notes.rst | 9
-rw-r--r--  doc/release/1.16.0-notes.rst | 534
-rw-r--r--  doc/release/1.3.0-notes.rst | 6
-rw-r--r--  doc/release/1.7.0-notes.rst | 8
-rw-r--r--  doc/release/template.rst | 43
-rw-r--r--  doc/release/time_based_proposal.rst | 4
-rw-r--r--  doc/source/_templates/autosummary/attribute.rst | 10
-rw-r--r--  doc/source/_templates/autosummary/member.rst | 11
-rw-r--r--  doc/source/_templates/autosummary/method.rst | 10
-rw-r--r--  doc/source/_templates/indexcontent.html | 2
-rw-r--r--  doc/source/_templates/indexsidebar.html | 2
-rw-r--r--  doc/source/about.rst | 10
-rw-r--r--  doc/source/bugs.rst | 6
-rw-r--r--  doc/source/conf.py | 10
-rw-r--r--  doc/source/contents.rst | 1
-rw-r--r--  doc/source/dev/conduct/code_of_conduct.rst | 163
-rw-r--r--  doc/source/dev/conduct/report_handling_manual.rst | 220
-rw-r--r--  doc/source/dev/development_environment.rst | 16
-rw-r--r--  doc/source/dev/gitwash/development_workflow.rst | 4
-rw-r--r--  doc/source/dev/gitwash/git_links.inc | 66
-rw-r--r--  doc/source/dev/gitwash_links.txt | 6
-rw-r--r--  doc/source/dev/governance/people.rst | 4
-rw-r--r--  doc/source/dev/index.rst | 3
-rw-r--r--  doc/source/dev/releasing.rst | 16
-rw-r--r--  doc/source/dev/style_guide.rst | 8
-rw-r--r--  doc/source/docs/howto_build_docs.rst | 14
-rw-r--r--  doc/source/docs/howto_document.rst | 4
-rw-r--r--  doc/source/f2py/compile_session.dat | 4
-rw-r--r--  doc/source/f2py/getting-started.rst | 10
-rw-r--r--  doc/source/f2py/index.rst | 5
-rw-r--r--  doc/source/f2py/run_main_session.dat | 12
-rw-r--r--  doc/source/f2py/signature-file.rst | 2
-rw-r--r--  doc/source/f2py/usage.rst | 24
-rw-r--r--  doc/source/reference/alignment.rst | 104
-rw-r--r--  doc/source/reference/arrays.datetime.rst | 3
-rw-r--r--  doc/source/reference/arrays.dtypes.rst | 8
-rw-r--r--  doc/source/reference/arrays.indexing.rst | 17
-rw-r--r--  doc/source/reference/arrays.interface.rst | 2
-rw-r--r--  doc/source/reference/arrays.ndarray.rst | 2
-rw-r--r--  doc/source/reference/c-api.array.rst | 38
-rw-r--r--  doc/source/reference/c-api.coremath.rst | 10
-rw-r--r--  doc/source/reference/c-api.dtype.rst | 2
-rw-r--r--  doc/source/reference/c-api.generalized-ufuncs.rst | 58
-rw-r--r--  doc/source/reference/c-api.types-and-structures.rst | 106
-rw-r--r--  doc/source/reference/c-api.ufunc.rst | 108
-rw-r--r--  doc/source/reference/distutils.rst | 3
-rw-r--r--  doc/source/reference/distutils_guide.rst | 7
-rw-r--r--  doc/source/reference/index.rst | 3
-rw-r--r--  doc/source/reference/internals.code-explanations.rst | 8
-rw-r--r--  doc/source/reference/internals.rst | 1
-rw-r--r--  doc/source/reference/maskedarray.generic.rst | 2
-rw-r--r--  doc/source/reference/routines.ctypeslib.rst | 2
-rw-r--r--  doc/source/reference/routines.linalg.rst | 2
-rw-r--r--  doc/source/reference/routines.matlib.rst | 2
-rw-r--r--  doc/source/reference/routines.numarray.rst | 5
-rw-r--r--  doc/source/reference/routines.oldnumeric.rst | 7
-rw-r--r--  doc/source/reference/routines.polynomials.classes.rst | 15
-rw-r--r--  doc/source/reference/routines.polynomials.package.rst | 2
-rw-r--r--  doc/source/reference/routines.polynomials.polynomial.rst | 2
-rw-r--r--  doc/source/reference/routines.random.rst | 2
-rw-r--r--  doc/source/reference/routines.testing.rst | 2
-rw-r--r--  doc/source/release.rst | 2
-rw-r--r--  doc/source/user/basics.broadcasting.rst | 6
-rw-r--r--  doc/source/user/building.rst | 2
-rw-r--r--  doc/source/user/c-info.beyond-basics.rst | 34
-rw-r--r--  doc/source/user/c-info.how-to-extend.rst | 2
-rw-r--r--  doc/source/user/c-info.python-as-glue.rst | 42
-rw-r--r--  doc/source/user/c-info.ufunc-tutorial.rst | 133
-rw-r--r--  doc/source/user/install.rst | 2
-rw-r--r--  doc/source/user/numpy-for-matlab-users.rst | 6
-rw-r--r--  doc/source/user/quickstart.rst | 12
-rw-r--r--  doc/source/user/theory.broadcast_1.gif | Bin 0 -> 2987 bytes
-rw-r--r--  doc/source/user/theory.broadcast_2.gif | Bin 0 -> 6641 bytes
-rw-r--r--  doc/source/user/theory.broadcast_3.gif | Bin 0 -> 4681 bytes
-rw-r--r--  doc/source/user/theory.broadcast_4.gif | Bin 0 -> 7287 bytes
-rw-r--r--  doc/source/user/theory.broadcast_5.png | Bin 0 -> 16721 bytes
-rw-r--r--  doc/source/user/theory.broadcasting.rst | 229
-rw-r--r--  numpy/__init__.py | 27
-rw-r--r--  numpy/_globals.py | 11
-rw-r--r--  numpy/_import_tools.py | 351
-rw-r--r--  numpy/_pytesttester.py (renamed from numpy/testing/_private/pytesttester.py) | 16
-rw-r--r--  numpy/compat/_inspect.py | 5
-rw-r--r--  numpy/compat/py3k.py | 93
-rw-r--r--  numpy/conftest.py | 2
-rw-r--r--  numpy/core/__init__.py | 33
-rw-r--r--  numpy/core/_add_newdocs.py (renamed from numpy/add_newdocs.py) | 2031
-rw-r--r--  numpy/core/_aliased_types.py | 0
-rw-r--r--  numpy/core/_dtype.py | 341
-rw-r--r--  numpy/core/_dtype_ctypes.py | 113
-rw-r--r--  numpy/core/_internal.py | 260
-rw-r--r--  numpy/core/_methods.py | 12
-rw-r--r--  numpy/core/_string_helpers.py | 100
-rw-r--r--  numpy/core/_type_aliases.py | 282
-rw-r--r--  numpy/core/arrayprint.py | 167
-rw-r--r--  numpy/core/code_generators/cversions.txt | 7
-rw-r--r--  numpy/core/code_generators/genapi.py | 11
-rw-r--r--  numpy/core/code_generators/generate_numpy_api.py | 7
-rw-r--r--  numpy/core/code_generators/generate_ufunc_api.py | 5
-rw-r--r--  numpy/core/code_generators/generate_umath.py | 86
-rw-r--r--  numpy/core/code_generators/numpy_api.py | 2
-rw-r--r--  numpy/core/code_generators/ufunc_docstrings.py | 148
-rw-r--r--  numpy/core/defchararray.py | 156
-rw-r--r--  numpy/core/einsumfunc.py | 260
-rw-r--r--  numpy/core/fromnumeric.py | 303
-rw-r--r--  numpy/core/function_base.py | 174
-rw-r--r--  numpy/core/getlimits.py | 356
-rw-r--r--  numpy/core/include/numpy/ndarrayobject.h | 8
-rw-r--r--  numpy/core/include/numpy/ndarraytypes.h | 7
-rw-r--r--  numpy/core/include/numpy/npy_1_7_deprecated_api.h | 11
-rw-r--r--  numpy/core/include/numpy/npy_3kcompat.h | 83
-rw-r--r--  numpy/core/include/numpy/npy_common.h | 2
-rw-r--r--  numpy/core/include/numpy/npy_endian.h | 43
-rw-r--r--  numpy/core/include/numpy/ufuncobject.h | 46
-rw-r--r--  numpy/core/machar.py | 2
-rw-r--r--  numpy/core/memmap.py | 136
-rw-r--r--  numpy/core/multiarray.py | 1561
-rw-r--r--  numpy/core/numeric.py | 226
-rw-r--r--  numpy/core/numerictypes.py | 435
-rw-r--r--  numpy/core/overrides.py | 241
-rw-r--r--  numpy/core/records.py | 31
-rw-r--r--  numpy/core/setup.py | 177
-rw-r--r--  numpy/core/setup_common.py | 6
-rw-r--r--  numpy/core/shape_base.py | 373
-rw-r--r--  numpy/core/src/common/array_assign.c (renamed from numpy/core/src/multiarray/array_assign.c) | 64
-rw-r--r--  numpy/core/src/common/array_assign.h (renamed from numpy/core/src/multiarray/array_assign.h) | 22
-rw-r--r--  numpy/core/src/common/binop_override.h (renamed from numpy/core/src/private/binop_override.h) | 0
-rw-r--r--  numpy/core/src/common/cblasfuncs.c (renamed from numpy/core/src/multiarray/cblasfuncs.c) | 110
-rw-r--r--  numpy/core/src/common/cblasfuncs.h (renamed from numpy/core/src/multiarray/cblasfuncs.h) | 0
-rw-r--r--  numpy/core/src/common/get_attr_string.h (renamed from numpy/core/src/private/get_attr_string.h) | 0
-rw-r--r--  numpy/core/src/common/lowlevel_strided_loops.h (renamed from numpy/core/src/private/lowlevel_strided_loops.h) | 14
-rw-r--r--  numpy/core/src/common/mem_overlap.c (renamed from numpy/core/src/private/mem_overlap.c) | 0
-rw-r--r--  numpy/core/src/common/mem_overlap.h (renamed from numpy/core/src/private/mem_overlap.h) | 0
-rw-r--r--  numpy/core/src/common/npy_binsearch.h.src (renamed from numpy/core/src/private/npy_binsearch.h.src) | 0
-rw-r--r--  numpy/core/src/common/npy_cblas.h (renamed from numpy/core/src/private/npy_cblas.h) | 0
-rw-r--r--  numpy/core/src/common/npy_config.h (renamed from numpy/core/src/private/npy_config.h) | 16
-rw-r--r--  numpy/core/src/common/npy_ctypes.h | 49
-rw-r--r--  numpy/core/src/common/npy_extint128.h (renamed from numpy/core/src/private/npy_extint128.h) | 0
-rw-r--r--  numpy/core/src/common/npy_fpmath.h (renamed from numpy/core/src/private/npy_fpmath.h) | 0
-rw-r--r--  numpy/core/src/common/npy_import.h (renamed from numpy/core/src/private/npy_import.h) | 0
-rw-r--r--  numpy/core/src/common/npy_longdouble.c (renamed from numpy/core/src/private/npy_longdouble.c) | 18
-rw-r--r--  numpy/core/src/common/npy_longdouble.h (renamed from numpy/core/src/private/npy_longdouble.h) | 0
-rw-r--r--  numpy/core/src/common/npy_partition.h.src (renamed from numpy/core/src/private/npy_partition.h.src) | 0
-rw-r--r--  numpy/core/src/common/npy_pycompat.h (renamed from numpy/core/src/private/npy_pycompat.h) | 0
-rw-r--r--  numpy/core/src/common/npy_sort.h.src | 83
-rw-r--r--  numpy/core/src/common/numpyos.c (renamed from numpy/core/src/multiarray/numpyos.c) | 28
-rw-r--r--  numpy/core/src/common/numpyos.h (renamed from numpy/core/src/multiarray/numpyos.h) | 7
-rw-r--r--  numpy/core/src/common/python_xerbla.c (renamed from numpy/core/src/multiarray/python_xerbla.c) | 0
-rw-r--r--  numpy/core/src/common/templ_common.h.src (renamed from numpy/core/src/private/templ_common.h.src) | 0
-rw-r--r--  numpy/core/src/common/ucsnarrow.c (renamed from numpy/core/src/multiarray/ucsnarrow.c) | 0
-rw-r--r--  numpy/core/src/common/ucsnarrow.h (renamed from numpy/core/src/multiarray/ucsnarrow.h) | 0
-rw-r--r--  numpy/core/src/common/ufunc_override.c | 121
-rw-r--r--  numpy/core/src/common/ufunc_override.h | 37
-rw-r--r--  numpy/core/src/common/umathmodule.h | 8
-rw-r--r--  numpy/core/src/multiarray/_multiarray_tests.c.src | 72
-rw-r--r--  numpy/core/src/multiarray/alloc.c | 31
-rw-r--r--  numpy/core/src/multiarray/array_assign_array.c | 48
-rw-r--r--  numpy/core/src/multiarray/array_assign_scalar.c | 29
-rw-r--r--  numpy/core/src/multiarray/arrayobject.c | 224
-rw-r--r--  numpy/core/src/multiarray/arraytypes.c.src | 183
-rw-r--r--  numpy/core/src/multiarray/buffer.c | 185
-rw-r--r--  numpy/core/src/multiarray/calculation.c | 33
-rw-r--r--  numpy/core/src/multiarray/common.c | 167
-rw-r--r--  numpy/core/src/multiarray/common.h | 71
-rw-r--r--  numpy/core/src/multiarray/compiled_base.c | 156
-rw-r--r--  numpy/core/src/multiarray/compiled_base.h | 2
-rw-r--r--  numpy/core/src/multiarray/convert.c | 16
-rw-r--r--  numpy/core/src/multiarray/convert_datatype.c | 19
-rw-r--r--  numpy/core/src/multiarray/ctors.c | 82
-rw-r--r--  numpy/core/src/multiarray/datetime.c | 66
-rw-r--r--  numpy/core/src/multiarray/datetime_strings.c | 2
-rw-r--r--  numpy/core/src/multiarray/descriptor.c | 687
-rw-r--r--  numpy/core/src/multiarray/descriptor.h | 28
-rw-r--r--  numpy/core/src/multiarray/dtype_transfer.c | 57
-rw-r--r--  numpy/core/src/multiarray/flagsobject.c | 3
-rw-r--r--  numpy/core/src/multiarray/item_selection.c | 7
-rw-r--r--  numpy/core/src/multiarray/iterators.c | 108
-rw-r--r--  numpy/core/src/multiarray/iterators.h | 11
-rw-r--r--  numpy/core/src/multiarray/lowlevel_strided_loops.c.src | 66
-rw-r--r--  numpy/core/src/multiarray/mapping.c | 93
-rw-r--r--  numpy/core/src/multiarray/methods.c | 236
-rw-r--r--  numpy/core/src/multiarray/multiarraymodule.c | 372
-rw-r--r--  numpy/core/src/multiarray/nditer_constr.c | 39
-rw-r--r--  numpy/core/src/multiarray/number.c | 104
-rw-r--r--  numpy/core/src/multiarray/number.h | 5
-rw-r--r--  numpy/core/src/multiarray/scalarapi.c | 2
-rw-r--r--  numpy/core/src/multiarray/scalartypes.c.src | 80
-rw-r--r--  numpy/core/src/multiarray/shape.c | 17
-rw-r--r--  numpy/core/src/multiarray/temp_elide.c | 2
-rw-r--r--  numpy/core/src/npymath/ieee754.c.src | 151
-rw-r--r--  numpy/core/src/private/npy_sort.h | 204
-rw-r--r--  numpy/core/src/private/ufunc_override.c | 163
-rw-r--r--  numpy/core/src/private/ufunc_override.h | 15
-rw-r--r--  numpy/core/src/umath/_umath_tests.c.src | 139
-rw-r--r--  numpy/core/src/umath/cpuid.c | 4
-rw-r--r--  numpy/core/src/umath/extobj.c | 4
-rw-r--r--  numpy/core/src/umath/loops.c.src | 106
-rw-r--r--  numpy/core/src/umath/loops.h.src | 3
-rw-r--r--  numpy/core/src/umath/matmul.c.src | 402
-rw-r--r--  numpy/core/src/umath/matmul.h.src | 12
-rw-r--r--  numpy/core/src/umath/override.c | 94
-rw-r--r--  numpy/core/src/umath/reduction.c | 4
-rw-r--r--  numpy/core/src/umath/scalarmath.c.src | 21
-rw-r--r--  numpy/core/src/umath/simd.inc.src | 272
-rw-r--r--  numpy/core/src/umath/ufunc_object.c | 1817
-rw-r--r--  numpy/core/src/umath/ufunc_object.h | 28
-rw-r--r--  numpy/core/src/umath/ufunc_type_resolution.c | 168
-rw-r--r--  numpy/core/src/umath/ufunc_type_resolution.h | 8
-rw-r--r--  numpy/core/src/umath/umathmodule.c | 156
-rw-r--r--  numpy/core/tests/_locales.py | 8
-rw-r--r--  numpy/core/tests/test_arrayprint.py | 14
-rw-r--r--  numpy/core/tests/test_datetime.py | 185
-rw-r--r--  numpy/core/tests/test_defchararray.py | 26
-rw-r--r--  numpy/core/tests/test_deprecations.py | 39
-rw-r--r--  numpy/core/tests/test_dtype.py | 239
-rw-r--r--  numpy/core/tests/test_einsum.py | 5
-rw-r--r--  numpy/core/tests/test_errstate.py | 14
-rw-r--r--  numpy/core/tests/test_extint128.py | 2
-rw-r--r--  numpy/core/tests/test_function_base.py | 53
-rw-r--r--  numpy/core/tests/test_getlimits.py | 15
-rw-r--r--  numpy/core/tests/test_half.py | 4
-rw-r--r--  numpy/core/tests/test_indexing.py | 1
-rw-r--r--  numpy/core/tests/test_mem_overlap.py | 2
-rw-r--r--  numpy/core/tests/test_multiarray.py | 892
-rw-r--r--  numpy/core/tests/test_nditer.py | 88
-rw-r--r--  numpy/core/tests/test_numeric.py | 30
-rw-r--r--  numpy/core/tests/test_numerictypes.py | 99
-rw-r--r--  numpy/core/tests/test_overrides.py | 388
-rw-r--r--  numpy/core/tests/test_print.py | 73
-rw-r--r--  numpy/core/tests/test_records.py | 47
-rw-r--r--  numpy/core/tests/test_regression.py | 120
-rw-r--r--  numpy/core/tests/test_scalarbuffer.py | 43
-rw-r--r--  numpy/core/tests/test_scalarinherit.py | 5
-rw-r--r--  numpy/core/tests/test_scalarmath.py | 16
-rw-r--r--  numpy/core/tests/test_scalarprint.py | 2
-rw-r--r--  numpy/core/tests/test_shape_base.py | 196
-rw-r--r--  numpy/core/tests/test_ufunc.py | 215
-rw-r--r--  numpy/core/tests/test_umath.py | 98
-rw-r--r--  numpy/core/umath.py | 35
-rw-r--r--  numpy/ctypeslib.py | 92
-rw-r--r--  numpy/distutils/__init__.py | 4
-rw-r--r--  numpy/distutils/ccompiler.py | 88
-rw-r--r--  numpy/distutils/command/build_ext.py | 18
-rw-r--r--  numpy/distutils/command/build_src.py | 2
-rw-r--r--  numpy/distutils/command/config.py | 36
-rw-r--r--  numpy/distutils/command/config_compiler.py | 9
-rw-r--r--  numpy/distutils/conv_template.py | 6
-rw-r--r--  numpy/distutils/core.py | 4
-rw-r--r--  numpy/distutils/exec_command.py | 54
-rw-r--r--  numpy/distutils/fcompiler/__init__.py | 84
-rw-r--r--  numpy/distutils/fcompiler/environment.py (renamed from numpy/distutils/environment.py) | 11
-rw-r--r--  numpy/distutils/fcompiler/gnu.py | 42
-rw-r--r--  numpy/distutils/fcompiler/ibm.py | 13
-rw-r--r--  numpy/distutils/fcompiler/pg.py | 3
-rw-r--r--  numpy/distutils/lib2def.py | 1
-rw-r--r--  numpy/distutils/mingw32ccompiler.py | 1
-rw-r--r--  numpy/distutils/misc_util.py | 16
-rw-r--r--  numpy/distutils/npy_pkg_config.py | 9
-rw-r--r--  numpy/distutils/system_info.py | 29
-rw-r--r--  numpy/distutils/tests/test_fcompiler.py | 44
-rw-r--r--  numpy/distutils/tests/test_misc_util.py | 3
-rw-r--r--  numpy/doc/basics.py | 184
-rw-r--r--  numpy/doc/broadcasting.py | 18
-rw-r--r--  numpy/doc/glossary.py | 6
-rw-r--r--  numpy/doc/indexing.py | 2
-rw-r--r--  numpy/doc/misc.py | 2
-rw-r--r--  numpy/doc/structured_arrays.py | 112
-rw-r--r--  numpy/doc/subclassing.py | 2
-rw-r--r--  numpy/dual.py | 2
-rw-r--r--  numpy/f2py/__init__.py | 55
-rw-r--r--  numpy/f2py/__main__.py | 25
-rw-r--r--  numpy/f2py/capi_maps.py | 5
-rw-r--r--  numpy/f2py/common_rules.py | 8
-rwxr-xr-x  numpy/f2py/crackfortran.py | 109
-rwxr-xr-x  numpy/f2py/f2py2e.py | 24
-rw-r--r--  numpy/f2py/rules.py | 2
-rw-r--r--  numpy/f2py/setup.py | 54
-rw-r--r--  numpy/f2py/src/test/foomodule.c | 2
-rw-r--r--  numpy/f2py/tests/test_array_from_pyobj.py | 23
-rw-r--r--  numpy/f2py/tests/test_block_docstring.py | 1
-rw-r--r--  numpy/f2py/tests/test_callback.py | 6
-rw-r--r--  numpy/f2py/tests/test_compile_function.py | 108
-rw-r--r--  numpy/f2py/tests/test_parameter.py | 1
-rw-r--r--  numpy/f2py/tests/test_quoted_character.py | 35
-rw-r--r--  numpy/f2py/tests/test_regression.py | 1
-rw-r--r--  numpy/f2py/tests/test_return_character.py | 12
-rw-r--r--  numpy/f2py/tests/test_return_complex.py | 12
-rw-r--r--  numpy/f2py/tests/test_return_integer.py | 14
-rw-r--r--  numpy/f2py/tests/test_return_logical.py | 13
-rw-r--r--  numpy/f2py/tests/util.py | 14
-rw-r--r--  numpy/fft/__init__.py | 2
-rw-r--r--  numpy/fft/fftpack.py | 36
-rw-r--r--  numpy/fft/helper.py | 10
-rw-r--r--  numpy/lib/__init__.py | 4
-rw-r--r--  numpy/lib/_datasource.py | 9
-rw-r--r--  numpy/lib/_iotools.py | 4
-rw-r--r--  numpy/lib/arraypad.py | 159
-rw-r--r--  numpy/lib/arraysetops.py | 83
-rw-r--r--  numpy/lib/financial.py | 69
-rw-r--r--  numpy/lib/format.py | 62
-rw-r--r--  numpy/lib/function_base.py | 462
-rw-r--r--  numpy/lib/histograms.py | 105
-rw-r--r--  numpy/lib/index_tricks.py | 155
-rw-r--r--  numpy/lib/mixins.py | 5
-rw-r--r--  numpy/lib/nanfunctions.py | 97
-rw-r--r--  numpy/lib/npyio.py | 149
-rw-r--r--  numpy/lib/polynomial.py | 102
-rw-r--r--  numpy/lib/recfunctions.py | 429
-rw-r--r--  numpy/lib/scimath.py | 38
-rw-r--r--  numpy/lib/shape_base.py | 96
-rw-r--r--  numpy/lib/stride_tricks.py | 27
-rw-r--r--  numpy/lib/tests/test__datasource.py | 23
-rw-r--r--  numpy/lib/tests/test__iotools.py | 3
-rw-r--r--  numpy/lib/tests/test_arraypad.py | 159
-rw-r--r--  numpy/lib/tests/test_arraysetops.py | 75
-rw-r--r--  numpy/lib/tests/test_format.py | 47
-rw-r--r--  numpy/lib/tests/test_function_base.py | 122
-rw-r--r--  numpy/lib/tests/test_histograms.py | 91
-rw-r--r--  numpy/lib/tests/test_index_tricks.py | 89
-rw-r--r--  numpy/lib/tests/test_io.py | 115
-rw-r--r--  numpy/lib/tests/test_mixins.py | 11
-rw-r--r--  numpy/lib/tests/test_polynomial.py | 198
-rw-r--r--  numpy/lib/tests/test_recfunctions.py | 82
-rw-r--r--  numpy/lib/tests/test_shape_base.py | 56
-rw-r--r--  numpy/lib/tests/test_stride_tricks.py | 14
-rw-r--r--  numpy/lib/tests/test_ufunclike.py | 15
-rw-r--r--  numpy/lib/tests/test_utils.py | 32
-rw-r--r--  numpy/lib/twodim_base.py | 48
-rw-r--r--  numpy/lib/type_check.py | 82
-rw-r--r--  numpy/lib/ufunclike.py | 66
-rw-r--r--  numpy/lib/utils.py | 19
-rw-r--r--  numpy/linalg/__init__.py | 2
-rw-r--r--  numpy/linalg/linalg.py | 168
-rw-r--r--  numpy/linalg/tests/test_linalg.py | 278
-rw-r--r--  numpy/linalg/umath_linalg.c.src | 14
-rw-r--r--  numpy/ma/README.txt | 4
-rw-r--r--  numpy/ma/__init__.py | 2
-rw-r--r--  numpy/ma/core.py | 144
-rw-r--r--  numpy/ma/mrecords.py | 19
-rw-r--r--  numpy/ma/tests/test_core.py | 110
-rw-r--r--  numpy/ma/tests/test_extras.py | 5
-rw-r--r--  numpy/ma/tests/test_mrecords.py | 17
-rw-r--r--  numpy/ma/tests/test_old_ma.py | 10
-rw-r--r--  numpy/ma/tests/test_regression.py | 7
-rw-r--r--  numpy/matrixlib/__init__.py | 2
-rw-r--r--  numpy/matrixlib/defmatrix.py | 6
-rw-r--r--  numpy/matrixlib/setup.py | 2
-rw-r--r--  numpy/matrixlib/tests/test_defmatrix.py | 16
-rw-r--r--  numpy/matrixlib/tests/test_masked_matrix.py | 13
-rw-r--r--  numpy/matrixlib/tests/test_matrix_linalg.py | 2
-rw-r--r--  numpy/matrixlib/tests/test_multiarray.py | 2
-rw-r--r--  numpy/matrixlib/tests/test_numeric.py | 2
-rw-r--r--  numpy/matrixlib/tests/test_regression.py | 2
-rw-r--r--  numpy/polynomial/__init__.py | 2
-rw-r--r--  numpy/polynomial/_polybase.py | 95
-rw-r--r--  numpy/polynomial/chebyshev.py | 34
-rw-r--r--  numpy/polynomial/hermite.py | 22
-rw-r--r--  numpy/polynomial/hermite_e.py | 18
-rw-r--r--  numpy/polynomial/laguerre.py | 20
-rw-r--r--  numpy/polynomial/legendre.py | 27
-rw-r--r--  numpy/polynomial/polynomial.py | 32
-rw-r--r--  numpy/polynomial/tests/test_chebyshev.py | 11
-rw-r--r--  numpy/polynomial/tests/test_classes.py | 50
-rw-r--r--  numpy/polynomial/tests/test_hermite.py | 11
-rw-r--r--  numpy/polynomial/tests/test_hermite_e.py | 11
-rw-r--r--  numpy/polynomial/tests/test_laguerre.py | 11
-rw-r--r--  numpy/polynomial/tests/test_legendre.py | 11
-rw-r--r--  numpy/polynomial/tests/test_polynomial.py | 11
-rw-r--r--  numpy/random/__init__.py | 64
-rw-r--r--  numpy/random/info.py | 138
-rw-r--r--  numpy/random/mtrand/distributions.c | 3
-rw-r--r--  numpy/random/mtrand/mtrand.pyx | 168
-rw-r--r--  numpy/random/mtrand/randint_helpers.pxi.in | 6
-rw-r--r--  numpy/random/mtrand/randomkit.c | 2
-rw-r--r--  numpy/random/setup.py | 6
-rw-r--r--  numpy/random/tests/test_random.py | 20
-rw-r--r--  numpy/testing/__init__.py | 2
-rw-r--r--  numpy/testing/_private/noseclasses.py | 2
-rw-r--r--  numpy/testing/_private/nosetester.py | 20
-rw-r--r--  numpy/testing/_private/parameterized.py | 2
-rw-r--r--  numpy/testing/_private/utils.py | 49
-rw-r--r--  numpy/testing/decorators.py | 4
-rw-r--r--  numpy/testing/noseclasses.py | 4
-rw-r--r--  numpy/testing/nosetester.py | 4
-rwxr-xr-x  numpy/testing/setup.py | 2
-rw-r--r--  numpy/testing/tests/test_decorators.py | 4
-rw-r--r--  numpy/testing/tests/test_utils.py | 107
-rw-r--r--  numpy/testing/utils.py | 4
-rw-r--r--  numpy/tests/test_ctypeslib.py | 127
-rw-r--r--  numpy/tests/test_public_api.py | 89
-rw-r--r--  numpy/tests/test_reloading.py | 8
-rw-r--r--  numpy/tests/test_scripts.py | 47
-rw-r--r--  pavement.py | 610
-rwxr-xr-x  runtests.py | 69
-rwxr-xr-x  setup.py | 48
-rw-r--r--  shippable.yml | 71
-rw-r--r--  site.cfg.example | 14
-rw-r--r--  tools/allocation_tracking/sorttable.js | 8
-rwxr-xr-x  tools/changelog.py | 6
-rw-r--r--  tools/swig/pyfragments.swg | 6
-rw-r--r--  tools/test-installed-numpy.py | 4
-rwxr-xr-x  tools/travis-before-install.sh | 12
-rwxr-xr-x  tools/travis-test.sh | 75
480 files changed, 25450 insertions, 11221 deletions
diff --git a/.appveyor.yml b/.appveyor.yml
index 99936267a..01440c6a0 100644
--- a/.appveyor.yml
+++ b/.appveyor.yml
@@ -20,11 +20,6 @@ environment:
NPY_NUM_BUILD_JOBS: 4
matrix:
- - PYTHON: C:\Python34-x64
- PYTHON_VERSION: 3.4
- PYTHON_ARCH: 64
- TEST_MODE: fast
-
- PYTHON: C:\Python36
PYTHON_VERSION: 3.6
PYTHON_ARCH: 32
@@ -44,11 +39,13 @@ environment:
PYTHON_VERSION: 3.6
PYTHON_ARCH: 64
TEST_MODE: full
+ INSTALL_PICKLE5: 1
- PYTHON: C:\Python37-x64
PYTHON_VERSION: 3.7
PYTHON_ARCH: 64
TEST_MODE: full
+ INSTALL_PICKLE5: 1
init:
- "ECHO %PYTHON% %PYTHON_VERSION% %PYTHON_ARCH%"
@@ -106,6 +103,8 @@ install:
# Upgrade to the latest pip.
- 'python -m pip install -U pip setuptools wheel'
+ - if [%INSTALL_PICKLE5%]==[1] echo pickle5 >> tools/ci/appveyor/requirements.txt
+
# Install the numpy test dependencies.
- 'pip install -U --timeout 5 --retries 2 -r tools/ci/appveyor/requirements.txt'
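The `pickle5` package added above backports pickle protocol 5 (PEP 574) to Python 3.6/3.7; NumPy 1.16 can use it, when available, for out-of-band buffer pickling of arrays. A minimal sketch of the round trip, assuming the backport mirrors the Python 3.8 `pickle` API:

```python
import numpy as np
try:
    import pickle5 as pickle  # backport for Python 3.6 / 3.7
except ImportError:
    import pickle  # Python >= 3.8 supports protocol 5 natively

arr = np.arange(10)
buffers = []
# Protocol 5 can hand large buffers to a callback instead of embedding
# them in the pickle stream, letting consumers avoid extra copies.
payload = pickle.dumps(arr, protocol=5, buffer_callback=buffers.append)
restored = pickle.loads(payload, buffers=buffers)
assert np.array_equal(arr, restored)
```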
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 906e96a83..e0fb28c99 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -20,7 +20,7 @@ jobs:
command: |
python3 -m venv venv
. venv/bin/activate
- pip install cython sphinx matplotlib
+ pip install cython sphinx==1.7.9 matplotlib
sudo apt-get update
sudo apt-get install -y graphviz texlive-fonts-recommended texlive-latex-recommended texlive-latex-extra texlive-generic-extra latexmk texlive-xetex
@@ -48,9 +48,8 @@ jobs:
cd doc/neps
make html
- # - store_artifacts:
- # path: doc/build/html/
- # destination: devdocs
+ - store_artifacts:
+ path: doc/build/html/
# - store_artifacts:
diff --git a/.codecov.yml b/.codecov.yml
new file mode 100644
index 000000000..cb3ee230b
--- /dev/null
+++ b/.codecov.yml
@@ -0,0 +1,29 @@
+codecov:
+ ci:
+ # we don't require appveyor or
+ # circleCI to pass to report
+ # coverage, which currently only
+ # comes from a single Python 3.6 job
+ # in Travis
+ - !appveyor
+ - !circle
+ notify:
+ # don't require all travis builds to pass;
+ # as long as the coverage job succeeds it
+ # can report the % coverage, even if another
+ # job needs a restart for whatever reason
+ - require_ci_to_pass: no
+ # we should only require a single build before
+ # reporting the % coverage because there's only
+ # one coverage job in Travis
+ - after_n_builds: 1
+coverage:
+ status:
+ project:
+ default:
+ # Require 1% coverage, i.e., always succeed
+ target: 1
+comment: off
+
+ignore:
+ - "**/setup.py"
diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..57c98060e
--- /dev/null
+++ b/.github/CODE_OF_CONDUCT.md
@@ -0,0 +1 @@
+NumPy has a Code of Conduct, please see: https://www.numpy.org/devdocs/dev/conduct/code_of_conduct.html
diff --git a/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index e70585d0c..01d9a537e 100644
--- a/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -16,12 +16,12 @@ Thanks for your interest in contributing code to numpy!
+ If this is your first time contributing to a project on GitHub, please read
through our
-[guide to contributing to numpy](http://docs.scipy.org/doc/numpy/dev/index.html)
+[guide to contributing to numpy](https://docs.scipy.org/doc/numpy/dev/index.html)
+ If you have contributed to other projects on GitHub you can go straight to our
-[development workflow](http://docs.scipy.org/doc/numpy/dev/gitwash/development_workflow.html)
+[development workflow](https://docs.scipy.org/doc/numpy/dev/gitwash/development_workflow.html)
Either way, please be sure to follow our
-[convention for commit messages](http://docs.scipy.org/doc/numpy/dev/gitwash/development_workflow.html#writing-the-commit-message).
+[convention for commit messages](https://docs.scipy.org/doc/numpy/dev/gitwash/development_workflow.html#writing-the-commit-message).
If you are writing new C code, please follow the style described in
``doc/C_STYLE_GUIDE``.
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
new file mode 100644
index 000000000..3a25eeb1e
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE.md
@@ -0,0 +1,27 @@
+
+<!-- Please describe the issue in detail here, and fill in the fields below -->
+
+### Reproducing code example:
+
+<!-- A short code example that reproduces the problem/missing feature. It should be
+self-contained, i.e., possible to run as-is via 'python myproblem.py' -->
+
+```python
+import numpy as np
+<< your code here >>
+```
+
+<!-- Remove these sections for a feature request -->
+
+### Error message:
+
+<!-- If you are reporting a segfault please include a GDB traceback, which you
+can generate by following
+https://github.com/numpy/numpy/blob/master/doc/source/dev/development_environment.rst#debugging -->
+
+<!-- Full error message, if any (starting from line Traceback: ...) -->
+
+### Numpy/Python version information:
+
+<!-- Output from 'import sys, numpy; print(numpy.__version__, sys.version)' -->
+
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 000000000..b6da4b772
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,7 @@
+<!-- Please be sure you are following the instructions in the dev guidelines
+http://www.numpy.org/devdocs/dev/gitwash/development_workflow.html
+-->
+
+<!-- We'd appreciate it if your commit message is properly formatted
+http://www.numpy.org/devdocs/dev/gitwash/development_workflow.html#writing-the-commit-message
+-->
diff --git a/.gitignore b/.gitignore
index fbdd4f784..0a1e1909f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -99,6 +99,7 @@ Icon?
.gdb_history
ehthumbs.db
Thumbs.db
+.directory
# pytest generated files #
##########################
@@ -123,6 +124,7 @@ numpy/core/include/numpy/config.h
numpy/core/include/numpy/multiarray_api.txt
numpy/core/include/numpy/ufunc_api.txt
numpy/core/lib/
+numpy/core/src/multiarray/_multiarray_tests.c
numpy/core/src/multiarray/arraytypes.c
numpy/core/src/multiarray/einsum.c
numpy/core/src/multiarray/lowlevel_strided_loops.c
@@ -141,6 +143,10 @@ numpy/core/src/npysort/sort.c
numpy/core/src/private/npy_binsearch.h
numpy/core/src/private/npy_partition.h
numpy/core/src/private/templ_common.h
+numpy/core/src/umath/_operand_flag_tests.c
+numpy/core/src/umath/_rational_tests.c
+numpy/core/src/umath/_struct_ufunc_tests.c
+numpy/core/src/umath/_umath_tests.c
numpy/core/src/umath/scalarmath.c
numpy/core/src/umath/funcs.inc
numpy/core/src/umath/loops.[ch]
@@ -153,6 +159,7 @@ numpy/distutils/__config__.py
numpy/linalg/umath_linalg.c
doc/source/reference/generated
benchmarks/results
+benchmarks/html
benchmarks/env
benchmarks/numpy
# cythonized files
diff --git a/.lgtm.yml b/.lgtm.yml
new file mode 100644
index 000000000..c1c54ec3c
--- /dev/null
+++ b/.lgtm.yml
@@ -0,0 +1,18 @@
+path_classifiers:
+ library:
+ - tools
+ generated:
+ # The exports defined in __init__.py are defined in the Cython module
+ # np.random.mtrand. By excluding this file we suppress a number of
+ # "undefined export" alerts
+ - numpy/random/__init__.py
+
+extraction:
+ python:
+ python_setup:
+ requirements:
+ - cython>=0.29
+ cpp:
+ index:
+ build_command:
+ - python3 setup.py build
diff --git a/.mailmap b/.mailmap
index 6464e4b24..db67cfeb3 100644
--- a/.mailmap
+++ b/.mailmap
@@ -9,6 +9,8 @@
# gives no duplicates.
Aaron Baecker <abaecker@localhost> abaecker <abaecker@localhost>
+Alan Fontenot <logeaux@yahoo.com> logeaux <logeaux@yahoo.com>
+Alan Fontenot <logeaux@yahoo.com> logeaux <36168460+logeaux@users.noreply.github.com>
Abdul Muneer <abdulmuneer@gmail.com> abdulmuneer <abdulmuneer@gmail.com>
Adam Ginsburg <adam.g.ginsburg@gmail.com> Adam Ginsburg <keflavich@gmail.com>
Albert Jornet Puig <albert.jornet@ic3.cat> jurnix <albert.jornet@ic3.cat>
@@ -19,10 +21,12 @@ Alex Griffing <argriffi@ncsu.edu> argriffing <argriffing@users.noreply.github.co
Alex Thomas <alexthomas93@users.noreply.github.com> alexthomas93 <alexthomas93@users.noreply.github.com>
Alexander Belopolsky <abalkin@enlnt.com> Alexander Belopolsky <a@enlnt.com>
Alexander Belopolsky <abalkin@enlnt.com> Alexander Belopolsky <a@enlnt.com>
+Alexander Belopolsky <abalkin@enlnt.com> sasha <sasha@localhost>
Alexander Shadchin <alexandr.shadchin@gmail.com> Alexandr Shadchin <alexandr.shadchin@gmail.com>
Alexander Shadchin <alexandr.shadchin@gmail.com> shadchin <alexandr.shadchin@gmail.com>
Allan Haldane <allan.haldane@gmail.com> ahaldane <ealloc@gmail.com>
Alok Singhal <gandalf013@gmail.com> Alok Singhal <alok@merfinllc.com>
+Alyssa Quek <alyssaquek@gmail.com> alyssaq <alyssaquek@gmail.com>
Amir Sarabadani <ladsgroup@gmail.com> amir <ladsgroup@gmail.com>
Anatoly Techtonik <techtonik@gmail.com> anatoly techtonik <techtonik@gmail.com>
Andrei Kucharavy <ank@andreikucharavy.com> chiffa <ank@andreikucharavy.com>
@@ -47,10 +51,12 @@ Bryan Van de Ven <bryanv@continuum.io> Bryan Van de Ven <bryan@laptop.local>
Carl Kleffner <cmkleffner@gmail.com> carlkl <cmkleffner@gmail.com>
Chris Burns <chris.burns@localhost> chris.burns <chris.burns@localhost>
Chris Kerr <debdepba@dasganma.tk> Chris Kerr <cjk34@cam.ac.uk>
+Christian Clauss <cclauss@bluewin.ch> cclauss <cclauss@bluewin.ch>
Christopher Hanley <chanley@gmail.com> chanley <chanley@gmail.com>
Christoph Gohlke <cgohlke@uci.edu> cgholke <?@?>
Christoph Gohlke <cgohlke@uci.edu> cgohlke <cgohlke@uci.edu>
Christoph Gohlke <cgohlke@uci.edu> Christolph Gohlke <cgohlke@uci.edu>
+Daniel B Allan <daniel.b.allan@gmail.com> danielballan <daniel.b.allan@gmail.com>
Daniel da Silva <mail@danieldasilva.org> Daniel da Silva <daniel@meltingwax.net>
Daniel da Silva <mail@danieldasilva.org> Daniel da Silva <var.mail.daniel@gmail.com>
Daniel J Farrell <danieljfarrel@me.com> danieljfarrell <danieljfarrel@me.com>
@@ -61,11 +67,13 @@ David Huard <david.huard@gmail.com> dhuard <dhuard@localhost>
David M Cooke <cookedm@localhost> cookedm <cookedm@localhost>
David Nicholson <davidjn@google.com> davidjn <dnic12345@gmail.com>
David Ochoa <ochoadavid@gmail.com> ochoadavid <ochoadavid@gmail.com>
+Dawid Zych <dawid.zych@yandex.com> silenc3r <dawid.zych@yandex.com>
Derek Homeier <derek@astro.physik.uni-goettingen.de> Derek Homeier <dhomeie@gwdg.de>
Derek Homeier <derek@astro.physik.uni-goettingen.de> Derek Homeir <derek@astro.phsik.uni-goettingen.de>
Derek Homeier <derek@astro.physik.uni-goettingen.de> Derek Homier <derek@astro.physik.uni-goettingen.de>
Derrick Williams <myutat@gmail.com> derrick <myutat@gmail.com>
Dmitriy Shalyga <zuko3d@gmail.com> zuko3d <zuko3d@gmail.com>
+Ed Schofield <edschofield@localhost> edschofield <edschofield@localhost>
Egor Zindy <ezindy@gmail.com> zindy <ezindy@gmail.com>
Endolith <endolith@gmail.com>
Eric Fode <ericfode@gmail.com> Eric Fode <ericfode@linuxlaptop.(none)>
@@ -97,15 +105,19 @@ Irvin Probst <irvin.probst@ensta-bretagne.fr> I--P <irvin.probst@ensta-bretagne.
Jaime Fernandez <jaime.frio@gmail.com> Jaime Fernandez <jaime.fernandez@hp.com>
Jaime Fernandez <jaime.frio@gmail.com> jaimefrio <jaime.frio@gmail.com>
Jaime Fernandez <jaime.frio@gmail.com> Jaime <jaime.frio@gmail.com>
+James Webber <jamestwebber@gmail.com> jamestwebber <jamestwebber@gmail.com>
Jarrod Millman <millman@berkeley.edu> Jarrod Millman <jarrod.millman@gmail.com>
Jason Grout <jason-github@creativetrax.com> Jason Grout <jason.grout@drake.edu>
Jason King <pizza@netspace.net.au> jason king <pizza@netspace.net.au>
Jay Bourque <jay.bourque@continuum.io> jayvius <jay.bourque@continuum.io>
Jean Utke <jutke@allstate.com> jutke <jutke@allstate.com>
+Jeffrey Yancey <jeffrey@octane5.com> Jeff <3820914+jeffyancey@users.noreply.github.com>
Jerome Kelleher <jerome.kelleher@ed.ac.uk> jeromekelleher <jerome.kelleher@ed.ac.uk>
Johannes Schönberger <hannesschoenberger@gmail.com> Johannes Schönberger <jschoenberger@demuc.de>
+John Darbyshire <24256554+attack68@users.noreply.github.com> attack68 <24256554+attack68@users.noreply.github.com>
Joseph Fox-Rabinovitz <jfoxrabinovitz@gmail.com> Joseph Fox-Rabinovitz <joseph.r.fox-rabinovitz@nasa.gov>
-Joseph Fox-Rabinovitz <jfoxrabinovitz@gmail.com> Mad Physicist <madphysicist@users.noreply.github.com>
+Joseph Fox-Rabinovitz <jfoxrabinovitz@gmail.com> Joseph Fox-Rabinovitz <madphysicist@users.noreply.github.com>
+Joseph Fox-Rabinovitz <jfoxrabinovitz@gmail.com> Mad Physicist <madphysicist@users.noreply.github.com>
Joseph Martinot-Lagarde <contrebasse@gmail.com> Joseph Martinot-Lagarde <joseph.martinot-lagarde@onera.fr>
Julian Taylor <juliantaylor108@gmail.com> Julian Taylor <jtaylor.debian@googlemail.com>
Julian Taylor <juliantaylor108@gmail.com> Julian Taylor <juliantaylor108@googlemail.com>
@@ -115,10 +127,12 @@ Khaled Ben Abdallah Okuda <khaled.ben.okuda@gmail.com> KhaledTo <khaled.ben.okud
Konrad Kapp <k_kapp@yahoo.com> k_kapp@yahoo.com <k_kapp@yahoo.com>
Lars Buitinck <larsmans@gmail.com> Lars Buitinck <l.buitinck@esciencecenter.nl>
Lars Buitinck <larsmans@gmail.com> Lars Buitinck <L.J.Buitinck@uva.nl>
+Lars Grüter <lagru@mailbox.org> Lars G <lagru@mailbox.org>
Luis Pedro Coelho <luis@luispedro.org> Luis Pedro Coelho <lpc@cmu.edu>
Luke Zoltan Kelley <lkelley@cfa.harvard.edu> lzkelley <lkelley@cfa.harvard.edu>
Manoj Kumar <manojkumarsivaraj334@gmail.com> MechCoder <manojkumarsivaraj334@gmail.com>
Mark DePristo <mdepristo@synapdx.com> markdepristo <mdepristo@synapdx.com>
+Mark Weissman <mw9050@gmail.com> m-d-w <mw9050@gmail.com>
Mark Wiebe <mwwiebe@gmail.com> Mark <mwwiebe@gmail.com>
Mark Wiebe <mwwiebe@gmail.com> Mark Wiebe <mwiebe@continuum.io>
Mark Wiebe <mwwiebe@gmail.com> Mark Wiebe <mwiebe@enthought.com>
@@ -134,6 +148,7 @@ Michael Droettboom <mdboom@gmail.com> mdroe <mdroe@localhost>
Michael K. Tran <trankmichael@gmail.com> mtran <trankmichael@gmail.com>
Michael Martin <mmartin4242@gmail.com> mmartin <mmartin4242@gmail.com>
Michael Schnaitter <schnaitterm@knights.ucf.edu> schnaitterm <schnaitterm@users.noreply.github.com>
+Muhammad Kasim <firman.kasim@gmail.com> mfkasim91 <firman.kasim@gmail.com>
Nathaniel J. Smith <njs@pobox.com> njsmith <njs@pobox.com>
Naveen Arunachalam <notatroll.troll@gmail.com> naveenarun <notatroll.troll@gmail.com>
Nicolas Scheffer <nicolas.scheffer@sri.com> Nicolas Scheffer <scheffer@speech.sri.com>
@@ -175,15 +190,22 @@ Stephan Hoyer <shoyer@gmail.com> Stephan Hoyer <shoyer@climate.com>
Steven J Kern <kern.steven0@gmail.com>
Thomas A Caswell <tcaswell@gmail.com> Thomas A Caswell <tcaswell@bnl.gov>
Tim Cera <tim@cerazone.net> tim cera <tcera@sjrwmd.com>
+Tim Teichmann <t.teichmann@dashdos.com> tteichmann <t.teichmann@dashdos.com>
+Tim Teichmann <t.teichmann@dashdos.com> tteichmann <44259103+tteichmann@users.noreply.github.com>
Tom Boyd <pezcore@users.noreply.github.com> pezcore <pezcore@users.noreply.github.com>
Tom Poole <t.b.poole@gmail.com> tpoole <t.b.poole@gmail.com>
Travis Oliphant <travis@continuum.io> Travis E. Oliphant <teoliphant@gmail.com>
Travis Oliphant <travis@continuum.io> Travis Oliphant <oliphant@enthought.com>
Valentin Haenel <valentin@haenel.co> Valentin Haenel <valentin.haenel@gmx.de>
Warren Weckesser <warren.weckesser@enthought.com> Warren Weckesser <warren.weckesser@gmail.com>
+Weitang Li <liwt31@163.com> wtli@Dirac <liwt31@163.com>
+Weitang Li <liwt31@163.com> wtli <liwt31@163.com>
Wendell Smith <wendellwsmith@gmail.com> Wendell Smith <wackywendell@gmail.com>
William Spotz <wfspotz@sandia.gov@localhost> wfspotz@sandia.gov <wfspotz@sandia.gov@localhost>
+Wim Glenn <wim.glenn@melbourneit.com.au> wim glenn <wim.glenn@melbourneit.com.au>
Wojtek Ruszczewski <git@wr.waw.pl> wrwrwr <git@wr.waw.pl>
+Yuji Kanagawa <yuji.kngw.80s.revive@gmail.com> kngwyu <yuji.kngw.80s.revive@gmail.com>
Zixu Zhao <zixu.zhao.tireless@gmail.com> ZZhaoTireless <zixu.zhao.tireless@gmail.com>
Ziyan Zhou<ziyan.zhou@mujin.co.jp> Ziyan <ziyan.zhou@mujin.co.jp>
-luzpaz <luzpaz@users.noreply.github.com> luz.paz <luzpaz@users.noreply.github.com>
+luzpaz <kunda@scribus.net> luz.paz <luzpaz@users.noreply.github.com>
+luzpaz <kunda@scribus.net> luzpaz <luzpaz@users.noreply.github.com>
diff --git a/.travis.yml b/.travis.yml
index 119e4f24b..491fcefea 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,7 +1,7 @@
# After changing this file, check it on:
# http://lint.travis-ci.org/
language: python
-
+group: travis_latest
# Run jobs on container-based infrastructure, can be overridden per job
sudo: false
@@ -32,7 +32,6 @@ env:
python:
- 2.7
- - 3.4
- 3.5
- 3.6
matrix:
@@ -40,8 +39,9 @@ matrix:
- python: 3.7
dist: xenial # Required for Python 3.7
sudo: true # travis-ci/travis-ci#9069
+ env: INSTALL_PICKLE5=1
- python: 3.6
- env: USE_CHROOT=1 ARCH=i386 DIST=bionic PYTHON=3.6
+ env: USE_CHROOT=1 ARCH=i386 DIST=bionic
sudo: true
addons:
apt:
@@ -49,7 +49,9 @@ matrix:
packages:
- dpkg
- debootstrap
- - python: 3.4
+ - python: 3.5
+ dist: xenial # Required for python3.5-dbg
+ sudo: true # travis-ci/travis-ci#9069
env: USE_DEBUG=1
addons:
apt:
@@ -60,7 +62,7 @@ matrix:
- python3-dev
- python3-setuptools
- python: 3.6
- env: USE_WHEEL=1 RUN_FULL_TESTS=1
+ env: USE_WHEEL=1 RUN_FULL_TESTS=1 RUN_COVERAGE=1 INSTALL_PICKLE5=1
- python: 2.7
env: USE_WHEEL=1 RUN_FULL_TESTS=1 PYTHON_OPTS="-3 -OO"
- python: 3.6
@@ -78,6 +80,9 @@ matrix:
- BLAS=None
- LAPACK=None
- ATLAS=None
+ - python: 3.6
+ env:
+ - NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=1
before_install:
- ./tools/travis-before-install.sh
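The new `NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=1` job exercises the `__array_function__` protocol (NEP 18), which in NumPy 1.16 is only active when that environment variable is set. A minimal sketch of what the protocol enables; the `DiagonalArray` class is illustrative only:

```python
import numpy as np

class DiagonalArray:
    """Toy duck array representing value * eye(n), storing two scalars."""
    def __init__(self, n, value):
        self._n = n
        self._value = value

    def __array_function__(self, func, types, args, kwargs):
        # NEP 18 hook: NumPy passes in the public function being called,
        # so the class can supply its own implementation or opt out.
        if func is np.sum:
            return self._n * self._value
        return NotImplemented  # defer to NumPy's normal error handling

d = DiagonalArray(5, 2.0)
print(np.sum(d))  # -> 10.0 when the protocol is enabled
```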
diff --git a/LICENSE.txt b/LICENSE.txt
index 0065d465f..b9731f734 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -1,4 +1,4 @@
-Copyright (c) 2005-2017, NumPy Developers.
+Copyright (c) 2005-2019, NumPy Developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -42,7 +42,7 @@ License: 2-clause BSD
Name: scipy-sphinx-theme
Files: doc/scipy-sphinx-theme/*
License: 3-clause BSD, PSF and Apache 2.0
- For details, see doc/sphinxext/LICENSE.txt
+ For details, see doc/scipy-sphinx-theme/LICENSE.txt
Name: lapack-lite
Files: numpy/linalg/lapack_lite/*
diff --git a/MANIFEST.in b/MANIFEST.in
index eff19e20a..e15e0e58a 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,7 +1,8 @@
#
# Use .add_data_files and .add_data_dir methods in an appropriate
# setup.py files to include non-python files such as documentation,
-# data, etc files to distribution. Avoid using MANIFEST.in for that.
+# data, etc files to distribution (*for installation*).
+# Avoid using MANIFEST.in for that.
#
include MANIFEST.in
include pytest.ini
@@ -12,21 +13,27 @@ recursive-include numpy/random/mtrand *.pyx *.pxd
# Add build support that should go in sdist, but not go in bdist/be installed
recursive-include numpy/_build_utils *
recursive-include numpy/linalg/lapack_lite *.c *.h
-include tox.ini
+include runtests.py
+include tox.ini pytest.ini .coveragerc
+recursive-include tools *
# Add sdist files whose use depends on local configuration.
-include numpy/core/src/multiarray/cblasfuncs.c
-include numpy/core/src/multiarray/python_xerbla.c
+include numpy/core/src/common/cblasfuncs.c
+include numpy/core/src/common/python_xerbla.c
# Adding scons build related files not found by distutils
recursive-include numpy/core/code_generators *.py *.txt
recursive-include numpy/core *.in *.h
-# Add documentation: we don't use add_data_dir since we do not want to include
-# this at installation, only for sdist-generated tarballs
-include doc/Makefile doc/postprocess.py
-recursive-include doc/release *
-recursive-include doc/source *
-recursive-include doc/sphinxext *
-recursive-include tools/allocation_tracking *
-recursive-include tools/swig *
-recursive-include doc/scipy-sphinx-theme *
-
+# Add documentation and benchmarks: we don't use add_data_dir since we do not
+# want to include this at installation, only for sdist-generated tarballs
+# Docs:
+recursive-include doc *
+prune doc/build
+prune doc/source/generated
+# Benchmarks:
+recursive-include benchmarks *
+prune benchmarks/env
+prune benchmarks/results
+prune benchmarks/html
+prune benchmarks/numpy
+# Exclude generated files
+prune */__pycache__
global-exclude *.pyc *.pyo *.pyd *.swp *.bak *~
diff --git a/README.md b/README.md
index cd11b7bc5..a7fd4be4b 100644
--- a/README.md
+++ b/README.md
@@ -1,11 +1,17 @@
# <img alt="NumPy" src="https://cdn.rawgit.com/numpy/numpy/master/branding/icons/numpylogo.svg" height="60">
-[![Travis](https://img.shields.io/travis/numpy/numpy/master.svg?label=Travis%20CI)](https://travis-ci.org/numpy/numpy)
-[![AppVeyor](https://img.shields.io/appveyor/ci/charris/numpy/master.svg?label=AppVeyor)](https://ci.appveyor.com/project/charris/numpy)
+[![Travis](https://img.shields.io/travis/numpy/numpy/master.svg?label=Travis%20CI)](
+ https://travis-ci.org/numpy/numpy)
+[![AppVeyor](https://img.shields.io/appveyor/ci/charris/numpy/master.svg?label=AppVeyor)](
+ https://ci.appveyor.com/project/charris/numpy)
+[![Azure](https://dev.azure.com/numpy/numpy/_apis/build/status/azure-pipeline%20numpy.numpy)](
+ https://dev.azure.com/numpy/numpy/_build/latest?definitionId=5)
+[![codecov](https://codecov.io/gh/numpy/numpy/branch/master/graph/badge.svg)](
+ https://codecov.io/gh/numpy/numpy)
NumPy is the fundamental package needed for scientific computing with Python.
-- **Website (including documentation):** http://www.numpy.org
+- **Website (including documentation):** https://www.numpy.org
- **Mailing list:** https://mail.python.org/mailman/listinfo/numpy-discussion
- **Source:** https://github.com/numpy/numpy
- **Bug reports:** https://github.com/numpy/numpy/issues
@@ -17,7 +23,12 @@ It provides:
- tools for integrating C/C++ and Fortran code
- useful linear algebra, Fourier transform, and random number capabilities
-If ``nose`` is installed, tests can be run after installation with:
+Testing:
+
+- NumPy versions &ge; 1.15 require `pytest`
+- NumPy versions &lt; 1.15 require `nose`
+
+Tests can then be run after installation with:
python -c 'import numpy; numpy.test()'
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index c99b56e37..7a98bedd6 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -1,4 +1,31 @@
+trigger:
+ # start a new build for every push
+ batch: False
+ branches:
+ include:
+ - master
+ - maintenance/*
jobs:
+- job: Linux_Python_36_32bit_full_with_asserts
+ pool:
+ vmImage: 'ubuntu-16.04'
+ steps:
+ - script: |
+ docker pull i386/ubuntu:bionic
+ docker run -v $(pwd):/numpy i386/ubuntu:bionic /bin/bash -c "cd numpy && \
+ apt-get -y update && \
+ apt-get -y install python3.6-dev python3-pip locales && \
+ locale-gen fr_FR && update-locale && \
+ pip3 install setuptools nose cython==0.29.0 pytest pytz pickle5 && \
+ apt-get -y install libopenblas-dev gfortran && \
+ NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=1 \
+ F77=gfortran-5 F90=gfortran-5 CFLAGS=-UNDEBUG \
+ python3 runtests.py --mode=full -- -rsx --junitxml=junit/test-results.xml"
+ displayName: 'Run 32-bit Ubuntu Docker Build / Tests'
+ - task: PublishTestResults@2
+ inputs:
+ testResultsFiles: '**/test-*.xml'
+ testRunTitle: 'Publish test results for Python 3.6-32 bit'
- job: macOS
pool:
# NOTE: at time of writing, there is a danger
@@ -26,12 +53,14 @@ jobs:
# two C compilers, but with homebrew looks like we're
# now stuck getting the full gcc toolchain instead of
# just pulling in gfortran
- - script: brew install gcc
+ - script: HOMEBREW_NO_AUTO_UPDATE=1 brew install gcc
displayName: 'make gfortran available on mac os vm'
- script: python -m pip install --upgrade pip setuptools wheel
displayName: 'Install tools'
- - script: python -m pip install cython nose pytz pytest
+ - script: python -m pip install cython nose pytz pytest pickle5 vulture
displayName: 'Install dependencies; some are optional to avoid test skips'
+ - script: /bin/bash -c "! vulture . --min-confidence 100 --exclude doc/,numpy/distutils/ | grep 'unreachable'"
+ displayName: 'Check for unreachable code paths in Python modules'
# NOTE: init_dgelsd failed init issue with current ACCELERATE /
# LAPACK configuration on Azure macos image; at the time of writing
# this plagues homebrew / macports NumPy builds, but we will
@@ -57,6 +86,11 @@ jobs:
- job: Windows
pool:
vmImage: 'VS2017-Win2016'
+ variables:
+ # openblas URLs from numpy-wheels
+ # appveyor / Windows config
+ OPENBLAS_32: "https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/openblas-v0.3.3-186-g701ea883-win32-gcc_7_1_0.zip"
+ OPENBLAS_64: "https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/openblas-v0.3.3-186-g701ea883-win_amd64-gcc_7_1_0.zip"
strategy:
maxParallel: 6
matrix:
@@ -64,26 +98,40 @@ jobs:
PYTHON_VERSION: '3.6'
PYTHON_ARCH: 'x86'
TEST_MODE: fast
+ OPENBLAS: $(OPENBLAS_32)
+ BITS: 32
Python37-32bit-fast:
PYTHON_VERSION: '3.7'
PYTHON_ARCH: 'x86'
TEST_MODE: fast
+ OPENBLAS: $(OPENBLAS_32)
+ BITS: 32
Python27-64bit-fast:
PYTHON_VERSION: '2.7'
PYTHON_ARCH: 'x64'
TEST_MODE: fast
+ OPENBLAS: $(OPENBLAS_64)
+ BITS: 64
Python35-64bit-full:
PYTHON_VERSION: '3.5'
PYTHON_ARCH: 'x64'
TEST_MODE: full
+ OPENBLAS: $(OPENBLAS_64)
+ BITS: 64
Python36-64bit-full:
PYTHON_VERSION: '3.6'
PYTHON_ARCH: 'x64'
TEST_MODE: full
+ INSTALL_PICKLE5: 1
+ OPENBLAS: $(OPENBLAS_64)
+ BITS: 64
Python37-64bit-full:
PYTHON_VERSION: '3.7'
PYTHON_ARCH: 'x64'
TEST_MODE: full
+ INSTALL_PICKLE5: 1
+ OPENBLAS: $(OPENBLAS_64)
+ BITS: 64
steps:
- task: UsePythonVersion@0
inputs:
@@ -92,20 +140,51 @@ jobs:
architecture: $(PYTHON_ARCH)
# as noted by numba project, currently need
# specific VC install for Python 2.7
- # NOTE: had some issues splitting powershell
- # command into bits and / or using condition
- # directive, so squeezing operation to a single
- # line for now
- - powershell: if ($env:PYTHON_VERSION -eq 2.7) {$wc = New-Object net.webclient; $wc.Downloadfile("https://download.microsoft.com/download/7/9/6/796EF2E4-801B-4FC4-AB28-B59FBF6D907B/VCForPython27.msi", "VCForPython27.msi"); Start-Process "VCForPython27.msi" /qn -Wait}
+ - powershell: |
+ $wc = New-Object net.webclient
+ $wc.Downloadfile("https://download.microsoft.com/download/7/9/6/796EF2E4-801B-4FC4-AB28-B59FBF6D907B/VCForPython27.msi", "VCForPython27.msi")
+ Start-Process "VCForPython27.msi" /qn -Wait
displayName: 'Install VC 9.0'
+ condition: eq(variables['PYTHON_VERSION'], '2.7')
- script: python -m pip install --upgrade pip setuptools wheel
displayName: 'Install tools'
+ - powershell: |
+ $wc = New-Object net.webclient
+ $wc.Downloadfile("$(OPENBLAS)", "openblas.zip")
+ $tmpdir = New-TemporaryFile | %{ rm $_; mkdir $_ }
+ Expand-Archive "openblas.zip" $tmpdir
+ $pyversion = python -c "from __future__ import print_function; import sys; print(sys.version.split()[0])"
+ Write-Host "Python Version: $pyversion"
+ $target = "C:\\hostedtoolcache\\windows\\Python\\$pyversion\\$(PYTHON_ARCH)\\lib\\openblas.a"
+ Write-Host "target path: $target"
+ cp $tmpdir\$(BITS)\lib\libopenblas_v0.3.3-186-g701ea883-gcc_7_1_0.a $target
+ displayName: 'Download / Install OpenBLAS'
+ - powershell: |
+ choco install -y mingw --forcex86 --force
+ displayName: 'Install 32-bit mingw for 32-bit builds'
+ condition: eq(variables['BITS'], 32)
- script: python -m pip install cython nose pytz pytest
displayName: 'Install dependencies; some are optional to avoid test skips'
# NOTE: for Windows builds it seems much more tractable to use runtests.py
# vs. manual setup.py and then runtests.py for testing only
- - script: python runtests.py --show-build-log --mode=$(TEST_MODE) -- -rsx --junitxml=junit/test-results.xml
- displayName: 'Build NumPy & Run Full NumPy Test Suite'
+ - script: if [%INSTALL_PICKLE5%]==[1] python -m pip install pickle5
+ displayName: 'Install optional pickle5 backport (only for python3.6 and 3.7)'
+ - powershell: |
+ If ($(BITS) -eq 32) {
+ $env:NPY_DISTUTILS_APPEND_FLAGS = 1
+ $env:CFLAGS = "-m32"
+ $env:LDFLAGS = "-m32"
+ $env:PATH = "C:\\tools\\mingw32\\bin;" + $env:PATH
+ refreshenv
+ }
+ pip wheel -v -v -v --wheel-dir=dist .
+
+ ls dist -r | Foreach-Object {
+ pip install $_.FullName
+ }
+ displayName: 'Build NumPy'
+ - script: python runtests.py -n --show-build-log --mode=$(TEST_MODE) -- -rsx --junitxml=junit/test-results.xml
+ displayName: 'Run NumPy Test Suite'
- task: PublishTestResults@2
inputs:
testResultsFiles: '**/test-*.xml'
diff --git a/benchmarks/README.rst b/benchmarks/README.rst
index f4f0b0de9..b67994ce0 100644
--- a/benchmarks/README.rst
+++ b/benchmarks/README.rst
@@ -60,3 +60,11 @@ Some things to consider:
- Preparing arrays etc. should generally be put in the ``setup`` method rather
than the ``time_`` methods, to avoid counting preparation time together with
the time of the benchmarked operation.
+
+- Be mindful that large arrays created with ``np.empty`` or ``np.zeros`` might
+ not be allocated in physical memory until the memory is accessed. If this is
+ desired behaviour, make sure to comment it in your setup function. If
+ you are benchmarking an algorithm, it is unlikely that a user will be
+ executing said algorithm on a newly created empty/zero array. One can force
+ pagefaults to occur in the setup phase either by calling ``np.ones`` or
+ ``arr.fill(value)`` after creating the array.
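A minimal sketch of an asv-style benchmark applying this advice; the class name and sizes are illustrative:

```python
import numpy as np

class ForcedAllocationExample:
    # Illustrative benchmark, not part of the suite.
    def setup(self):
        # np.ones writes every element, so page faults happen here
        # rather than inside the timed method.
        self.src = np.ones(2**22)
        # Alternatively, create the array and then write to it.
        self.dst = np.empty(2**22)
        self.dst.fill(0.0)

    def time_square(self):
        np.square(self.src, out=self.dst)
```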
diff --git a/benchmarks/asv.conf.json b/benchmarks/asv.conf.json
index d837b0d67..45da9533f 100644
--- a/benchmarks/asv.conf.json
+++ b/benchmarks/asv.conf.json
@@ -7,7 +7,7 @@
"project": "numpy",
// The project's homepage
- "project_url": "http://numpy.org/",
+ "project_url": "https://www.numpy.org/",
// The URL or local path of the source code repository for the
// project being benchmarked
@@ -35,7 +35,7 @@
// The Pythons you'd like to test against. If not provided, defaults
// to the current version of Python used to run `asv`.
- "pythons": ["2.7"],
+ "pythons": ["3.6"],
// The matrix of dependencies to test. Each key is the name of a
// package (in PyPI) and the values are version numbers. An empty
diff --git a/benchmarks/benchmarks/bench_function_base.py b/benchmarks/benchmarks/bench_function_base.py
index a45525793..9ef03262b 100644
--- a/benchmarks/benchmarks/bench_function_base.py
+++ b/benchmarks/benchmarks/bench_function_base.py
@@ -105,14 +105,6 @@ class Sort(Benchmark):
self.equal = np.ones(10000)
self.many_equal = np.sort(np.arange(10000) % 10)
- # quicksort median of 3 worst case
- self.worst = np.arange(1000000)
- x = self.worst
- while x.size > 3:
- mid = x.size // 2
- x[mid], x[-2] = x[-2], x[mid]
- x = x[:-2]
-
def time_sort(self):
np.sort(self.e)
@@ -128,9 +120,6 @@ class Sort(Benchmark):
def time_sort_many_equal(self):
self.many_equal.sort()
- def time_sort_worst(self):
- np.sort(self.worst)
-
def time_argsort(self):
self.e.argsort()
@@ -138,6 +127,23 @@ class Sort(Benchmark):
self.o.argsort()
+class SortWorst(Benchmark):
+ def setup(self):
+ # quicksort median of 3 worst case
+ self.worst = np.arange(1000000)
+ x = self.worst
+ while x.size > 3:
+ mid = x.size // 2
+ x[mid], x[-2] = x[-2], x[mid]
+ x = x[:-2]
+
+ def time_sort_worst(self):
+ np.sort(self.worst)
+
+ # Retain old benchmark name for backward compatibility
+ time_sort_worst.benchmark_name = "bench_function_base.Sort.time_sort_worst"
+
+
class Where(Benchmark):
def setup(self):
self.d = np.arange(20000)
diff --git a/benchmarks/benchmarks/bench_io.py b/benchmarks/benchmarks/bench_io.py
index 782d4ab30..879f9b69e 100644
--- a/benchmarks/benchmarks/bench_io.py
+++ b/benchmarks/benchmarks/bench_io.py
@@ -3,6 +3,7 @@ from __future__ import absolute_import, division, print_function
from .common import Benchmark, get_squares
import numpy as np
+from io import StringIO
class Copy(Benchmark):
@@ -20,6 +21,10 @@ class Copy(Benchmark):
def time_memcpy(self, typename):
self.d[...] = self.e_d
+ def time_memcpy_large_out_of_place(self, typename):
+ l = np.ones(1024**2, dtype=np.dtype(typename))
+ l.copy()
+
def time_cont_assign(self, typename):
self.d[...] = 1
@@ -62,3 +67,179 @@ class Savez(Benchmark):
def time_vb_savez_squares(self):
np.savez('tmp.npz', self.squares)
+
+class LoadtxtCSVComments(Benchmark):
+ # benchmarks for np.loadtxt comment handling
+ # when reading in CSV files
+
+ params = [10, int(1e2), int(1e4), int(1e5)]
+ param_names = ['num_lines']
+
+ def setup(self, num_lines):
+ data = [u'1,2,3 # comment'] * num_lines
+ # unfortunately, timeit will only run setup()
+ # between repeat events, but not for iterations
+ # within repeats, so the StringIO object
+ # will have to be rewound in the benchmark proper
+ self.data_comments = StringIO(u'\n'.join(data))
+
+ def time_comment_loadtxt_csv(self, num_lines):
+ # benchmark handling of lines with comments
+ # when loading in from csv files
+
+ # inspired by similar benchmark in pandas
+ # for read_csv
+
+ # need to rewind StringIO object (unfortunately
+ # confounding timing result somewhat) for every
+ # call to timing test proper
+ np.loadtxt(self.data_comments,
+ delimiter=u',')
+ self.data_comments.seek(0)
+
+class LoadtxtCSVdtypes(Benchmark):
+ # benchmarks for np.loadtxt operating with
+ # different dtypes parsed / cast from CSV files
+
+ params = (['float32', 'float64', 'int32', 'int64',
+ 'complex128', 'str', 'object'],
+ [10, int(1e2), int(1e4), int(1e5)])
+ param_names = ['dtype', 'num_lines']
+
+ def setup(self, dtype, num_lines):
+ data = [u'5, 7, 888'] * num_lines
+ self.csv_data = StringIO(u'\n'.join(data))
+
+ def time_loadtxt_dtypes_csv(self, dtype, num_lines):
+ # benchmark loading arrays of various dtypes
+ # from csv files
+
+ # state-dependent timing benchmark requires
+ # rewind of StringIO object
+
+ np.loadtxt(self.csv_data,
+ delimiter=u',',
+ dtype=dtype)
+ self.csv_data.seek(0)
+
+class LoadtxtCSVStructured(Benchmark):
+ # benchmarks for np.loadtxt operating with
+ # a structured data type & CSV file
+
+ def setup(self):
+ num_lines = 50000
+ data = [u"M, 21, 72, X, 155"] * num_lines
+ self.csv_data = StringIO(u'\n'.join(data))
+
+ def time_loadtxt_csv_struct_dtype(self):
+ # obligate rewind of StringIO object
+ # between iterations of a repeat:
+
+ np.loadtxt(self.csv_data,
+ delimiter=u',',
+ dtype=[('category_1', 'S1'),
+ ('category_2', 'i4'),
+ ('category_3', 'f8'),
+ ('category_4', 'S1'),
+ ('category_5', 'f8')])
+ self.csv_data.seek(0)
+
+
+class LoadtxtCSVSkipRows(Benchmark):
+ # benchmarks for loadtxt row skipping when
+ # reading in csv file data; a similar benchmark
+ # is present in the pandas asv suite
+
+ params = [0, 500, 10000]
+ param_names = ['skiprows']
+
+ def setup(self, skiprows):
+ np.random.seed(123)
+ test_array = np.random.rand(100000, 3)
+ self.fname = 'test_array.csv'
+ np.savetxt(fname=self.fname,
+ X=test_array,
+ delimiter=',')
+
+ def time_skiprows_csv(self, skiprows):
+ np.loadtxt(self.fname,
+ delimiter=',',
+ skiprows=skiprows)
+
+class LoadtxtReadUint64Integers(Benchmark):
+ # pandas has a similar CSV reading benchmark
+ # modified to suit np.loadtxt
+
+ params = [550, 1000, 10000]
+ param_names = ['size']
+
+ def setup(self, size):
+ arr = np.arange(size).astype('uint64') + 2**63
+ self.data1 = StringIO(u'\n'.join(arr.astype(str).tolist()))
+ arr = arr.astype(object)
+ arr[500] = -1
+ self.data2 = StringIO(u'\n'.join(arr.astype(str).tolist()))
+
+ def time_read_uint64(self, size):
+ # mandatory rewind of StringIO object
+ # between iterations of a repeat:
+ np.loadtxt(self.data1)
+ self.data1.seek(0)
+
+ def time_read_uint64_neg_values(self, size):
+ # mandatory rewind of StringIO object
+ # between iterations of a repeat:
+ np.loadtxt(self.data2)
+ self.data2.seek(0)
+
+class LoadtxtUseColsCSV(Benchmark):
+ # benchmark selective column reading from CSV files
+ # using np.loadtxt
+
+ params = [2, [1, 3], [1, 3, 5, 7]]
+ param_names = ['usecols']
+
+ def setup(self, usecols):
+ num_lines = 5000
+ data = [u'0, 1, 2, 3, 4, 5, 6, 7, 8, 9'] * num_lines
+ self.csv_data = StringIO(u'\n'.join(data))
+
+ def time_loadtxt_usecols_csv(self, usecols):
+ # must rewind StringIO because of state
+ # dependence of file reading
+ np.loadtxt(self.csv_data,
+ delimiter=u',',
+ usecols=usecols)
+ self.csv_data.seek(0)
+
+class LoadtxtCSVDateTime(Benchmark):
+ # benchmarks for np.loadtxt operating with
+ # datetime data in a CSV file
+
+ params = [20, 200, 2000, 20000]
+ param_names = ['num_lines']
+
+ def setup(self, num_lines):
+ # create the equivalent of a two-column CSV file
+ # with date strings in the first column and random
+ # floating point data in the second column
+ dates = np.arange('today', 20, dtype=np.datetime64)
+ np.random.seed(123)
+ values = np.random.rand(20)
+ date_line = u''
+
+ for date, value in zip(dates, values):
+ date_line += (str(date) + ',' + str(value) + '\n')
+
+ # expand data to specified number of lines
+ data = date_line * (num_lines // 20)
+ self.csv_data = StringIO(data)
+
+ def time_loadtxt_csv_datetime(self, num_lines):
+ # rewind StringIO object -- the timing iterations
+ # are state-dependent
+ X = np.loadtxt(self.csv_data,
+ delimiter=u',',
+ dtype=([('dates', 'M8[us]'),
+ ('values', 'float64')]))
+ self.csv_data.seek(0)
diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py
index 83f26c9d1..e6c91a27c 100644
--- a/benchmarks/benchmarks/bench_lib.py
+++ b/benchmarks/benchmarks/bench_lib.py
@@ -19,7 +19,10 @@ class Pad(Benchmark):
]
def setup(self, shape, pad_width, mode):
- self.array = np.empty(shape)
+ # avoid the lazy allocation of np.zeros and np.empty:
+ # np.full causes page faults to occur during setup
+ # instead of during the benchmark
+ self.array = np.full(shape, 0)
def time_pad(self, shape, pad_width, mode):
np.pad(self.array, pad_width, mode)
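Background for the np.full(shape, 0) idiom used here and echoed in the other benchmark files: zeroed allocations are typically backed by demand-zero pages, so the first write to each page inside the timed region triggers a page fault, while np.full writes every element during setup instead. A rough, platform-dependent way to observe this (a sketch, not part of the patch)::

    import timeit
    import numpy as np

    n = 10**7
    # writing into a fresh zeroed array pays allocation + page-fault costs
    t_lazy = timeit.timeit(lambda: np.zeros(n).fill(1), number=1)
    # np.full already touched every page, so a later pass avoids the faults
    warm = np.full(n, 0.0)
    t_warm = timeit.timeit(lambda: warm.fill(1), number=1)
    print(t_lazy, t_warm)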
diff --git a/benchmarks/benchmarks/bench_ma.py b/benchmarks/benchmarks/bench_ma.py
index d313f01dc..aff78df0a 100644
--- a/benchmarks/benchmarks/bench_ma.py
+++ b/benchmarks/benchmarks/bench_ma.py
@@ -89,7 +89,9 @@ class Concatenate(Benchmark):
]
def setup(self, mode, n):
- normal = np.zeros((n, n), int)
+ # avoid np.zeros's lazy allocation, which causes page faults during the benchmark.
+ # np.full makes the page faults happen during setup instead.
+ normal = np.full((n, n), 0, int)
unmasked = np.ma.zeros((n, n), int)
masked = np.ma.array(normal, mask=True)
diff --git a/benchmarks/benchmarks/bench_overrides.py b/benchmarks/benchmarks/bench_overrides.py
new file mode 100644
index 000000000..155d44fa9
--- /dev/null
+++ b/benchmarks/benchmarks/bench_overrides.py
@@ -0,0 +1,69 @@
+from __future__ import absolute_import, division, print_function
+
+from .common import Benchmark
+
+try:
+ from numpy.core.overrides import array_function_dispatch
+except ImportError:
+ # Don't fail at import time with old NumPy versions
+ def array_function_dispatch(*args, **kwargs):
+ def wrap(*args, **kwargs):
+ return None
+ return wrap
+
+import numpy as np
+
+
+def _broadcast_to_dispatcher(array, shape, subok=None):
+ return (array,)
+
+
+@array_function_dispatch(_broadcast_to_dispatcher)
+def mock_broadcast_to(array, shape, subok=False):
+ pass
+
+
+def _concatenate_dispatcher(arrays, axis=None, out=None):
+ for array in arrays:
+ yield array
+ if out is not None:
+ yield out
+
+
+@array_function_dispatch(_concatenate_dispatcher)
+def mock_concatenate(arrays, axis=0, out=None):
+ pass
+
+
+class DuckArray(object):
+ def __array_function__(self, func, types, args, kwargs):
+ pass
+
+
+class ArrayFunction(Benchmark):
+
+ def setup(self):
+ self.numpy_array = np.array(1)
+ self.numpy_arrays = [np.array(1), np.array(2)]
+ self.many_arrays = 500 * self.numpy_arrays
+ self.duck_array = DuckArray()
+ self.duck_arrays = [DuckArray(), DuckArray()]
+ self.mixed_arrays = [np.array(1), DuckArray()]
+
+ def time_mock_broadcast_to_numpy(self):
+ mock_broadcast_to(self.numpy_array, ())
+
+ def time_mock_broadcast_to_duck(self):
+ mock_broadcast_to(self.duck_array, ())
+
+ def time_mock_concatenate_numpy(self):
+ mock_concatenate(self.numpy_arrays, axis=0)
+
+ def time_mock_concatenate_many(self):
+ mock_concatenate(self.many_arrays, axis=0)
+
+ def time_mock_concatenate_duck(self):
+ mock_concatenate(self.duck_arrays, axis=0)
+
+ def time_mock_concatenate_mixed(self):
+ mock_concatenate(self.mixed_arrays, axis=0)
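The mocks above return None on purpose, so that only the dispatch machinery is timed. For orientation, a minimal functional override under this protocol could look like the sketch below (illustrative only; in NumPy 1.16 the protocol is active only with the NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=1 environment variable set)::

    import numpy as np

    class WrappedArray(object):
        """Illustrative duck array: unwraps itself and defers to NumPy."""
        def __init__(self, data):
            self.data = np.asarray(data)

        def __array_function__(self, func, types, args, kwargs):
            def unwrap(x):
                # peel WrappedArray out of scalars, lists and tuples
                if isinstance(x, WrappedArray):
                    return x.data
                if isinstance(x, (list, tuple)):
                    return type(x)(unwrap(i) for i in x)
                return x
            return func(*[unwrap(a) for a in args], **kwargs)

    print(np.concatenate([WrappedArray([1, 2]), WrappedArray([3])]))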
diff --git a/benchmarks/benchmarks/bench_reduce.py b/benchmarks/benchmarks/bench_reduce.py
index 353eb980c..ffc148cd2 100644
--- a/benchmarks/benchmarks/bench_reduce.py
+++ b/benchmarks/benchmarks/bench_reduce.py
@@ -29,8 +29,10 @@ class AddReduceSeparate(Benchmark):
class AnyAll(Benchmark):
def setup(self):
- self.zeros = np.zeros(100000, bool)
- self.ones = np.ones(100000, bool)
+ # avoid np.zeros's lazy allocation that would
+ # cause page faults during benchmark
+ self.zeros = np.full(100000, 0, bool)
+ self.ones = np.full(100000, 1, bool)
def time_all_fast(self):
self.zeros.all()
diff --git a/benchmarks/benchmarks/bench_shape_base.py b/benchmarks/benchmarks/bench_shape_base.py
index 9d0f0ae04..187b923cd 100644
--- a/benchmarks/benchmarks/bench_shape_base.py
+++ b/benchmarks/benchmarks/bench_shape_base.py
@@ -23,7 +23,9 @@ class Block(Benchmark):
self.four_1d = np.ones(6 * n)
self.five_0d = np.ones(1 * n)
self.six_1d = np.ones(5 * n)
- self.zero_2d = np.zeros((2 * n, 6 * n))
+ # avoid np.zeros's lazy allocation that might cause
+ # page faults during benchmark
+ self.zero_2d = np.full((2 * n, 6 * n), 0)
self.one = np.ones(3 * n)
self.two = 2 * np.ones((3, 3 * n))
@@ -31,19 +33,9 @@ class Block(Benchmark):
self.four = 4 * np.ones(3 * n)
self.five = 5 * np.ones(1 * n)
self.six = 6 * np.ones(5 * n)
- self.zero = np.zeros((2 * n, 6 * n))
-
- self.a000 = np.ones((2 * n, 2 * n, 2 * n), int) * 1
-
- self.a100 = np.ones((3 * n, 2 * n, 2 * n), int) * 2
- self.a010 = np.ones((2 * n, 3 * n, 2 * n), int) * 3
- self.a001 = np.ones((2 * n, 2 * n, 3 * n), int) * 4
-
- self.a011 = np.ones((2 * n, 3 * n, 3 * n), int) * 5
- self.a101 = np.ones((3 * n, 2 * n, 3 * n), int) * 6
- self.a110 = np.ones((3 * n, 3 * n, 2 * n), int) * 7
-
- self.a111 = np.ones((3 * n, 3 * n, 3 * n), int) * 8
+ # avoid np.zeros's lazy allocation that might cause
+ # page faults during benchmark
+ self.zero = np.full((2 * n, 6 * n), 0)
def time_block_simple_row_wise(self, n):
np.block([self.a_2d, self.b_2d])
@@ -72,8 +64,56 @@ class Block(Benchmark):
[self.zero]
])
- def time_3d(self, n):
- np.block([
+ def time_no_lists(self, n):
+ np.block(1)
+ np.block(np.eye(3 * n))
+
+
+class Block2D(Benchmark):
+ params = [[(16, 16), (32, 32), (64, 64), (128, 128), (256, 256), (512, 512), (1024, 1024)],
+ ['uint8', 'uint16', 'uint32', 'uint64'],
+ [(2, 2), (4, 4)]]
+ param_names = ['shape', 'dtype', 'n_chunks']
+
+ def setup(self, shape, dtype, n_chunks):
+
+ self.block_list = [
+ [np.full(shape=[s//n_chunk for s, n_chunk in zip(shape, n_chunks)],
+ fill_value=1, dtype=dtype) for _ in range(n_chunks[1])]
+ for _ in range(n_chunks[0])
+ ]
+
+ def time_block2d(self, shape, dtype, n_chunks):
+ np.block(self.block_list)
+
+
+class Block3D(Benchmark):
+ """This benchmark concatenates an array of size ``(5n)^3``"""
+ # Having copy as a `mode` of the block3D
+ # allows us to directly compare the benchmark of block
+ # to that of a direct memory copy into new buffers with
+ # the ASV framework.
+ # block and copy will be plotted on the same graph
+ # as opposed to being displayed as separate benchmarks
+ params = [[1, 10, 100],
+ ['block', 'copy']]
+ param_names = ['n', 'mode']
+
+ def setup(self, n, mode):
+ # Slow setup method: hence separated from the others above
+ self.a000 = np.ones((2 * n, 2 * n, 2 * n), int) * 1
+
+ self.a100 = np.ones((3 * n, 2 * n, 2 * n), int) * 2
+ self.a010 = np.ones((2 * n, 3 * n, 2 * n), int) * 3
+ self.a001 = np.ones((2 * n, 2 * n, 3 * n), int) * 4
+
+ self.a011 = np.ones((2 * n, 3 * n, 3 * n), int) * 5
+ self.a101 = np.ones((3 * n, 2 * n, 3 * n), int) * 6
+ self.a110 = np.ones((3 * n, 3 * n, 2 * n), int) * 7
+
+ self.a111 = np.ones((3 * n, 3 * n, 3 * n), int) * 8
+
+ self.block = [
[
[self.a000, self.a001],
[self.a010, self.a011],
@@ -82,8 +122,17 @@ class Block(Benchmark):
[self.a100, self.a101],
[self.a110, self.a111],
]
- ])
-
- def time_no_lists(self, n):
- np.block(1)
- np.block(np.eye(3 * n))
+ ]
+ self.arr_list = [a
+ for two_d in self.block
+ for one_d in two_d
+ for a in one_d]
+
+ def time_3d(self, n, mode):
+ if mode == 'block':
+ np.block(self.block)
+ else: # mode == 'copy'
+ [arr.copy() for arr in self.arr_list]
+
+ # Retain old benchmark name for backward compat
+ time_3d.benchmark_name = "bench_shape_base.Block.time_3d"
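For readers new to the function under test: np.block assembles one array out of a nested list of blocks, with the innermost lists concatenated along the last axis and each outer nesting level along the next axis up. A two-dimensional sketch (not part of the patch)::

    import numpy as np

    A = np.ones((2, 2))
    B = np.zeros((2, 2))
    # inner lists concatenate along axis -1, the outer list along axis -2
    M = np.block([[A, B],
                  [B, A]])
    print(M.shape)  # (4, 4)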
diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py
index eb9c3cf3b..a7e385f70 100644
--- a/benchmarks/benchmarks/bench_ufunc.py
+++ b/benchmarks/benchmarks/bench_ufunc.py
@@ -150,3 +150,62 @@ class Scalar(Benchmark):
def time_add_scalar_conv_complex(self):
(self.y + self.z)
+
+
+class ArgPack(object):
+ __slots__ = ['args', 'kwargs']
+ def __init__(self, *args, **kwargs):
+ self.args = args
+ self.kwargs = kwargs
+ def __repr__(self):
+ return '({})'.format(', '.join(
+ [repr(a) for a in self.args] +
+ ['{}={}'.format(k, repr(v)) for k, v in self.kwargs.items()]
+ ))
+
+
+class ArgParsing(Benchmark):
+ # In order to benchmark the speed of argument parsing, all but the
+ # out arguments are chosen such that they have no effect on the
+ # calculation. In particular, subok=True and where=True are
+ # defaults, and the dtype is the correct one (the latter will
+ # still have some effect on the search for the correct inner loop).
+ x = np.array(1.)
+ y = np.array(2.)
+ out = np.array(3.)
+ param_names = ['arg_kwarg']
+ params = [[
+ ArgPack(x, y),
+ ArgPack(x, y, out),
+ ArgPack(x, y, out=out),
+ ArgPack(x, y, out=(out,)),
+ ArgPack(x, y, out=out, subok=True, where=True),
+ ArgPack(x, y, subok=True),
+ ArgPack(x, y, subok=True, where=True),
+ ArgPack(x, y, out, subok=True, where=True)
+ ]]
+
+ def time_add_arg_parsing(self, arg_pack):
+ np.add(*arg_pack.args, **arg_pack.kwargs)
+
+
+class ArgParsingReduce(Benchmark):
+ # In order to benchmark the speed of argument parsing, all but the
+ # out arguments are chosen such that they have minimal effect on the
+ # calculation.
+ a = np.arange(2.)
+ out = np.array(0.)
+ param_names = ['arg_kwarg']
+ params = [[
+ ArgPack(a,),
+ ArgPack(a, 0),
+ ArgPack(a, axis=0),
+ ArgPack(a, 0, None),
+ ArgPack(a, axis=0, dtype=None),
+ ArgPack(a, 0, None, out),
+ ArgPack(a, axis=0, dtype=None, out=out),
+ ArgPack(a, out=out)
+ ]]
+
+ def time_add_reduce_arg_parsing(self, arg_pack):
+ np.add.reduce(*arg_pack.args, **arg_pack.kwargs)
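Each ArgPack above freezes one spelling of the same np.add or np.add.reduce call, so ASV reports one timing per spelling. Written out directly, a few of the benchmarked variants are simply (a sketch, not part of the patch)::

    import numpy as np

    x, y, out = np.array(1.), np.array(2.), np.array(3.)
    np.add(x, y)                                    # positional only
    np.add(x, y, out)                               # positional out
    np.add(x, y, out=out, subok=True, where=True)   # explicit defaults

    a = np.arange(2.)
    np.add.reduce(a, axis=0, dtype=None, out=np.array(0.))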
diff --git a/doc/CAPI.rst.txt b/doc/CAPI.rst.txt
deleted file mode 100644
index f38815e2a..000000000
--- a/doc/CAPI.rst.txt
+++ /dev/null
@@ -1,320 +0,0 @@
-===============
-C-API for NumPy
-===============
-
-:Author: Travis Oliphant
-:Discussions to: `numpy-discussion@python.org`__
-:Created: October 2005
-
-__ http://scipy.org/scipylib/mailing-lists.html
-
-The C API of NumPy is (mostly) backward compatible with Numeric.
-
-There are a few non-standard Numeric usages (that were not really part
-of the API) that will need to be changed:
-
-* If you used any of the function pointers in the ``PyArray_Descr``
- structure you will have to modify your usage of those. First,
- the pointers are all under the member named ``f``. So ``descr->cast``
- is now ``descr->f->cast``. In addition, the
- casting functions have eliminated the strides argument (use
- ``PyArray_CastTo`` if you need strided casting). All functions have
- one or two ``PyArrayObject *`` arguments at the end. This allows the
- flexible arrays and mis-behaved arrays to be handled.
-
-* The ``descr->zero`` and ``descr->one`` constants have been replaced with
- function calls, ``PyArray_Zero``, and ``PyArray_One`` (be sure to read the
- code and free the resulting memory if you use these calls).
-
-* If you passed ``array->dimensions`` and ``array->strides`` around
- to functions, you will need to fix some code. These are now
- ``npy_intp*`` pointers. On 32-bit systems there won't be a problem.
- However, on 64-bit systems, you will need to make changes to avoid
- errors and segfaults.
-
-
-The header files ``arrayobject.h`` and ``ufuncobject.h`` contain many defines
-that you may find useful. The files ``__ufunc_api.h`` and
-``__multiarray_api.h`` contain the available C-API function calls with
-their function signatures.
-
-All of these headers are installed to
-``<YOUR_PYTHON_LOCATION>/site-packages/numpy/core/include``
-
-
-Getting arrays in C-code
-=========================
-
-All new arrays can be created using ``PyArray_NewFromDescr``. A simple interface
-equivalent to ``PyArray_FromDims`` is ``PyArray_SimpleNew(nd, dims, typenum)``
-and to ``PyArray_FromDimsAndData`` is
-``PyArray_SimpleNewFromData(nd, dims, typenum, data)``.
-
-This is a very flexible function.
-
-::
-
- PyObject * PyArray_NewFromDescr(PyTypeObject *subtype, PyArray_Descr *descr,
- int nd, npy_intp *dims,
- npy_intp *strides, char *data,
- int flags, PyObject *obj);
-
-``subtype`` : ``PyTypeObject *``
- The subtype that should be created (either pass in
- ``&PyArray_Type``, or ``obj->ob_type``,
- where ``obj`` is an instance of a subtype (or subclass) of
- ``PyArray_Type``).
-
-``descr`` : ``PyArray_Descr *``
- The type descriptor for the array. This is a Python object (this
- function steals a reference to it). The easiest way to get one is
- using ``PyArray_DescrFromType(<typenum>)``. If you want to use a
- flexible size array, then you need to use
- ``PyArray_DescrNewFromType(<flexible typenum>)`` and set its ``elsize``
- parameter to the desired size. The typenum in both of these cases
- is one of the ``PyArray_XXXX`` enumerated types.
-
-``nd`` : ``int``
- The number of dimensions (<``MAX_DIMS``)
-
-``*dims`` : ``npy_intp *``
- A pointer to the size in each dimension. Information will be
- copied from here.
-
-``*strides`` : ``npy_intp *``
- The strides this array should have. For new arrays created by this
- routine, this should be ``NULL``. If you pass in memory for this array
- to use, then you can pass in the strides information as well
- (otherwise it will be created for you and default to C-contiguous
- or Fortran contiguous). Any strides will be copied into the array
- structure. Do not pass in bad strides information!!!!
-
- ``PyArray_CheckStrides(...)`` can help but you must call it if you are
- unsure. You cannot pass in strides information when data is ``NULL``
- and this routine is creating its own memory.
-
-``*data`` : ``char *``
- ``NULL`` for creating brand-new memory. If you want this array to wrap
- another memory area, then pass the pointer here. You are
- responsible for deleting the memory in that case, but do not do so
- until the new array object has been deleted. The best way to
- handle that is to get the memory from another Python object,
- ``INCREF`` that Python object after passing its data pointer to this
- routine, and set the ``->base`` member of the returned array to the
- Python object. *You are responsible for* setting ``PyArray_BASE(ret)``
- to the base object. Failure to do so will create a memory leak.
-
- If you pass in a data buffer, the ``flags`` argument will be the flags
- of the new array. If you create a new array, a non-zero flags
- argument indicates that you want the array to be in Fortran order.
-
-``flags`` : ``int``
- Either the flags showing how to interpret the data buffer passed
- in, or if a new array is created, nonzero to indicate a Fortran
- order array. See below for an explanation of the flags.
-
-``obj`` : ``PyObject *``
- If subtypes is ``&PyArray_Type``, this argument is
- ignored. Otherwise, the ``__array_finalize__`` method of the subtype
- is called (if present) and passed this object. This is usually an
- array of the type to be created (so the ``__array_finalize__`` method
- must handle an array argument. But, it can be anything...)
-
-Note: The returned array object will be uninitialized unless the type is
-``PyArray_OBJECT`` in which case the memory will be set to ``NULL``.
-
-``PyArray_SimpleNew(nd, dims, typenum)`` is a drop-in replacement for
-``PyArray_FromDims`` (except it takes ``npy_intp*`` dims instead of ``int*`` dims
-which matters on 64-bit systems) and it does not initialize the memory
-to zero.
-
-``PyArray_SimpleNew`` is just a macro for ``PyArray_New`` with default arguments.
-Use ``PyArray_FILLWBYTE(arr, 0)`` to fill with zeros.
-
-The ``PyArray_FromDims`` and family of functions are still available and
-are loose wrappers around this function. These functions still take
-``int *`` arguments. This should be fine on 32-bit systems, but on 64-bit
-systems you may run into trouble if you frequently passed
-``PyArray_FromDims`` the dimensions member of the old ``PyArrayObject`` structure
-because ``sizeof(npy_intp) != sizeof(int)``.
-
-
-Getting an arrayobject from an arbitrary Python object
-======================================================
-
-``PyArray_FromAny(...)``
-
-This function replaces ``PyArray_ContiguousFromObject`` and friends (those
-function calls still remain but they are loose wrappers around the
-``PyArray_FromAny`` call).
-
-::
-
- static PyObject *
- PyArray_FromAny(PyObject *op, PyArray_Descr *dtype, int min_depth,
- int max_depth, int requires, PyObject *context)
-
-
-``op`` : ``PyObject *``
- The Python object to "convert" to an array object
-
-``dtype`` : ``PyArray_Descr *``
- The desired data-type descriptor. This can be ``NULL``, if the
- descriptor should be determined by the object. Unless ``FORCECAST`` is
- present in ``flags``, this call will generate an error if the data
- type cannot be safely obtained from the object.
-
-``min_depth`` : ``int``
- The minimum depth of array needed or 0 if doesn't matter
-
-``max_depth`` : ``int``
- The maximum depth of array allowed or 0 if doesn't matter
-
-``requires`` : ``int``
- A flag indicating the "requirements" of the returned array. These
- are the usual ndarray flags (see `NDArray flags`_ below). In
- addition, there are three flags used only for the ``FromAny``
- family of functions:
-
- - ``ENSURECOPY``: always copy the array. Returned arrays always
- have ``CONTIGUOUS``, ``ALIGNED``, and ``WRITEABLE`` set.
- - ``ENSUREARRAY``: ensure the returned array is an ndarray.
- - ``FORCECAST``: cause a cast to occur regardless of whether or
- not it is safe.
-
-``context`` : ``PyObject *``
- If the Python object ``op`` is not a numpy array, but has an
- ``__array__`` method, context is passed as the second argument to
- that method (the first is the typecode). Almost always this
- parameter is ``NULL``.
-
-
-``PyArray_ContiguousFromAny(op, typenum, min_depth, max_depth)`` is
-equivalent to ``PyArray_ContiguousFromObject(...)`` (which is still
-available), except it will return the subclass if op is already a
-subclass of the ndarray. The ``ContiguousFromObject`` version will
-always return an ndarray.
-
-Passing Data Type information to C-code
-=======================================
-
-All datatypes are handled using the ``PyArray_Descr *`` structure.
-This structure can be obtained from a Python object using
-``PyArray_DescrConverter`` and ``PyArray_DescrConverter2``. The former
-returns the default ``PyArray_LONG`` descriptor when the input object
-is None, while the latter returns ``NULL`` when the input object is ``None``.
-
-See the ``arraymethods.c`` and ``multiarraymodule.c`` files for many
-examples of usage.
-
-Getting at the structure of the array.
---------------------------------------
-
-You should use the ``#defines`` provided to access array structure portions:
-
-- ``PyArray_DATA(obj)`` : returns a ``void *`` to the array data
-- ``PyArray_BYTES(obj)`` : return a ``char *`` to the array data
-- ``PyArray_ITEMSIZE(obj)``
-- ``PyArray_NDIM(obj)``
-- ``PyArray_DIMS(obj)``
-- ``PyArray_DIM(obj, n)``
-- ``PyArray_STRIDES(obj)``
-- ``PyArray_STRIDE(obj,n)``
-- ``PyArray_DESCR(obj)``
-- ``PyArray_BASE(obj)``
-
-see more in ``arrayobject.h``
-
-
-NDArray Flags
-=============
-
-The ``flags`` attribute of the ``PyArrayObject`` structure contains important
-information about the memory used by the array (pointed to by the data member).
-This flags information must be kept accurate or strange results and even
-segfaults may result.
-
-There are 6 (binary) flags that describe the memory area used by the
-data buffer. These constants are defined in ``arrayobject.h`` and
-determine the bit-position of the flag. Python exposes a nice attribute-
-based interface as well as a dictionary-like interface for getting
-(and, if appropriate, setting) these flags.
-
-Memory areas of all kinds can be pointed to by an ndarray, necessitating
-these flags. If you get an arbitrary ``PyArrayObject`` in C-code,
-you need to be aware of the flags that are set.
-If you need to guarantee a certain kind of array
-(like ``NPY_CONTIGUOUS`` and ``NPY_BEHAVED``), then pass these requirements into the
-PyArray_FromAny function.
-
-
-``NPY_CONTIGUOUS``
- True if the array is (C-style) contiguous in memory.
-``NPY_FORTRAN``
- True if the array is (Fortran-style) contiguous in memory.
-
-Notice that contiguous 1-d arrays are always both ``NPY_FORTRAN`` contiguous
-and C contiguous. Both of these flags can be checked and are convenience
-flags only as whether or not an array is ``NPY_CONTIGUOUS`` or ``NPY_FORTRAN``
-can be determined by the ``strides``, ``dimensions``, and ``itemsize``
-attributes.
-
-``NPY_OWNDATA``
- True if the array owns the memory (it will try and free it using
- ``PyDataMem_FREE()`` on deallocation --- so it better really own it).
-
-These three flags facilitate using a data pointer that is a memory-mapped
-array, or part of some larger record array. But, they may have other uses...
-
-``NPY_ALIGNED``
- True if the data buffer is aligned for the type and the strides
- are multiples of the alignment factor as well. This can be
- checked.
-
-``NPY_WRITEABLE``
- True only if the data buffer can be "written" to.
-
-``NPY_WRITEBACKIFCOPY``
- This is a special flag that is set if this array represents a copy
- made because a user required certain flags in ``PyArray_FromAny`` and
- a copy had to be made of some other array (and the user asked for
- this flag to be set in such a situation). The base attribute then
- points to the "misbehaved" array (which is set read_only). If you use
- this flag, you must call ``PyArray_ResolveWritebackIfCopy`` before
- deallocating this array (i.e. before calling ``Py_DECREF`` the last time)
- which will write the data contents back to the "misbehaved" array (casting
- if necessary) and will reset the "misbehaved" array to ``WRITEABLE``. If
- the "misbehaved" array was not ``WRITEABLE`` to begin with then
- ``PyArray_FromAny`` would have returned an error because ``WRITEBACKIFCOPY``
- would not have been possible. In error conditions, call
- ``PyArray_DiscardWritebackIfCopy`` to throw away the scratch buffer, then
- ``Py_DECREF`` or ``Py_XDECREF``.
-
-``NPY_UPDATEIFCOPY``
- Similar to ``NPY_WRITEBACKIFCOPY``, but deprecated since it copied the
- contents back when the array is deallocated, which is not explicit and
- relies on refcount semantics. Refcount semantics are unreliable on
- alternative implementations of python such as PyPy.
-
-``PyArray_UpdateFlags(obj, flags)`` will update the ``obj->flags`` for
-``flags`` which can be any of ``NPY_CONTIGUOUS``, ``NPY_FORTRAN``, ``NPY_ALIGNED``, or
-``NPY_WRITEABLE``.
-
-Some useful combinations of these flags:
-
-- ``NPY_BEHAVED = NPY_ALIGNED | NPY_WRITEABLE``
-- ``NPY_CARRAY = NPY_DEFAULT = NPY_CONTIGUOUS | NPY_BEHAVED``
-- ``NPY_CARRAY_RO = NPY_CONTIGUOUS | NPY_ALIGNED``
-- ``NPY_FARRAY = NPY_FORTRAN | NPY_BEHAVED``
-- ``NPY_FARRAY_RO = NPY_FORTRAN | NPY_ALIGNED``
-
-The macro ``PyArray_CHECKFLAGS(obj, flags)`` can test any combination of flags.
-There are several default combinations defined as macros already
-(see ``arrayobject.h``)
-
-In particular, there are ``ISBEHAVED``, ``ISBEHAVED_RO``, ``ISCARRAY``
-and ``ISFARRAY`` macros that also check to make sure the array is in
-native byte order (as determined by the data-type descriptor).
-
-There are more C-API enhancements which you can discover in the code,
-or buy the book (http://www.trelgol.com)
diff --git a/doc/HOWTO_DOCUMENT.rst.txt b/doc/HOWTO_DOCUMENT.rst.txt
index 6b3640d8d..8f0d2fbae 100644
--- a/doc/HOWTO_DOCUMENT.rst.txt
+++ b/doc/HOWTO_DOCUMENT.rst.txt
@@ -1 +1 @@
-This document has been replaced, see https://numpydoc.readthedocs.io/en/latest/
+This document has been replaced, see https://numpydoc.readthedocs.io/en/latest/format.html#docstring-standard
diff --git a/doc/HOWTO_RELEASE.rst.txt b/doc/HOWTO_RELEASE.rst.txt
index 3ed15e99c..a6a8fe8ab 100644
--- a/doc/HOWTO_RELEASE.rst.txt
+++ b/doc/HOWTO_RELEASE.rst.txt
@@ -18,16 +18,16 @@ Source tree
NumPy Docs
----------
* https://github.com/numpy/numpy/blob/master/doc/HOWTO_RELEASE.rst.txt
-* http://projects.scipy.org/numpy/wiki/MicrosoftToolchainSupport
+* http://projects.scipy.org/numpy/wiki/MicrosoftToolchainSupport (dead link)
SciPy.org wiki
--------------
-* http://www.scipy.org/Installing_SciPy and links on that page.
-* http://new.scipy.org/building/windows.html
+* https://www.scipy.org/Installing_SciPy and links on that page.
+* http://new.scipy.org/building/windows.html (dead link)
Doc wiki
--------
-* http://docs.scipy.org/numpy/docs/numpy-docs/user/install.rst/
+* http://docs.scipy.org/numpy/docs/numpy-docs/user/install.rst/ (dead link)
Release Scripts
---------------
@@ -56,7 +56,7 @@ Windows
We build 32- and 64-bit wheels for Python 2.7, 3.4, 3.5 on Windows. Windows
XP, Vista, 7, 8 and 10 are supported. We build numpy using the MSVC compilers
on Appveyor, but we are hoping to update to a `mingw-w64 toolchain
-<http://mingwpy.github.io>`_. The Windows wheels use ATLAS for BLAS / LAPACK.
+<https://mingwpy.github.io>`_. The Windows wheels use ATLAS for BLAS / LAPACK.
Linux
-----
@@ -101,7 +101,7 @@ Building source archives and wheels
You will need write permission for numpy-wheels in order to trigger wheel
builds.
-* Python(s) from `python.org <http://python.org>`_ or linux distro.
+* Python(s) from `python.org <https://python.org>`_ or linux distro.
* cython
* virtualenv (pip)
* Paver (pip)
@@ -131,7 +131,7 @@ Generating author/pr lists
--------------------------
You will need a personal access token
-`<https://help.github.com/articles/creating-an-access-token-for-command-line-use/>`_
+`<https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line/>`_
so that scripts can access the github numpy repository
* gitpython (pip)
@@ -206,7 +206,7 @@ Do::
to check that the documentation is in a buildable state. See
doc/HOWTO_BUILD_DOCS.rst.txt for more details and for how to update
-http://docs.scipy.org.
+https://docs.scipy.org.
Check deprecations
------------------
@@ -322,7 +322,7 @@ message). Unfortunately the name of a tag can be changed without breaking the
signature, the contents of the message cannot.
See : https://github.com/scipy/scipy/issues/4919 for a discussion of signing
-release tags, and http://keyring.debian.org/creating-key.html for instructions
+release tags, and https://keyring.debian.org/creating-key.html for instructions
on creating a GPG key if you do not have one.
To make your key more readily identifiable as you, consider sending your key
@@ -387,7 +387,7 @@ The tar-files and binary releases for distribution should be uploaded to SourceF
together with the Release Notes and the Changelog. Uploading can be done
through a web interface or, more efficiently, through scp/sftp/rsync as
described in the SourceForge
-`upload guide <https://sourceforge.net/apps/trac/sourceforge/wiki/Release%20files%20for%20download>`_.
+`upload guide <https://sourceforge.net/apps/trac/sourceforge/wiki/Release%20files%20for%20download>`_ (dead link).
For example::
scp <filename> <username>,numpy@frs.sourceforge.net:/home/frs/project/n/nu/numpy/NumPy/<releasedir>/
@@ -403,44 +403,18 @@ expecting a binary wheel.
You can do this automatically using the ``wheel-uploader`` script from
https://github.com/MacPython/terryfy. Here is the recommended incantation for
-downloading all the Windows, Manylinux, OSX wheels and uploading to PyPI.
+downloading all the Windows, Manylinux, OSX wheels and uploading to PyPI. ::
-::
-
- cd ~/wheelhouse # local directory to cache wheel downloads
+ NPY_WHLS=~/wheelhouse # local directory to cache wheel downloads
CDN_URL=https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com
- wheel-uploader -u $CDN_URL -w warehouse -v -s -t win numpy 1.11.1rc1
+ wheel-uploader -u $CDN_URL -w $NPY_WHLS -v -s -t win numpy 1.11.1rc1
wheel-uploader -u $CDN_URL -w $NPY_WHLS -v -s -t macosx numpy 1.11.1rc1
wheel-uploader -u $CDN_URL -w $NPY_WHLS -v -s -t manylinux1 numpy 1.11.1rc1
The ``-v`` flag gives verbose feedback, ``-s`` causes the script to sign the
-wheels with your GPG key before upload. ``-r warehouse`` causes the upload to
-use the Warehouse PyPI server. This is a good idea because the Warehouse
-server seems to be a lot more reliable in receiving automated wheel uploads.
-For this flag to work, you will need a ``warehouse`` section in your
-``~/.pypirc`` file, of form:
-
- [distutils]
- index-servers =
- pypi
- warehouse
-
- [pypi]
- username:your_user_name
- password:your_password
-
- [warehouse]
- repository: https://upload.pypi.io/legacy/
- username: your_user_name
- password: your_password
-
- [server-login]
- username:your_user_name
- password:your_password
-
-Don't forget to upload the wheels before the source tarball, so there is no
-period for which people switch from an expected binary install to a source
-install from PyPI.
+wheels with your GPG key before upload. Don't forget to upload the wheels
+before the source tarball, so there is no period for which people switch from
+an expected binary install to a source install from PyPI.
There are two ways to update the source release on PyPI, the first one is::
@@ -472,7 +446,7 @@ repository.
Update docs.scipy.org
---------------------
-All documentation for a release can be updated on http://docs.scipy.org/ with:
+All documentation for a release can be updated on https://docs.scipy.org/ with:
make dist
make upload USERNAME=<yourname> RELEASE=1.11.0
diff --git a/doc/Makefile b/doc/Makefile
index 667dbef29..d61d115f0 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -1,7 +1,7 @@
# Makefile for Sphinx documentation
#
-PYVER = 3.6
+PYVER = 3
PYTHON = python$(PYVER)
# You can set these variables from the command line.
diff --git a/doc/Py3K.rst.txt b/doc/Py3K.rst.txt
index 44111eeb5..f78b9e5db 100644
--- a/doc/Py3K.rst.txt
+++ b/doc/Py3K.rst.txt
@@ -22,8 +22,8 @@ Resources
Information on porting to 3K:
-- http://wiki.python.org/moin/cporting
-- http://wiki.python.org/moin/PortingExtensionModulesToPy3k
+- https://wiki.python.org/moin/cporting
+- https://wiki.python.org/moin/PortingExtensionModulesToPy3k
Prerequisites
diff --git a/doc/RELEASE_WALKTHROUGH.rst.txt b/doc/RELEASE_WALKTHROUGH.rst.txt
index ad14c16c1..960bb3f3e 100644
--- a/doc/RELEASE_WALKTHROUGH.rst.txt
+++ b/doc/RELEASE_WALKTHROUGH.rst.txt
@@ -1,23 +1,41 @@
-This file contains a walkthrough of the NumPy 1.14.4 release on Linux.
+This file contains a walkthrough of the NumPy 1.14.5 release on Linux.
The commands can be copied into the command line, but be sure to
-replace 1.14.4 by the correct version.
+replace 1.14.5 by the correct version.
+
Release Walkthrough
====================
+
+Backport Pull Requests
+----------------------
+
+Changes that have been marked for this release must be backported to the
+maintenance/1.14.x branch.
+
+
Update Release documentation
----------------------------
-The file ``doc/changelog/1.14.4-changelog.rst`` should be updated to reflect
+The file ``doc/changelog/1.14.5-changelog.rst`` should be updated to reflect
the final list of changes and contributors. This text can be generated by::
- $ python tools/changelog.py $GITHUB v1.14.3..maintenance/1.14.x > doc/changelog/1.14.4-changelog.rst
+ $ python tools/changelog.py $GITHUB v1.14.4..maintenance/1.14.x > doc/changelog/1.14.5-changelog.rst
where ``GITHUB`` contains your github access token. This text may also be
-appended to ``doc/release/1.14.4-notes.rst`` for release updates, though not
-for new releases like ``1.14.0``, as the changelogs for latter tend to be
+appended to ``doc/release/1.14.5-notes.rst`` for release updates, though not
+for new releases like ``1.14.0``, as the changelogs for ``*.0`` releases tend to be
excessively long. The ``doc/source/release.rst`` file should also be
-updated with a link to the new release notes.
+updated with a link to the new release notes. These changes should be committed
+to the maintenance branch, and later will be forward ported to master.
+
+
+Finish the Release Note
+-----------------------
+
+Fill out the release note ``doc/release/1.14.5-notes.rst`` calling out
+significant changes.
+
Prepare the release commit
--------------------------
@@ -28,12 +46,12 @@ repository::
$ git checkout maintenance/1.14.x
$ git pull upstream maintenance/1.14.x
$ git submodule update
- $ git clean -xdf
+ $ git clean -xdf > /dev/null
Edit pavement.py and setup.py as detailed in HOWTO_RELEASE::
$ gvim pavement.py setup.py
- $ git commit -a -m"REL: NumPy 1.14.4 release."
+ $ git commit -a -m"REL: NumPy 1.14.5 release."
Sanity check::
@@ -47,14 +65,17 @@ requires write permission to the numpy repository::
As an example, see the 1.14.3 REL commit: `<https://github.com/numpy/numpy/commit/73299826729be58cec179b52c656adfcaefada93>`_.
+
Build source releases
---------------------
Paver is used to build the source releases. It will create the ``release`` and
``release/installers`` directories and put the ``*.zip`` and ``*.tar.gz``
-source releases in the latter.
+source releases in the latter. ::
+
+ $ cython --version # check that you have the correct cython version
+ $ paver sdist # sdist will do a git clean -xdf, so we omit that
- $ paver sdist # sdist will do a git clean -xdf, so we omit that
Build wheels
------------
@@ -73,7 +94,7 @@ Edit the ``.travis.yml`` and ``.appveyor.yml`` files to make sure they have the
correct version, and put in the commit hash for the ``REL`` commit created
above for ``BUILD_COMMIT``; see the example_ from `v1.14.3`::
- $ gvim .travis.yml appveyor.yml
+ $ gvim .travis.yml .appveyor.yml
$ git commit -a
$ git push origin HEAD
@@ -88,6 +109,7 @@ need to restart them.
.. _example: https://github.com/MacPython/numpy-wheels/commit/fed9c04629c155e7804282eb803d81097244598d
+
Download wheels
---------------
@@ -102,13 +124,22 @@ upload later using ``twine``::
$ git pull origin master
$ CDN_URL=https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com
$ NPY_WHLS=../numpy/release/installers
- $ ./wheel-uploader -u $CDN_URL -n -v -w $NPY_WHLS -t win numpy 1.14.4
- $ ./wheel-uploader -u $CDN_URL -n -v -w $NPY_WHLS -t manylinux1 numpy 1.14.4
- $ ./wheel-uploader -u $CDN_URL -n -v -w $NPY_WHLS -t macosx numpy 1.14.4
+ $ ./wheel-uploader -u $CDN_URL -n -v -w $NPY_WHLS -t win numpy 1.14.5
+ $ ./wheel-uploader -u $CDN_URL -n -v -w $NPY_WHLS -t manylinux1 numpy 1.14.5
+ $ ./wheel-uploader -u $CDN_URL -n -v -w $NPY_WHLS -t macosx numpy 1.14.5
If you do this often, consider making CDN_URL and NPY_WHLS part of your default
-environment. Note that we need local copies of the files in order to generate
-hashes to include in the README files generated later.
+environment.
+
+Generate the README files
+-------------------------
+
+This needs to be done after all installers are present, but before the pavement
+file is updated for continued development::
+
+ $ cd ../numpy
+ $ paver write_release
+
Tag the release
---------------
@@ -117,7 +148,7 @@ Once the wheels have been built and downloaded without errors, go back to your
numpy repository in the maintenance branch and tag the ``REL`` commit, signing
it with your gpg key, and build the source distribution archives::
- $ git tag -s v1.14.4
+ $ git tag -s v1.14.5
You should upload your public gpg key to github, so that the tag will appear
"verified" there.
@@ -125,11 +156,12 @@ You should upload your public gpg key to github, so that the tag will appear
Check that the files in ``release/installers`` have the correct versions, then
push the tag upstream::
- $ git push upstream v1.14.4
+ $ git push upstream v1.14.5
We wait until this point to push the tag because it is very difficult to change
the tag after it has been pushed.
+
Reset the maintenance branch into a development state
-----------------------------------------------------
@@ -140,9 +172,6 @@ Add another ``REL`` commit to the numpy maintenance branch, which resets the
$ git commit -a -m"REL: prepare 1.14.x for further development"
$ git push upstream maintenance/1.14.x
-This strategy is copied from the scipy release procedure and was used in numpy
-for the first time in 1.14.3. It needed to be modified a little since numpy
-has more strict requirements for the version number.
Upload to PyPI
--------------
@@ -152,7 +181,7 @@ after recent PyPI changes, version ``1.11.0`` was used here. ::
$ cd ../numpy
$ twine upload release/installers/*.whl
- $ twine upload release/installers/numpy-1.14.4.zip # Upload last.
+ $ twine upload release/installers/numpy-1.14.5.zip # Upload last.
If one of the commands breaks in the middle, which is not uncommon, you may
need to selectively upload the remaining files because PyPI does not allow the
@@ -161,22 +190,19 @@ avoid synchronization problems if pip users access the files while this is in
process. Note that PyPI only allows a single source distribution, here we have
chosen the zip archive.
+
Upload files to github
----------------------
-Generate the ``release/README.*`` files::
-
- $ paver write_release_and_log
-
-Go to `<https://github.com/numpy/numpy/releases>`_, there should be a ``v1.14.4
+Go to `<https://github.com/numpy/numpy/releases>`_, there should be a ``v1.14.5
tag``, click on it and hit the edit button for that tag. There are two ways to
add files: via an editable text window or as binary uploads.
- Cut and paste the ``release/README.md`` file contents into the text window.
-- Upload ``release/installers/numpy-1.14.4.tar.gz`` as a binary file.
-- Upload ``release/installers/numpy-1.14.4.zip`` as a binary file.
+- Upload ``release/installers/numpy-1.14.5.tar.gz`` as a binary file.
+- Upload ``release/installers/numpy-1.14.5.zip`` as a binary file.
- Upload ``release/README.rst`` as a binary file.
-- Upload ``doc/changelog/1.14.4-changelog.rst`` as a binary file.
+- Upload ``doc/changelog/1.14.5-changelog.rst`` as a binary file.
- Check the pre-release button if this is a pre-release.
- Hit the ``{Publish,Update} release`` button at the bottom.
@@ -191,7 +217,7 @@ upload the documentation. Otherwise::
$ pushd doc
$ make dist
- $ make upload USERNAME=<yourname> RELEASE=v1.14.4
+ $ make upload USERNAME=<yourname> RELEASE=v1.14.5
$ popd
If the release series is a new one, you will need to rebuild and upload the
@@ -212,13 +238,14 @@ This assumes that you have forked `<https://github.com/scipy/scipy.org>`_::
$ cd ../scipy.org
$ git checkout master
$ git pull upstream master
- $ git checkout -b numpy-1.14.4
+ $ git checkout -b numpy-1.14.5
$ gvim www/index.rst # edit the News section
$ git commit -a
$ git push origin HEAD
Now go to your fork and make a pull request for the branch.
+
Announce to mailing lists
-------------------------
@@ -226,3 +253,11 @@ The release should be announced on the numpy-discussion, scipy-devel,
scipy-user, and python-announce-list mailing lists. Look at previous
announcements for the basic template. The contributor and PR lists
are the same as generated for the release notes above.
+
+
+Post-Release Tasks
+------------------
+
+Forward port the documentation changes ``doc/release/1.14.5-notes.rst``,
+``doc/changelog/1.14.5-changelog.rst`` and add the release note to
+``doc/source/release.rst``.
diff --git a/doc/cdoc/Doxyfile b/doc/cdoc/Doxyfile
index d80e98558..886a3440e 100644
--- a/doc/cdoc/Doxyfile
+++ b/doc/cdoc/Doxyfile
@@ -18,7 +18,7 @@
# that follow. The default is UTF-8 which is also the encoding used for all
# text before the first occurrence of this tag. Doxygen uses libiconv (or the
# iconv built into libc) for the transcoding. See
-# http://www.gnu.org/software/libiconv for the list of possible encodings.
+# https://www.gnu.org/software/libiconv for the list of possible encodings.
DOXYFILE_ENCODING = UTF-8
@@ -596,7 +596,7 @@ INPUT = ../../numpy/core/src \
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
# also the default input encoding. Doxygen uses libiconv (or the iconv built
-# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
+# into libc) for the transcoding. See https://www.gnu.org/software/libiconv for
# the list of possible encodings.
INPUT_ENCODING = UTF-8
@@ -739,7 +739,7 @@ REFERENCES_LINK_SOURCE = YES
# If the USE_HTAGS tag is set to YES then the references to source code
# will point to the HTML generated by the htags(1) tool instead of doxygen
# built-in source browser. The htags tool is part of GNU's global source
-# tagging system (see http://www.gnu.org/software/global/global.html). You
+# tagging system (see https://www.gnu.org/software/global/global.html). You
# will need version 4.8.6 or higher.
USE_HTAGS = NO
@@ -843,7 +843,8 @@ HTML_DYNAMIC_SECTIONS = NO
# directory and running "make install" will install the docset in
# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
# it at startup.
-# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html for more information.
+# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html (dead link)
+# for more information.
GENERATE_DOCSET = NO
@@ -920,30 +921,30 @@ QCH_FILE =
# The QHP_NAMESPACE tag specifies the namespace to use when generating
# Qt Help Project output. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#namespace
+# https://doc.qt.io/qt-5/qthelpproject.html#namespace
QHP_NAMESPACE = org.doxygen.Project
# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
# Qt Help Project output. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#virtual-folders
+# https://doc.qt.io/qt-5/qthelpproject.html#virtual-folders
QHP_VIRTUAL_FOLDER = doc
# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to add.
# For more information please see
-# http://doc.trolltech.com/qthelpproject.html#custom-filters
+# https://doc.qt.io/qt-5/qthelpproject.html#custom-filters
QHP_CUST_FILTER_NAME =
# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the custom filter to add. For more information please see
-# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">Qt Help Project / Custom Filters</a>.
+# <a href="https://doc.qt.io/qt-5/qthelpproject.html#custom-filters">Qt Help Project / Custom Filters</a>.
QHP_CUST_FILTER_ATTRS =
# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this project's
# filter section matches.
-# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">Qt Help Project / Filter Attributes</a>.
+# <a href="https://doc.qt.io/qt-5/qthelpproject.html#filter-attributes">Qt Help Project / Filter Attributes</a>.
QHP_SECT_FILTER_ATTRS =
diff --git a/doc/cdoc/numpyfilter.py b/doc/cdoc/numpyfilter.py
index 614c50771..0ec50697e 100755
--- a/doc/cdoc/numpyfilter.py
+++ b/doc/cdoc/numpyfilter.py
@@ -14,10 +14,7 @@ import os
import textwrap
import optparse
-if sys.version_info[0] >= 3:
- import pickle
-else:
- import cPickle as pickle
+from numpy.core.numeric import pickle
CACHE_FILE = 'build/rst-cache.pck'
diff --git a/doc/changelog/1.16.0-changelog.rst b/doc/changelog/1.16.0-changelog.rst
new file mode 100644
index 000000000..8aca5e643
--- /dev/null
+++ b/doc/changelog/1.16.0-changelog.rst
@@ -0,0 +1,616 @@
+
+Contributors
+============
+
+A total of 113 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Alan Fontenot +
+* Allan Haldane
+* Alon Hershenhorn +
+* Alyssa Quek +
+* Andreas Nussbaumer +
+* Anner +
+* Anthony Sottile +
+* Antony Lee
+* Ayappan P +
+* Bas van Schaik +
+* C.A.M. Gerlach +
+* Charles Harris
+* Chris Billington
+* Christian Clauss
+* Christoph Gohlke
+* Christopher Pezley +
+* Daniel B Allan +
+* Daniel Smith
+* Dawid Zych +
+* Derek Kim +
+* Dima Pasechnik +
+* Edgar Giovanni Lepe +
+* Elena Mokeeva +
+* Elliott Sales de Andrade +
+* Emil Hessman +
+* Eric Larson
+* Eric Schles +
+* Eric Wieser
+* Giulio Benetti +
+* Guillaume Gautier +
+* Guo Ci
+* Heath Henley +
+* Isuru Fernando +
+* J. Lewis Muir +
+* Jack Vreeken +
+* Jaime Fernandez
+* James Bourbeau
+* Jeff VanOss
+* Jeffrey Yancey +
+* Jeremy Chen +
+* Jeremy Manning +
+* Jeroen Demeyer
+* John Darbyshire +
+* John Kirkham
+* John Zwinck
+* Jonas Jensen +
+* Joscha Reimer +
+* Juan Azcarreta +
+* Julian Taylor
+* Kevin Sheppard
+* Krzysztof Chomski +
+* Kyle Sunden
+* Lars Grüter
+* Lilian Besson +
+* MSeifert04
+* Mark Harfouche
+* Marten van Kerkwijk
+* Martin Thoma
+* Matt Harrigan +
+* Matthew Bowden +
+* Matthew Brett
+* Matthias Bussonnier
+* Matti Picus
+* Max Aifer +
+* Michael Hirsch, Ph.D +
+* Michael James Jamie Schnaitter +
+* MichaelSaah +
+* Mike Toews
+* Minkyu Lee +
+* Mircea Akos Bruma +
+* Mircea-Akos Brumă +
+* Moshe Looks +
+* Muhammad Kasim +
+* Nathaniel J. Smith
+* Nikita Titov +
+* Paul Müller +
+* Paul van Mulbregt
+* Pauli Virtanen
+* Pierre Glaser +
+* Pim de Haan
+* Ralf Gommers
+* Robert Kern
+* Robin Aggleton +
+* Rohit Pandey +
+* Roman Yurchak +
+* Ryan Soklaski
+* Sebastian Berg
+* Sho Nakamura +
+* Simon Gibbons
+* Stan Seibert +
+* Stefan Otte
+* Stefan van der Walt
+* Stephan Hoyer
+* Stuart Archibald
+* Taylor Smith +
+* Tim Felgentreff +
+* Tim Swast +
+* Tim Teichmann +
+* Toshiki Kataoka
+* Travis Oliphant
+* Tyler Reddy
+* Uddeshya Singh +
+* Warren Weckesser
+* Weitang Li +
+* Wenjamin Petrenko +
+* William D. Irons
+* Yannick Jadoul +
+* Yaroslav Halchenko
+* Yug Khanna +
+* Yuji Kanagawa +
+* Yukun Guo +
+* @ankokumoyashi +
+* @lerbuke +
+
+Pull requests merged
+====================
+
+A total of 490 pull requests were merged for this release.
+
+* `#6256 <https://github.com/numpy/numpy/pull/6256>`__: NEP: Add proposal for oindex and vindex.
+* `#6377 <https://github.com/numpy/numpy/pull/6377>`__: BUG: define "uint-alignment", fixes complex64 alignment
+* `#8206 <https://github.com/numpy/numpy/pull/8206>`__: ENH: add padding options to diff
+* `#8923 <https://github.com/numpy/numpy/pull/8923>`__: ENH: Add 'stone' estimator to np.histogram
+* `#8955 <https://github.com/numpy/numpy/pull/8955>`__: ENH: Allow ufunc.identity to be any python object
+* `#9022 <https://github.com/numpy/numpy/pull/9022>`__: BUG: don't silence `__array_wrap__` errors in `ufunc.reduce`
+* `#10551 <https://github.com/numpy/numpy/pull/10551>`__: BUG: memmap close files when it shouldn't, load leaves them open...
+* `#10602 <https://github.com/numpy/numpy/pull/10602>`__: MAINT: Move dtype string functions to python
+* `#10704 <https://github.com/numpy/numpy/pull/10704>`__: NEP 15: Merging multiarray and umath
+* `#10797 <https://github.com/numpy/numpy/pull/10797>`__: DEP: Updated `unravel_index()` to support `shape` kwarg
+* `#10915 <https://github.com/numpy/numpy/pull/10915>`__: ENH: implement nep 0015: merge multiarray and umath
+* `#10998 <https://github.com/numpy/numpy/pull/10998>`__: DOC: removed spurious FIXME comment in number.c
+* `#11002 <https://github.com/numpy/numpy/pull/11002>`__: MAINT: add clearer message to assist users with failed builds.
+* `#11016 <https://github.com/numpy/numpy/pull/11016>`__: ENH: Add AARCH32 support.
+* `#11084 <https://github.com/numpy/numpy/pull/11084>`__: DOC: link to TESTS.rst.txt testing guidelines, tweak testing...
+* `#11119 <https://github.com/numpy/numpy/pull/11119>`__: ENH: Chain exceptions to give better error messages for invalid...
+* `#11175 <https://github.com/numpy/numpy/pull/11175>`__: ENH: Generalized ufunc signature expansion for frozen and flexible...
+* `#11197 <https://github.com/numpy/numpy/pull/11197>`__: BUG/ENH: Removed non-standard scaling of the covariance matrix...
+* `#11234 <https://github.com/numpy/numpy/pull/11234>`__: DOC: Update einsum docs
+* `#11282 <https://github.com/numpy/numpy/pull/11282>`__: MAINT: move comparison operator special-handling out of ufunc...
+* `#11297 <https://github.com/numpy/numpy/pull/11297>`__: NEP: Expansion of gufunc signatures.
+* `#11299 <https://github.com/numpy/numpy/pull/11299>`__: BUG: Prevent crashes on 0-length structured void scalars
+* `#11303 <https://github.com/numpy/numpy/pull/11303>`__: DOC: revision of NEP-18 (`__array_function__`)
+* `#11312 <https://github.com/numpy/numpy/pull/11312>`__: WIP: DOC: slightly tweak the directions to create a release
+* `#11318 <https://github.com/numpy/numpy/pull/11318>`__: REL: Setup master for 1.16 development.
+* `#11323 <https://github.com/numpy/numpy/pull/11323>`__: DEP: Actually deprecate the normed argument to histogram
+* `#11324 <https://github.com/numpy/numpy/pull/11324>`__: MAINT: Don't use dtype strings when the dtypes themselves can...
+* `#11326 <https://github.com/numpy/numpy/pull/11326>`__: DOC: Update master after NumPy 1.14.5 release.
+* `#11328 <https://github.com/numpy/numpy/pull/11328>`__: MAINT: Misc numeric cleanup
+* `#11335 <https://github.com/numpy/numpy/pull/11335>`__: DOC: Change array lengths/entries in `broadcast_arrays` example...
+* `#11336 <https://github.com/numpy/numpy/pull/11336>`__: BUG: decref in failure path; replace `PyObject_Type` by `Py_TYPE`
+* `#11338 <https://github.com/numpy/numpy/pull/11338>`__: MAINT: Ensure ufunc override call each class only once, plus...
+* `#11340 <https://github.com/numpy/numpy/pull/11340>`__: BUG: sctypeDict['f8'] randomly points to double or longdouble...
+* `#11345 <https://github.com/numpy/numpy/pull/11345>`__: BUG/ENH: Einsum optimization path updates and bug fixes.
+* `#11347 <https://github.com/numpy/numpy/pull/11347>`__: DOC: Silence many sphinx warnings
+* `#11348 <https://github.com/numpy/numpy/pull/11348>`__: ENH: Improve support for pathlib.Path objects in load functions
+* `#11349 <https://github.com/numpy/numpy/pull/11349>`__: DOC: document new functions
+* `#11351 <https://github.com/numpy/numpy/pull/11351>`__: MAINT: Improve speed of ufunc kwargs parsing
+* `#11353 <https://github.com/numpy/numpy/pull/11353>`__: DOC, MAINT: HTTP -> HTTPS, and other linkrot fixes
+* `#11356 <https://github.com/numpy/numpy/pull/11356>`__: NEP: Update NEP 19: RNG Policy
+* `#11357 <https://github.com/numpy/numpy/pull/11357>`__: MAINT: Add new `_test.c` files and `benchmarks/html` to `gitignore`
+* `#11365 <https://github.com/numpy/numpy/pull/11365>`__: BUG: add missing NpyIter_Close in einsum
+* `#11366 <https://github.com/numpy/numpy/pull/11366>`__: BUG/TST: String indexing should just fail, not emit a futurewarning
+* `#11371 <https://github.com/numpy/numpy/pull/11371>`__: DOC: Clarify requirement that histogram bins are monotonic.
+* `#11373 <https://github.com/numpy/numpy/pull/11373>`__: TST: Show that histogramdd's normed argument is histogram's density
+* `#11374 <https://github.com/numpy/numpy/pull/11374>`__: WIP: additional revision for NEP-18 (`__array_function__`)
+* `#11376 <https://github.com/numpy/numpy/pull/11376>`__: ENH: Remove NpyIter_Close
+* `#11379 <https://github.com/numpy/numpy/pull/11379>`__: BUG: changed hardcoded axis to 0 for checking indices
+* `#11382 <https://github.com/numpy/numpy/pull/11382>`__: DEP: deprecate undocumented, unused dtype type dicts
+* `#11383 <https://github.com/numpy/numpy/pull/11383>`__: ENH: Allow size=0 in numpy.random.choice
+* `#11385 <https://github.com/numpy/numpy/pull/11385>`__: BUG: Make scalar.squeeze accept axis arg
+* `#11390 <https://github.com/numpy/numpy/pull/11390>`__: REL,MAINT: Update numpyconfig.h for 1.15.
+* `#11391 <https://github.com/numpy/numpy/pull/11391>`__: MAINT: Update mailmap
+* `#11396 <https://github.com/numpy/numpy/pull/11396>`__: TST: Added regression test for #11395
+* `#11405 <https://github.com/numpy/numpy/pull/11405>`__: BUG: Ensure comparisons on scalar strings pass without warning.
+* `#11406 <https://github.com/numpy/numpy/pull/11406>`__: BUG: Ensure out is returned in einsum.
+* `#11409 <https://github.com/numpy/numpy/pull/11409>`__: DOC: Update testing section of README.
+* `#11414 <https://github.com/numpy/numpy/pull/11414>`__: DOC: major revision of NEP 21, advanced indexing
+* `#11422 <https://github.com/numpy/numpy/pull/11422>`__: BENCH: Add benchmarks for np.loadtxt reading from CSV format...
+* `#11424 <https://github.com/numpy/numpy/pull/11424>`__: ENH: Allow use of svd on empty arrays
+* `#11425 <https://github.com/numpy/numpy/pull/11425>`__: DOC: Clear up confusion between np.where(cond) and np.where(cond,...
+* `#11428 <https://github.com/numpy/numpy/pull/11428>`__: BUG: Fix incorrect deprecation logic for histogram(normed=...)...
+* `#11429 <https://github.com/numpy/numpy/pull/11429>`__: NEP: accept NEP 20 partially (frozen, flexible, but not broadcastable...
+* `#11432 <https://github.com/numpy/numpy/pull/11432>`__: MAINT: Refactor differences between cblas_matrixproduct and PyArray_MatrixProduct2
+* `#11434 <https://github.com/numpy/numpy/pull/11434>`__: MAINT: add PyPI classifier for Python 3.7
+* `#11436 <https://github.com/numpy/numpy/pull/11436>`__: DOC: Document average return type
+* `#11440 <https://github.com/numpy/numpy/pull/11440>`__: BUG: fix interpolation with inf and NaN present
+* `#11444 <https://github.com/numpy/numpy/pull/11444>`__: DOC: Fix documentation for fromfunction
+* `#11449 <https://github.com/numpy/numpy/pull/11449>`__: BUG: Revert #10229 to fix DLL loads on Windows.
+* `#11450 <https://github.com/numpy/numpy/pull/11450>`__: MAINT/DEP: properly implement `ndarray.__pos__`
+* `#11453 <https://github.com/numpy/numpy/pull/11453>`__: BENCH: add ufunc argument parsing benchmarks.
+* `#11455 <https://github.com/numpy/numpy/pull/11455>`__: BENCH: belated addition of lcm, gcd to ufunc benchmark.
+* `#11459 <https://github.com/numpy/numpy/pull/11459>`__: NEP: Add some text to NEP 0 to clarify how a NEP is accepted
+* `#11461 <https://github.com/numpy/numpy/pull/11461>`__: MAINT: Add discussion link to NEP 15
+* `#11462 <https://github.com/numpy/numpy/pull/11462>`__: Add NEP 22, a high level overview for the duck array work
+* `#11463 <https://github.com/numpy/numpy/pull/11463>`__: MAINT: Produce a more readable repr of argument packs in benchmark
+* `#11464 <https://github.com/numpy/numpy/pull/11464>`__: BUG: Don't convert inputs to `np.float64` in digitize
+* `#11468 <https://github.com/numpy/numpy/pull/11468>`__: BUG: Advanced indexing assignment incorrectly took 1-D fastpath
+* `#11470 <https://github.com/numpy/numpy/pull/11470>`__: BLD: Don't leave the build task running if runtests.py is interrupted
+* `#11471 <https://github.com/numpy/numpy/pull/11471>`__: MAINT: Remove python-side docstrings from add_newdocs.
+* `#11472 <https://github.com/numpy/numpy/pull/11472>`__: DOC: include NEP number on each NEP page
+* `#11473 <https://github.com/numpy/numpy/pull/11473>`__: MAINT: Move pytesttester outside of np.testing, to avoid creating...
+* `#11474 <https://github.com/numpy/numpy/pull/11474>`__: MAINT: Move add_newdocs into core, since it only adds docs to...
+* `#11479 <https://github.com/numpy/numpy/pull/11479>`__: BUG: Fix #define for ppc64 and ppc64le
+* `#11480 <https://github.com/numpy/numpy/pull/11480>`__: MAINT: move ufunc override code to umath and multiarray as much...
+* `#11482 <https://github.com/numpy/numpy/pull/11482>`__: DOC: Include warning in np.resize() docs
+* `#11484 <https://github.com/numpy/numpy/pull/11484>`__: BUG: Increase required cython version on python 3.7
+* `#11487 <https://github.com/numpy/numpy/pull/11487>`__: DOC: extend sanity check message
+* `#11488 <https://github.com/numpy/numpy/pull/11488>`__: NEP: clarify bugfix policy for legacy RandomState.
+* `#11501 <https://github.com/numpy/numpy/pull/11501>`__: MAINT: Tidy cython invocation
+* `#11503 <https://github.com/numpy/numpy/pull/11503>`__: MAINT: improve error message for isposinf and isneginf on complex...
+* `#11512 <https://github.com/numpy/numpy/pull/11512>`__: DOC: Add templates for issues and PRs
+* `#11514 <https://github.com/numpy/numpy/pull/11514>`__: Prefer the same-python cython to the on-PATH cython
+* `#11515 <https://github.com/numpy/numpy/pull/11515>`__: BUG: decref of field title caused segfault
+* `#11518 <https://github.com/numpy/numpy/pull/11518>`__: MAINT: Speed up normalize_axis_tuple by about 30%
+* `#11522 <https://github.com/numpy/numpy/pull/11522>`__: BUG: fix np.load() of empty .npz file
+* `#11525 <https://github.com/numpy/numpy/pull/11525>`__: MAINT: Append `*FLAGS` instead of overriding
+* `#11526 <https://github.com/numpy/numpy/pull/11526>`__: ENH: add multi-field assignment helpers in np.lib.recfunctions
+* `#11527 <https://github.com/numpy/numpy/pull/11527>`__: DOC: Note that method is the polar form of Box-Muller.
+* `#11528 <https://github.com/numpy/numpy/pull/11528>`__: ENH: Add support for ipython latex printing to polynomial
+* `#11531 <https://github.com/numpy/numpy/pull/11531>`__: ENH: Add density argument to histogramdd.
+* `#11533 <https://github.com/numpy/numpy/pull/11533>`__: DOC: Fixed example code for cheb2poly and poly2cheb (see #11519)
+* `#11534 <https://github.com/numpy/numpy/pull/11534>`__: DOC: Minor improvements to np.concatenate docstring
+* `#11535 <https://github.com/numpy/numpy/pull/11535>`__: MAINT: Improve memory usage in PEP3118 format parsing
+* `#11553 <https://github.com/numpy/numpy/pull/11553>`__: DOC: Tiny typo on numpy/reference/arrays.dtypes.html
+* `#11556 <https://github.com/numpy/numpy/pull/11556>`__: BUG: Make assert_string_equal check str equality simply without...
+* `#11559 <https://github.com/numpy/numpy/pull/11559>`__: NEP: accept nep 0015
+* `#11560 <https://github.com/numpy/numpy/pull/11560>`__: NEP: accept nep 0019
+* `#11562 <https://github.com/numpy/numpy/pull/11562>`__: DOC: update release notes for LDFLAGS append behavior (gh-11525).
+* `#11565 <https://github.com/numpy/numpy/pull/11565>`__: MAINT: convert the doctests for polynomial to regular tests
+* `#11566 <https://github.com/numpy/numpy/pull/11566>`__: BLD: Do not use gcc warnings flags when 'gcc' is actually clang.
+* `#11567 <https://github.com/numpy/numpy/pull/11567>`__: TST: Integrate codecov testing
+* `#11568 <https://github.com/numpy/numpy/pull/11568>`__: BLD: Modify cpu detection and printing to get working aarch64...
+* `#11571 <https://github.com/numpy/numpy/pull/11571>`__: DOC: Updated array2string description
+* `#11572 <https://github.com/numpy/numpy/pull/11572>`__: DOC: Updated Slice Description
+* `#11573 <https://github.com/numpy/numpy/pull/11573>`__: TST: add broadcast_arrays() kwarg unit test for TypeError
+* `#11580 <https://github.com/numpy/numpy/pull/11580>`__: MAINT: refactor ufunc iter operand flags handling
+* `#11591 <https://github.com/numpy/numpy/pull/11591>`__: MAINT: update runtests.py node id example for pytest usage
+* `#11592 <https://github.com/numpy/numpy/pull/11592>`__: DOC: add Stefan van der Walt to Steering Council
+* `#11593 <https://github.com/numpy/numpy/pull/11593>`__: ENH: handle empty matrices in qr decomposition
+* `#11594 <https://github.com/numpy/numpy/pull/11594>`__: ENH: support for empty matrices in linalg.lstsq
+* `#11595 <https://github.com/numpy/numpy/pull/11595>`__: BUG: warn on NaN in minimum, maximum for scalars, float16
+* `#11596 <https://github.com/numpy/numpy/pull/11596>`__: NEP: backwards compatibility and deprecation policy
+* `#11598 <https://github.com/numpy/numpy/pull/11598>`__: TST: Add Python 3.7 to CI testing
+* `#11601 <https://github.com/numpy/numpy/pull/11601>`__: BUG: Make np.array([[1], 2]) and np.array([1, [2]]) behave in...
+* `#11606 <https://github.com/numpy/numpy/pull/11606>`__: DOC: Post 1.15.0 release updates for master.
+* `#11607 <https://github.com/numpy/numpy/pull/11607>`__: DOC: minor clarification and typo fix to NEP 21 (outer indexing).
+* `#11610 <https://github.com/numpy/numpy/pull/11610>`__: TST: including C source line coverage for CI / codecov
+* `#11611 <https://github.com/numpy/numpy/pull/11611>`__: NEP: Add roadmap section and subdocuments to NEPs
+* `#11613 <https://github.com/numpy/numpy/pull/11613>`__: BUG: have geometric() raise ValueError on p=0
+* `#11615 <https://github.com/numpy/numpy/pull/11615>`__: BUG: Clip uses wrong memory order in output
+* `#11616 <https://github.com/numpy/numpy/pull/11616>`__: DOC: add a brief note on "Protocols for methods" to NEP 18
+* `#11621 <https://github.com/numpy/numpy/pull/11621>`__: DOC: Use "real symmetric" rather than "symmetric" in ``eigh``...
+* `#11626 <https://github.com/numpy/numpy/pull/11626>`__: DOC: Show plot in meshgrid example.
+* `#11630 <https://github.com/numpy/numpy/pull/11630>`__: DOC: Include the versionadded to the isnat documentation.
+* `#11634 <https://github.com/numpy/numpy/pull/11634>`__: MAINT: Filter Cython warnings in `__init__.py`
+* `#11637 <https://github.com/numpy/numpy/pull/11637>`__: ENH: np.angle: Remove unnecessary multiplication, and allow subclasses...
+* `#11638 <https://github.com/numpy/numpy/pull/11638>`__: ENH: Make expand_dims work on subclasses
+* `#11642 <https://github.com/numpy/numpy/pull/11642>`__: BUG: Fixes for unicode field names in Python 2
+* `#11643 <https://github.com/numpy/numpy/pull/11643>`__: DOC: Insert up to date link to Spyder website in Dev Env doc...
+* `#11644 <https://github.com/numpy/numpy/pull/11644>`__: BUG: Fix doc source links to unwrap decorators
+* `#11652 <https://github.com/numpy/numpy/pull/11652>`__: BUG: Ensure singleton dimensions are not dropped when converting...
+* `#11660 <https://github.com/numpy/numpy/pull/11660>`__: ENH: Add NaN warnings for maximum, minimum on more dtypes
+* `#11669 <https://github.com/numpy/numpy/pull/11669>`__: BUG: Fix regression in `void_getitem`
+* `#11670 <https://github.com/numpy/numpy/pull/11670>`__: MAINT: trivially refactor mapped indexing
+* `#11673 <https://github.com/numpy/numpy/pull/11673>`__: DOC: Add geomspace to "See also" of linspace
+* `#11679 <https://github.com/numpy/numpy/pull/11679>`__: TST: ignore setup.py files for codecov reports
+* `#11688 <https://github.com/numpy/numpy/pull/11688>`__: DOC: Update broadcasting doc with current exception details
+* `#11691 <https://github.com/numpy/numpy/pull/11691>`__: BUG: Make matrix_power again work for object arrays.
+* `#11692 <https://github.com/numpy/numpy/pull/11692>`__: MAINT: Remove duplicate code.
+* `#11693 <https://github.com/numpy/numpy/pull/11693>`__: NEP: Mark NEP 18 as accepted
+* `#11694 <https://github.com/numpy/numpy/pull/11694>`__: BUG: Fix pickle and memoryview for datetime64, timedelta64 scalars
+* `#11695 <https://github.com/numpy/numpy/pull/11695>`__: BUG: Add missing PyErr_NoMemory after failing malloc
+* `#11703 <https://github.com/numpy/numpy/pull/11703>`__: MAINT: Remove np.pkgload, which seems to be unusable anyway
+* `#11708 <https://github.com/numpy/numpy/pull/11708>`__: BUG: Fix regression in np.loadtxt for bz2 text files in Python...
+* `#11710 <https://github.com/numpy/numpy/pull/11710>`__: BUG: Check for compiler used in env['CC'], then config_vars['CC']
+* `#11711 <https://github.com/numpy/numpy/pull/11711>`__: BUG: Fix undefined functions on big-endian systems.
+* `#11715 <https://github.com/numpy/numpy/pull/11715>`__: TST: Fix urlopen stubbing.
+* `#11717 <https://github.com/numpy/numpy/pull/11717>`__: MAINT: Make einsum optimize default to False.
+* `#11718 <https://github.com/numpy/numpy/pull/11718>`__: BUG: Revert use of `console_scripts`.
+* `#11722 <https://github.com/numpy/numpy/pull/11722>`__: MAINT: Remove duplicate docstring and correct location of `__all__`...
+* `#11725 <https://github.com/numpy/numpy/pull/11725>`__: BUG: Fix Fortran kind detection for aarch64 & s390x.
+* `#11727 <https://github.com/numpy/numpy/pull/11727>`__: BUG: Fix printing of longdouble on ppc64le.
+* `#11729 <https://github.com/numpy/numpy/pull/11729>`__: DOC: fix capitalization of kilojoules
+* `#11731 <https://github.com/numpy/numpy/pull/11731>`__: DOC: fix typo in vectorize docstring
+* `#11733 <https://github.com/numpy/numpy/pull/11733>`__: DOC: recommend polynomial.Polynomial over np.polyfit
+* `#11735 <https://github.com/numpy/numpy/pull/11735>`__: BUG: Fix test sensitive to platform byte order.
+* `#11738 <https://github.com/numpy/numpy/pull/11738>`__: TST, MAINT: add lgtm.yml to tweak LGTM.com analysis
+* `#11739 <https://github.com/numpy/numpy/pull/11739>`__: BUG: disallow setting flag to writeable after fromstring, frombuffer
+* `#11740 <https://github.com/numpy/numpy/pull/11740>`__: BUG: Deprecation triggers segfault
+* `#11742 <https://github.com/numpy/numpy/pull/11742>`__: DOC: Reduce warnings and cleanup redundant c-api documentation
+* `#11745 <https://github.com/numpy/numpy/pull/11745>`__: DOC: Small docstring fixes for old polyfit.
+* `#11754 <https://github.com/numpy/numpy/pull/11754>`__: BUG: check return value of `_buffer_format_string`
+* `#11755 <https://github.com/numpy/numpy/pull/11755>`__: MAINT: Fix typos in random.hypergeometric's notes
+* `#11756 <https://github.com/numpy/numpy/pull/11756>`__: MAINT: Make assert_array_compare more generic.
+* `#11765 <https://github.com/numpy/numpy/pull/11765>`__: DOC: Move documentation from `help(ndarray.ctypes)` to `help(some_array.ctypes)`
+* `#11771 <https://github.com/numpy/numpy/pull/11771>`__: BUG: Make `random.shuffle` work on 1-D instances of `ndarray`...
+* `#11774 <https://github.com/numpy/numpy/pull/11774>`__: BUG: Fix regression in intersect1d.
+* `#11778 <https://github.com/numpy/numpy/pull/11778>`__: BUG: Avoid signed overflow in histogram
+* `#11783 <https://github.com/numpy/numpy/pull/11783>`__: MAINT: check `_append_char` return value
+* `#11784 <https://github.com/numpy/numpy/pull/11784>`__: MAINT: reformat line spacing before test methods
+* `#11797 <https://github.com/numpy/numpy/pull/11797>`__: DOC: Update docs after 1.15.1 release.
+* `#11800 <https://github.com/numpy/numpy/pull/11800>`__: DOC: document use when f2py is not in the PATH
+* `#11802 <https://github.com/numpy/numpy/pull/11802>`__: ENH: Use entry_points to install the f2py scripts.
+* `#11805 <https://github.com/numpy/numpy/pull/11805>`__: BUG: add type cast check for ediff1d
+* `#11806 <https://github.com/numpy/numpy/pull/11806>`__: DOC: Polybase augmented assignment notes
+* `#11812 <https://github.com/numpy/numpy/pull/11812>`__: DOC: edit setup.py docstring that is displayed on PyPI.
+* `#11813 <https://github.com/numpy/numpy/pull/11813>`__: BUG: fix array_split incorrect behavior with array size bigger...
+* `#11814 <https://github.com/numpy/numpy/pull/11814>`__: DOC, MAINT: Fixes for errstate() and README.md documentation.
+* `#11817 <https://github.com/numpy/numpy/pull/11817>`__: DOC: add examples and extend existing docs for polynomial subclasses
+* `#11818 <https://github.com/numpy/numpy/pull/11818>`__: TST: add missing tests for all polynomial subclass pow fns.
+* `#11823 <https://github.com/numpy/numpy/pull/11823>`__: TST: add test for array2string unexpected kwarg
+* `#11830 <https://github.com/numpy/numpy/pull/11830>`__: MAINT: reduce void type repr code duplication
+* `#11834 <https://github.com/numpy/numpy/pull/11834>`__: MAINT, DOC: Replace 'an' by 'a' in some docstrings.
+* `#11837 <https://github.com/numpy/numpy/pull/11837>`__: DOC: Make clear the connection between numpy types and C types
+* `#11840 <https://github.com/numpy/numpy/pull/11840>`__: BUG: Let 0-D arrays of Python timedelta convert to np.timedelta64.
+* `#11843 <https://github.com/numpy/numpy/pull/11843>`__: MAINT: remove surviving, unused, list comprehension
+* `#11849 <https://github.com/numpy/numpy/pull/11849>`__: TST: reorder duplicate mem_overlap.c compile
+* `#11850 <https://github.com/numpy/numpy/pull/11850>`__: DOC: add comment to remove fn after python 2 support is dropped
+* `#11852 <https://github.com/numpy/numpy/pull/11852>`__: BUG: timedelta64 now accepts NumPy ints
+* `#11858 <https://github.com/numpy/numpy/pull/11858>`__: DOC: add docstrings for numeric types
+* `#11862 <https://github.com/numpy/numpy/pull/11862>`__: BUG: Re-add `_ones_like` to numpy.core.umath.
+* `#11864 <https://github.com/numpy/numpy/pull/11864>`__: TST: Update travis testing to use latest virtualenv.
+* `#11865 <https://github.com/numpy/numpy/pull/11865>`__: DOC: add a Code of Conduct document.
+* `#11866 <https://github.com/numpy/numpy/pull/11866>`__: TST: Drop Python 3.4 testing
+* `#11868 <https://github.com/numpy/numpy/pull/11868>`__: MAINT: include benchmarks, complete docs, dev tool files in sdist.
+* `#11870 <https://github.com/numpy/numpy/pull/11870>`__: MAINT: dtype(unicode) should raise TypeError on failure
+* `#11874 <https://github.com/numpy/numpy/pull/11874>`__: BENCH: split out slow setup method in bench_shape_base.Block
+* `#11877 <https://github.com/numpy/numpy/pull/11877>`__: BUG: Fix memory leak in pyfragments.swg
+* `#11880 <https://github.com/numpy/numpy/pull/11880>`__: BUG: The multiarray/ufunc merge broke old wheels.
+* `#11882 <https://github.com/numpy/numpy/pull/11882>`__: DOC: Recommend the use of `np.ndim` over `np.isscalar`, and explain...
+* `#11889 <https://github.com/numpy/numpy/pull/11889>`__: BENCH: Split bench_function_base.Sort into Sort and SortWorst.
+* `#11891 <https://github.com/numpy/numpy/pull/11891>`__: MAINT: remove exec_command() from build_ext
+* `#11892 <https://github.com/numpy/numpy/pull/11892>`__: TST: Parametrize PEP3118 scalar tests.
+* `#11893 <https://github.com/numpy/numpy/pull/11893>`__: TST: Fix duplicated test name.
+* `#11894 <https://github.com/numpy/numpy/pull/11894>`__: TST: Parametrize f2py tests.
+* `#11895 <https://github.com/numpy/numpy/pull/11895>`__: TST: Parametrize some linalg tests over types.
+* `#11896 <https://github.com/numpy/numpy/pull/11896>`__: BUG: Fix matrix PendingDeprecationWarning suppression for pytest...
+* `#11898 <https://github.com/numpy/numpy/pull/11898>`__: MAINT: remove exec_command usage from ccompiler.py
+* `#11899 <https://github.com/numpy/numpy/pull/11899>`__: MAINT: remove exec_command from system_info.py
+* `#11900 <https://github.com/numpy/numpy/pull/11900>`__: MAINT: remove exec_command from gnu.py
+* `#11901 <https://github.com/numpy/numpy/pull/11901>`__: MAINT: remove exec_command usage in ibm.py
+* `#11904 <https://github.com/numpy/numpy/pull/11904>`__: Use pytest for some already-parametrized core tests
+* `#11905 <https://github.com/numpy/numpy/pull/11905>`__: TST: Start testing with "-std=c99" on travisCI.
+* `#11906 <https://github.com/numpy/numpy/pull/11906>`__: TST: add shippable ARMv8 to CI
+* `#11907 <https://github.com/numpy/numpy/pull/11907>`__: Link HOWTO_DOCUMENT to specific section on docstrings
+* `#11909 <https://github.com/numpy/numpy/pull/11909>`__: MAINT: flake8 cleanups
+* `#11910 <https://github.com/numpy/numpy/pull/11910>`__: MAINT: test, refactor design of recursive closures
+* `#11912 <https://github.com/numpy/numpy/pull/11912>`__: DOC: dtype offset and itemsize is limited by range of C int
+* `#11914 <https://github.com/numpy/numpy/pull/11914>`__: DOC: Clarify difference between PySequence_GETITEM, PyArray_GETITEM
+* `#11916 <https://github.com/numpy/numpy/pull/11916>`__: DEP: deprecate np.set_numeric_ops and friends
+* `#11920 <https://github.com/numpy/numpy/pull/11920>`__: TST: Fix 'def' test_numerictypes.py::TestSctypeDict to 'class'...
+* `#11921 <https://github.com/numpy/numpy/pull/11921>`__: MAINT: Don't rely on `__name__` in bitname - use the information...
+* `#11922 <https://github.com/numpy/numpy/pull/11922>`__: TST: Add tests for maximum_sctype
+* `#11929 <https://github.com/numpy/numpy/pull/11929>`__: DOC: #defining -> #define / Added a short explanation for Numeric
+* `#11930 <https://github.com/numpy/numpy/pull/11930>`__: DOC: fix scipy-sphinx-theme license path
+* `#11932 <https://github.com/numpy/numpy/pull/11932>`__: MAINT: Move `np.dtype.name.__get__` to python
+* `#11933 <https://github.com/numpy/numpy/pull/11933>`__: TST: Fix unit tests that used to call unittest.TestCase.fail
+* `#11934 <https://github.com/numpy/numpy/pull/11934>`__: NEP: Revert "NEP: Mark NEP 18 as accepted"
+* `#11935 <https://github.com/numpy/numpy/pull/11935>`__: MAINT: remove usage of exec_command in config.py
+* `#11937 <https://github.com/numpy/numpy/pull/11937>`__: MAINT: remove exec_command() from f2py init
+* `#11941 <https://github.com/numpy/numpy/pull/11941>`__: BUG: Ensure einsum(optimize=True) dispatches tensordot deterministically
+* `#11943 <https://github.com/numpy/numpy/pull/11943>`__: DOC: Add warning/clarification about backwards compat in NEP-18
+* `#11948 <https://github.com/numpy/numpy/pull/11948>`__: DEP: finish making all comparisons to NaT false
+* `#11949 <https://github.com/numpy/numpy/pull/11949>`__: MAINT: Small tidy-ups to `np.core._dtype`
+* `#11950 <https://github.com/numpy/numpy/pull/11950>`__: MAINT: Extract tangential improvements made in #11175
+* `#11952 <https://github.com/numpy/numpy/pull/11952>`__: MAINT: test NPY_INTERNAL_BUILD only if defined
+* `#11953 <https://github.com/numpy/numpy/pull/11953>`__: TST: codecov.yml improvements
+* `#11957 <https://github.com/numpy/numpy/pull/11957>`__: ENH: mark that large allocations can use huge pages
+* `#11958 <https://github.com/numpy/numpy/pull/11958>`__: TST: Add a test for np.pad where constant_values is an object
+* `#11959 <https://github.com/numpy/numpy/pull/11959>`__: MAINT: Explicitly cause pagefaults to happen before starting...
+* `#11961 <https://github.com/numpy/numpy/pull/11961>`__: TST: Add more tests for np.pad
+* `#11962 <https://github.com/numpy/numpy/pull/11962>`__: ENH: allow limiting the maximum number of lines read from numpy.loadtxt
+* `#11965 <https://github.com/numpy/numpy/pull/11965>`__: BENCH: Add a benchmark comparing block to copy in the 3D case
+* `#11966 <https://github.com/numpy/numpy/pull/11966>`__: MAINT: Rewrite shape normalization in pad function
+* `#11967 <https://github.com/numpy/numpy/pull/11967>`__: BUG: fix refcount leak in PyArray_AdaptFlexibleDType
+* `#11971 <https://github.com/numpy/numpy/pull/11971>`__: MAINT: Block algorithm with a single copy per call to `block`
+* `#11973 <https://github.com/numpy/numpy/pull/11973>`__: BUG: fix cached allocations without the GIL
+* `#11976 <https://github.com/numpy/numpy/pull/11976>`__: MAINT/DOC: Show the location of an empty list in np.block
+* `#11979 <https://github.com/numpy/numpy/pull/11979>`__: MAINT: Ensure that a copy of the array is returned when calling...
+* `#11989 <https://github.com/numpy/numpy/pull/11989>`__: BUG: Ensure boolean indexing of subclasses sets base correctly.
+* `#11991 <https://github.com/numpy/numpy/pull/11991>`__: MAINT: speed up `_block` by avoiding a recursive closure
+* `#11996 <https://github.com/numpy/numpy/pull/11996>`__: TST: Parametrize and break apart dtype tests
+* `#11997 <https://github.com/numpy/numpy/pull/11997>`__: MAINT: Extract string helpers to a new private file
+* `#12002 <https://github.com/numpy/numpy/pull/12002>`__: Revert "NEP: Revert "NEP: Mark NEP 18 as accepted""
+* `#12004 <https://github.com/numpy/numpy/pull/12004>`__: BUG: Fix f2py compile function testing.
+* `#12005 <https://github.com/numpy/numpy/pull/12005>`__: ENH: initial implementation of core `__array_function__` machinery
+* `#12008 <https://github.com/numpy/numpy/pull/12008>`__: MAINT: Reassociate `np.cast` with the comment describing it
+* `#12009 <https://github.com/numpy/numpy/pull/12009>`__: MAINT: Eliminate the private `numerictypes._typestr`
+* `#12011 <https://github.com/numpy/numpy/pull/12011>`__: ENH: implementation of array_reduce_ex
+* `#12012 <https://github.com/numpy/numpy/pull/12012>`__: MAINT: Extract the crazy number of type aliases to their own...
+* `#12014 <https://github.com/numpy/numpy/pull/12014>`__: TST: prefer pytest.skip() over SkipTest
+* `#12015 <https://github.com/numpy/numpy/pull/12015>`__: TST: improve warnings parallel test safety
+* `#12017 <https://github.com/numpy/numpy/pull/12017>`__: NEP: add 3 missing data NEPs rescued from 2011-2012
+* `#12018 <https://github.com/numpy/numpy/pull/12018>`__: MAINT: Simplify parts of `_type_aliases`
+* `#12019 <https://github.com/numpy/numpy/pull/12019>`__: DOC: MAINT: address comments @eric-wieser on NEP 24-26 PR.
+* `#12020 <https://github.com/numpy/numpy/pull/12020>`__: TST: Add tests for np.sctype2char
+* `#12021 <https://github.com/numpy/numpy/pull/12021>`__: DOC: Post NumPy 1.15.2 release updates. [ci skip]
+* `#12024 <https://github.com/numpy/numpy/pull/12024>`__: MAINT: Normalize axes the normal way in fftpack.py
+* `#12027 <https://github.com/numpy/numpy/pull/12027>`__: DOC: Add docstrings for abstract types in scalar type hierarchy
+* `#12030 <https://github.com/numpy/numpy/pull/12030>`__: DOC: use "import numpy as np" style
+* `#12032 <https://github.com/numpy/numpy/pull/12032>`__: BUG: check return value from PyArray_PromoteTypes
+* `#12033 <https://github.com/numpy/numpy/pull/12033>`__: TST: Mark check for f2py script xfail.
+* `#12034 <https://github.com/numpy/numpy/pull/12034>`__: MAINT: Add version deprecated to some deprecation messages.
+* `#12035 <https://github.com/numpy/numpy/pull/12035>`__: BUG: Fix memory leak in PY3K buffer code.
+* `#12041 <https://github.com/numpy/numpy/pull/12041>`__: MAINT: remove duplicate imports
+* `#12042 <https://github.com/numpy/numpy/pull/12042>`__: MAINT: cleanup and better document core/overrides.py
+* `#12045 <https://github.com/numpy/numpy/pull/12045>`__: BUG: fix memory leak of buffer format string
+* `#12048 <https://github.com/numpy/numpy/pull/12048>`__: BLD: pin sphinx to 1.7.9
+* `#12051 <https://github.com/numpy/numpy/pull/12051>`__: TST: add macOS Azure testing to CI
+* `#12054 <https://github.com/numpy/numpy/pull/12054>`__: MAINT: avoid modifying mutable default values
+* `#12056 <https://github.com/numpy/numpy/pull/12056>`__: MAINT: The crackfortran function is called with an extra argument
+* `#12057 <https://github.com/numpy/numpy/pull/12057>`__: MAINT: remove unused imports
+* `#12058 <https://github.com/numpy/numpy/pull/12058>`__: MAINT: remove redundant assignment
+* `#12060 <https://github.com/numpy/numpy/pull/12060>`__: MAINT: remove unused stdlib imports
+* `#12061 <https://github.com/numpy/numpy/pull/12061>`__: MAINT: remove redundant imports
+* `#12062 <https://github.com/numpy/numpy/pull/12062>`__: BUG: `OBJECT_to_*` should check for errors
+* `#12064 <https://github.com/numpy/numpy/pull/12064>`__: MAINT: delay initialization of getlimits (circular imports)
+* `#12072 <https://github.com/numpy/numpy/pull/12072>`__: BUG: test_path() now uses Path.resolve()
+* `#12073 <https://github.com/numpy/numpy/pull/12073>`__: MAINT: Avoid some memory copies in numpy.polynomial.hermite
+* `#12079 <https://github.com/numpy/numpy/pull/12079>`__: MAINT: Blacklist some MSVC complex functions.
+* `#12081 <https://github.com/numpy/numpy/pull/12081>`__: TST: add Windows test matrix to Azure CI
+* `#12082 <https://github.com/numpy/numpy/pull/12082>`__: TST: Add Python 3.5 to Azure windows CI.
+* `#12088 <https://github.com/numpy/numpy/pull/12088>`__: BUG: limit default for get_num_build_jobs() to 8
+* `#12089 <https://github.com/numpy/numpy/pull/12089>`__: BUG: Fix in-place permutation
+* `#12090 <https://github.com/numpy/numpy/pull/12090>`__: TST, MAINT: Update pickling tests by making them loop over all...
+* `#12091 <https://github.com/numpy/numpy/pull/12091>`__: TST: Install pickle5 for CI testing with python 3.6/7
+* `#12093 <https://github.com/numpy/numpy/pull/12093>`__: Provide information about which kind is actually not an integer kind
+* `#12099 <https://github.com/numpy/numpy/pull/12099>`__: ENH: Validate dispatcher functions in array_function_dispatch
+* `#12102 <https://github.com/numpy/numpy/pull/12102>`__: TST: improve coverage of nd_grid
+* `#12103 <https://github.com/numpy/numpy/pull/12103>`__: MAINT: Add azure-pipeline status badge to README.md
+* `#12106 <https://github.com/numpy/numpy/pull/12106>`__: TST, MAINT: Skip some f2py tests on Mac.
+* `#12108 <https://github.com/numpy/numpy/pull/12108>`__: BUG: Allow boolean subtract in histogram
+* `#12109 <https://github.com/numpy/numpy/pull/12109>`__: TST: add unit test for issctype
+* `#12112 <https://github.com/numpy/numpy/pull/12112>`__: ENH: check getfield arguments to prevent invalid memory access
+* `#12115 <https://github.com/numpy/numpy/pull/12115>`__: ENH: `__array_function__` support for most of `numpy.core`
+* `#12116 <https://github.com/numpy/numpy/pull/12116>`__: ENH: `__array_function__` support for `np.lib`, part 1/2
+* `#12117 <https://github.com/numpy/numpy/pull/12117>`__: ENH: `__array_function__` support for `np.fft` and `np.linalg`
+* `#12119 <https://github.com/numpy/numpy/pull/12119>`__: ENH: `__array_function__` support for `np.lib`, part 2/2
+* `#12120 <https://github.com/numpy/numpy/pull/12120>`__: ENH: add timedelta modulus operator support (mm)
+* `#12121 <https://github.com/numpy/numpy/pull/12121>`__: MAINT: Clarify the error message for resize failure
+* `#12123 <https://github.com/numpy/numpy/pull/12123>`__: DEP: deprecate asscalar
+* `#12124 <https://github.com/numpy/numpy/pull/12124>`__: BUG: refactor float error status to support Alpine Linux
+* `#12125 <https://github.com/numpy/numpy/pull/12125>`__: TST: expand cases in test_issctype()
+* `#12127 <https://github.com/numpy/numpy/pull/12127>`__: BUG: Fix memory leak in mapping.c
+* `#12131 <https://github.com/numpy/numpy/pull/12131>`__: BUG: fix PyDataType_ISBOOL
+* `#12133 <https://github.com/numpy/numpy/pull/12133>`__: MAINT, TST refactor pickle imports and tests
+* `#12134 <https://github.com/numpy/numpy/pull/12134>`__: DOC: Remove duplicated sentence in numpy.multiply
+* `#12137 <https://github.com/numpy/numpy/pull/12137>`__: TST: error tests for fill_diagonal()
+* `#12138 <https://github.com/numpy/numpy/pull/12138>`__: TST: error tests for diag_indices_from()
+* `#12140 <https://github.com/numpy/numpy/pull/12140>`__: DOC: fixups for NEP-18 based on the implementation
+* `#12141 <https://github.com/numpy/numpy/pull/12141>`__: DOC: minor tweak to CoC (update NumFOCUS contact address).
+* `#12145 <https://github.com/numpy/numpy/pull/12145>`__: MAINT: Update ndarrayobject.h `__cplusplus` block.
+* `#12146 <https://github.com/numpy/numpy/pull/12146>`__: MAINT: Fix typo in comment
+* `#12147 <https://github.com/numpy/numpy/pull/12147>`__: MAINT: Move duplicated type_reso_error code into a helper function
+* `#12148 <https://github.com/numpy/numpy/pull/12148>`__: DOC: document NEP-18 overrides in release notes
+* `#12151 <https://github.com/numpy/numpy/pull/12151>`__: TST: byte_bounds contiguity handling
+* `#12153 <https://github.com/numpy/numpy/pull/12153>`__: DOC, TST: cover setdiff1d assume_unique
+* `#12154 <https://github.com/numpy/numpy/pull/12154>`__: ENH: `__array_function__` for `np.core.defchararray`
+* `#12155 <https://github.com/numpy/numpy/pull/12155>`__: MAINT: Define Py_SETREF for pre-3.5.2 python and use in code
+* `#12157 <https://github.com/numpy/numpy/pull/12157>`__: ENH: Add support for third-party path-like objects by backporting...
+* `#12159 <https://github.com/numpy/numpy/pull/12159>`__: MAINT: remove unused nd_grid `__len__`.
+* `#12163 <https://github.com/numpy/numpy/pull/12163>`__: ENH: `__array_function__` for `np.einsum` and `np.block`
+* `#12165 <https://github.com/numpy/numpy/pull/12165>`__: Mark NEP 22 as accepted, and add "Informational" NEPs to NEP...
+* `#12166 <https://github.com/numpy/numpy/pull/12166>`__: NEP: Add zero-rank arrays historical info NEP
+* `#12173 <https://github.com/numpy/numpy/pull/12173>`__: NEP: add notes about updates to NEP-18
+* `#12174 <https://github.com/numpy/numpy/pull/12174>`__: NEP 16 abstract arrays: rebased and marked as "Withdrawn"
+* `#12175 <https://github.com/numpy/numpy/pull/12175>`__: ENH: `__array_function__` for multiarray functions
+* `#12176 <https://github.com/numpy/numpy/pull/12176>`__: TST: add test for weighted histogram mismatch
+* `#12177 <https://github.com/numpy/numpy/pull/12177>`__: MAINT: remove unused `_assertSquareness()`
+* `#12179 <https://github.com/numpy/numpy/pull/12179>`__: MAINT: Move `_kind_to_stem` to `np.core._dtype`, so that it can...
+* `#12180 <https://github.com/numpy/numpy/pull/12180>`__: NEP: change toc titles, cross reference, mark 16 superseded
+* `#12181 <https://github.com/numpy/numpy/pull/12181>`__: MAINT: fix deprecation message typo for np.sum
+* `#12185 <https://github.com/numpy/numpy/pull/12185>`__: TST: test multi_dot with 2 arrays
+* `#12199 <https://github.com/numpy/numpy/pull/12199>`__: TST: add Azure CI triggers
+* `#12209 <https://github.com/numpy/numpy/pull/12209>`__: Delay import of distutils.msvccompiler to avoid warning on non-Windows.
+* `#12211 <https://github.com/numpy/numpy/pull/12211>`__: DOC: Clarify the examples for argmax and argmin
+* `#12212 <https://github.com/numpy/numpy/pull/12212>`__: MAINT: `ndarray.__repr__` should not rely on `__array_function__`
+* `#12214 <https://github.com/numpy/numpy/pull/12214>`__: TST: add test for tensorinv()
+* `#12215 <https://github.com/numpy/numpy/pull/12215>`__: TST: test dims match on lstsq()
+* `#12216 <https://github.com/numpy/numpy/pull/12216>`__: TST: test invalid histogram range
+* `#12217 <https://github.com/numpy/numpy/pull/12217>`__: TST: test histogram bins dims
+* `#12219 <https://github.com/numpy/numpy/pull/12219>`__: ENH: make matmul into a ufunc
+* `#12222 <https://github.com/numpy/numpy/pull/12222>`__: TST: unit tests for column_stack.
+* `#12224 <https://github.com/numpy/numpy/pull/12224>`__: BUG: Fix MaskedArray fill_value type conversion.
+* `#12229 <https://github.com/numpy/numpy/pull/12229>`__: MAINT: Fix typo in comment
+* `#12236 <https://github.com/numpy/numpy/pull/12236>`__: BUG: maximum, minimum no longer emit warnings on NaN
+* `#12240 <https://github.com/numpy/numpy/pull/12240>`__: BUG: Fix crash in repr of void subclasses
+* `#12241 <https://github.com/numpy/numpy/pull/12241>`__: TST: arg handling tests in histogramdd
+* `#12243 <https://github.com/numpy/numpy/pull/12243>`__: BUG: Fix misleading assert message in assert_almost_equal #12200
+* `#12245 <https://github.com/numpy/numpy/pull/12245>`__: TST: tests for sort_complex()
+* `#12246 <https://github.com/numpy/numpy/pull/12246>`__: DOC: Update docs after NumPy 1.15.3 release.
+* `#12249 <https://github.com/numpy/numpy/pull/12249>`__: BUG: Dealloc cached buffer info
+* `#12250 <https://github.com/numpy/numpy/pull/12250>`__: DOC: add missing docs
+* `#12251 <https://github.com/numpy/numpy/pull/12251>`__: MAINT: improved error message when no `__array_function__` implementation...
+* `#12254 <https://github.com/numpy/numpy/pull/12254>`__: MAINT: Move ctype -> dtype conversion to python
+* `#12257 <https://github.com/numpy/numpy/pull/12257>`__: BUG: Fix fill value in masked array '==' and '!=' ops.
+* `#12259 <https://github.com/numpy/numpy/pull/12259>`__: TST: simplify how the different code paths for block are tested.
+* `#12265 <https://github.com/numpy/numpy/pull/12265>`__: BUG: Revert linspace import for concatenation funcs
+* `#12266 <https://github.com/numpy/numpy/pull/12266>`__: BUG: Avoid SystemErrors by checking the return value of PyPrint
+* `#12268 <https://github.com/numpy/numpy/pull/12268>`__: DOC: add broadcasting article from scipy old-wiki
+* `#12270 <https://github.com/numpy/numpy/pull/12270>`__: MAINT: set `__module__` for more `array_function_dispatch` uses
+* `#12276 <https://github.com/numpy/numpy/pull/12276>`__: MAINT: remove unused parse_index()
+* `#12279 <https://github.com/numpy/numpy/pull/12279>`__: NEP: tweak and mark NEP 0027 as final
+* `#12280 <https://github.com/numpy/numpy/pull/12280>`__: DEP: deprecate passing a generator to stack functions
+* `#12281 <https://github.com/numpy/numpy/pull/12281>`__: NEP: revise note for NEP 27
+* `#12285 <https://github.com/numpy/numpy/pull/12285>`__: ENH: array does not need to be writable to use as input to take
+* `#12286 <https://github.com/numpy/numpy/pull/12286>`__: ENH: Do not emit compiler warning if forcing old API
+* `#12288 <https://github.com/numpy/numpy/pull/12288>`__: BUILD: force LGTM to use cython>=0.29
+* `#12291 <https://github.com/numpy/numpy/pull/12291>`__: MAINT: `_set_out_array()` syntax fix
+* `#12292 <https://github.com/numpy/numpy/pull/12292>`__: MAINT: removed unused vars in f2py test code
+* `#12299 <https://github.com/numpy/numpy/pull/12299>`__: BUILD: use system python3 in the chroot
+* `#12302 <https://github.com/numpy/numpy/pull/12302>`__: DOC: Update the docstring of asfortranarray and ascontiguousarray
+* `#12306 <https://github.com/numpy/numpy/pull/12306>`__: TST: add 32-bit linux Azure CI job
+* `#12312 <https://github.com/numpy/numpy/pull/12312>`__: MAINT, TST: unreachable Python code paths
+* `#12321 <https://github.com/numpy/numpy/pull/12321>`__: MAINT: Simple speed-ups for getting overloaded types
+* `#12326 <https://github.com/numpy/numpy/pull/12326>`__: DOC: NumPy 1.15.4 post release documentation update.
+* `#12328 <https://github.com/numpy/numpy/pull/12328>`__: MAINT: Allow subclasses in `ndarray.__array_function__`.
+* `#12330 <https://github.com/numpy/numpy/pull/12330>`__: TST: test_tofile_fromfile now uses initialized memory
+* `#12331 <https://github.com/numpy/numpy/pull/12331>`__: DEV: change ASV benchmarks to run on Python 3.6 by default
+* `#12338 <https://github.com/numpy/numpy/pull/12338>`__: DOC: add a docstring for the function 'compare_chararrays' (See...
+* `#12342 <https://github.com/numpy/numpy/pull/12342>`__: BUG: Fix for np.dtype(ctypes.Structure) does not respect _pack_...
+* `#12347 <https://github.com/numpy/numpy/pull/12347>`__: DOC: typo in docstring numpy.random.beta, shape parameters must...
+* `#12349 <https://github.com/numpy/numpy/pull/12349>`__: TST, DOC: store circleci doc artifacts
+* `#12353 <https://github.com/numpy/numpy/pull/12353>`__: BUG: test, fix for threshold='nan'
+* `#12354 <https://github.com/numpy/numpy/pull/12354>`__: BUG: Fix segfault when an error occurs in np.fromfile
+* `#12355 <https://github.com/numpy/numpy/pull/12355>`__: BUG: fix a bug in npy_PyFile_Dup2 where it didn't return immediately...
+* `#12357 <https://github.com/numpy/numpy/pull/12357>`__: MAINT: Cleanup pavement file
+* `#12358 <https://github.com/numpy/numpy/pull/12358>`__: BUG: test, fix loading structured dtypes with padding
+* `#12362 <https://github.com/numpy/numpy/pull/12362>`__: MAINT: disable `__array_function__` dispatch unless environment...
+* `#12363 <https://github.com/numpy/numpy/pull/12363>`__: MAINT: update gfortran RPATH for AIX/Windows non-support.
+* `#12364 <https://github.com/numpy/numpy/pull/12364>`__: NEP: clarify the purpose of "types" in `__array_function__`.
+* `#12366 <https://github.com/numpy/numpy/pull/12366>`__: MAINT: Refactor sorting header file
+* `#12372 <https://github.com/numpy/numpy/pull/12372>`__: BUG: random: Fix handling of a=0 for numpy.random.weibull.
+* `#12373 <https://github.com/numpy/numpy/pull/12373>`__: MAINT: Improve error message for legal but unsupported PEP3118...
+* `#12376 <https://github.com/numpy/numpy/pull/12376>`__: BUG: do not override exception on import failure
+* `#12377 <https://github.com/numpy/numpy/pull/12377>`__: NEP: move nep 15 from accepted to final
+* `#12378 <https://github.com/numpy/numpy/pull/12378>`__: TST: Update complex long double precision tests.
+* `#12380 <https://github.com/numpy/numpy/pull/12380>`__: BUG: Fix for #10533 np.dtype(ctype) does not respect endianness
+* `#12381 <https://github.com/numpy/numpy/pull/12381>`__: BUG: graceful DataSource __del__ when __init__ fails
+* `#12382 <https://github.com/numpy/numpy/pull/12382>`__: ENH: set correct __module__ for objects in numpy's public API
+* `#12388 <https://github.com/numpy/numpy/pull/12388>`__: ENH: allow arrays for start and stop in {lin,log,geom}space
+* `#12390 <https://github.com/numpy/numpy/pull/12390>`__: DEV: remove shim added in 1.4
+* `#12391 <https://github.com/numpy/numpy/pull/12391>`__: DEP: raise on a call to deprecated numpy.lib.function_base.unique
+* `#12392 <https://github.com/numpy/numpy/pull/12392>`__: DOC: Add release notes for ctypes improvements
+* `#12398 <https://github.com/numpy/numpy/pull/12398>`__: BUG: fix possible overlap issues with avx enabled
+* `#12399 <https://github.com/numpy/numpy/pull/12399>`__: DOC: Fix typo in polyint. Fixes #12386.
+* `#12405 <https://github.com/numpy/numpy/pull/12405>`__: ENH: Add support for `np.dtype(ctypes.Union)`
+* `#12407 <https://github.com/numpy/numpy/pull/12407>`__: BUG: Fall back to 'ascii' locale in build (if needed)
+* `#12408 <https://github.com/numpy/numpy/pull/12408>`__: BUG: multifield-view of MaskedArray gets bad fill_value
+* `#12409 <https://github.com/numpy/numpy/pull/12409>`__: MAINT: correct the dtype.descr docstring
+* `#12413 <https://github.com/numpy/numpy/pull/12413>`__: BUG: Do not double-quote arguments to the command line
+* `#12414 <https://github.com/numpy/numpy/pull/12414>`__: MAINT: Update cversion hash.
+* `#12417 <https://github.com/numpy/numpy/pull/12417>`__: BUG: Fix regression on np.dtype(ctypes.c_void_p)
+* `#12419 <https://github.com/numpy/numpy/pull/12419>`__: Fix PyArray_FillFunc function definitions
+* `#12420 <https://github.com/numpy/numpy/pull/12420>`__: gfortran needs -lpthread & -maix64 (64-bit build) on AIX
+* `#12422 <https://github.com/numpy/numpy/pull/12422>`__: MNT: Reword error message about loading pickled data.
+* `#12424 <https://github.com/numpy/numpy/pull/12424>`__: BUG: Fix inconsistent cache keying in ndpointer
+* `#12429 <https://github.com/numpy/numpy/pull/12429>`__: MAINT: Update mailmap for 1.16.0 release.
+* `#12431 <https://github.com/numpy/numpy/pull/12431>`__: BUG/ENH: Fix use of ndpointer in return values
+* `#12437 <https://github.com/numpy/numpy/pull/12437>`__: MAINT: refactor datetime.c_metadata creation
+* `#12439 <https://github.com/numpy/numpy/pull/12439>`__: BUG: test, fix NPY_VISIBILITY_HIDDEN on gcc, which becomes NPY_NO_EXPORT
+* `#12440 <https://github.com/numpy/numpy/pull/12440>`__: BUG: don't override original errors when casting inside np.dot()...
+* `#12443 <https://github.com/numpy/numpy/pull/12443>`__: MAINT: Use set literals
+* `#12445 <https://github.com/numpy/numpy/pull/12445>`__: MAINT: Use list and dict comprehension when possible
+* `#12446 <https://github.com/numpy/numpy/pull/12446>`__: MAINT: Fixups to new functions in np.lib.recfunctions
+* `#12447 <https://github.com/numpy/numpy/pull/12447>`__: ENH: add back the multifield copy->view change
+* `#12448 <https://github.com/numpy/numpy/pull/12448>`__: MAINT: Review F401,F841,F842 flake8 errors (unused variables...
+* `#12455 <https://github.com/numpy/numpy/pull/12455>`__: TST: use condition directive for Azure 2.7 check
+* `#12458 <https://github.com/numpy/numpy/pull/12458>`__: MAINT, DOC: fix Azure README badge
+* `#12464 <https://github.com/numpy/numpy/pull/12464>`__: BUG: IndexError for empty list on structured MaskedArray.
+* `#12466 <https://github.com/numpy/numpy/pull/12466>`__: TST: use openblas for Windows CI
+* `#12470 <https://github.com/numpy/numpy/pull/12470>`__: MAINT: remove wrapper functions from numpy.core.multiarray
+* `#12471 <https://github.com/numpy/numpy/pull/12471>`__: ENH: override support for np.linspace and friends
+* `#12474 <https://github.com/numpy/numpy/pull/12474>`__: TST: enable dispatcher test coverage
+* `#12477 <https://github.com/numpy/numpy/pull/12477>`__: DOC: fix example for __call__. See #12451
+* `#12486 <https://github.com/numpy/numpy/pull/12486>`__: DOC: Update copyright year in the license
+* `#12488 <https://github.com/numpy/numpy/pull/12488>`__: ENH: implement matmul on NDArrayOperatorsMixin
+* `#12493 <https://github.com/numpy/numpy/pull/12493>`__: BUG: fix records.fromfile fails to read data >4 GB
+* `#12494 <https://github.com/numpy/numpy/pull/12494>`__: BUG: test, fix matmul, dot for vector array with stride[i]=0
+* `#12498 <https://github.com/numpy/numpy/pull/12498>`__: TST: sync Azure Win openblas
+* `#12501 <https://github.com/numpy/numpy/pull/12501>`__: MAINT: removed word/typo from comment in site.cfg.example
+* `#12556 <https://github.com/numpy/numpy/pull/12556>`__: BUG: only override vector size for avx code for 1.16
+* `#12562 <https://github.com/numpy/numpy/pull/12562>`__: DOC, MAINT: Make `PYVER = 3` in doc/Makefile.
+* `#12563 <https://github.com/numpy/numpy/pull/12563>`__: DOC: more doc updates for structured arrays
+* `#12564 <https://github.com/numpy/numpy/pull/12564>`__: BUG: fix an unsafe PyTuple_GET_ITEM call
+* `#12565 <https://github.com/numpy/numpy/pull/12565>`__: Fix lgtm.com C/C++ build
+* `#12567 <https://github.com/numpy/numpy/pull/12567>`__: BUG: reorder operations for VS2015
+* `#12568 <https://github.com/numpy/numpy/pull/12568>`__: BUG: fix improper use of C-API
+* `#12569 <https://github.com/numpy/numpy/pull/12569>`__: BUG: Make new-lines in compiler error messages print to the console
+* `#12570 <https://github.com/numpy/numpy/pull/12570>`__: MAINT: don't check alignment of size=0 arrays (RELAXED_STRIDES)
+* `#12573 <https://github.com/numpy/numpy/pull/12573>`__: BUG: fix refcount issue caused by #12524
+* `#12580 <https://github.com/numpy/numpy/pull/12580>`__: BUG: fix segfault in ctypeslib with obj being collected
+* `#12581 <https://github.com/numpy/numpy/pull/12581>`__: TST: activate shippable maintenance branches
+* `#12582 <https://github.com/numpy/numpy/pull/12582>`__: BUG: fix f2py pep338 execution method
+* `#12587 <https://github.com/numpy/numpy/pull/12587>`__: BUG: Make `arr.ctypes.data` hold a reference to the underlying...
+* `#12588 <https://github.com/numpy/numpy/pull/12588>`__: BUG: check for errors after PyArray_DESCR_REPLACE
+* `#12590 <https://github.com/numpy/numpy/pull/12590>`__: DOC, MAINT: Prepare for 1.16.0rc1 release.
+* `#12603 <https://github.com/numpy/numpy/pull/12603>`__: DOC: Fix markup in 1.16.0 release notes.
+* `#12621 <https://github.com/numpy/numpy/pull/12621>`__: BUG: longdouble with elsize 12 is never uint alignable.
+* `#12622 <https://github.com/numpy/numpy/pull/12622>`__: BUG: Add missing free in ufunc dealloc
+* `#12623 <https://github.com/numpy/numpy/pull/12623>`__: MAINT: add test for 12-byte alignment
+* `#12655 <https://github.com/numpy/numpy/pull/12655>`__: BUG: fix uint alignment asserts in lowlevel loops
+* `#12656 <https://github.com/numpy/numpy/pull/12656>`__: BENCH: don't fail at import time with old Numpy
+* `#12657 <https://github.com/numpy/numpy/pull/12657>`__: DOC: update 2018 -> 2019
+* `#12705 <https://github.com/numpy/numpy/pull/12705>`__: ENH: Better links in documentation
+* `#12706 <https://github.com/numpy/numpy/pull/12706>`__: MAINT: Further fixups to uint alignment checks
+* `#12707 <https://github.com/numpy/numpy/pull/12707>`__: BUG: Add 'sparc' to platforms implementing 16 byte reals.
+* `#12708 <https://github.com/numpy/numpy/pull/12708>`__: TST: Fix endianness in unstructured_to_structured test
+* `#12710 <https://github.com/numpy/numpy/pull/12710>`__: TST: pin Azure brew version for stability.
diff --git a/doc/neps/_static/nep-0000.png b/doc/neps/_static/nep-0000.png
index 51eb2b258..0fc8176d2 100644
--- a/doc/neps/_static/nep-0000.png
+++ b/doc/neps/_static/nep-0000.png
Binary files differ
diff --git a/doc/neps/_static/nep0013_image1.png b/doc/neps/_static/nep0013_image1.png
new file mode 100644
index 000000000..e1b35b738
--- /dev/null
+++ b/doc/neps/_static/nep0013_image1.png
Binary files differ
diff --git a/doc/neps/_static/nep0013_image2.png b/doc/neps/_static/nep0013_image2.png
new file mode 100644
index 000000000..99f51b2fa
--- /dev/null
+++ b/doc/neps/_static/nep0013_image2.png
Binary files differ
diff --git a/doc/neps/_static/nep0013_image3.png b/doc/neps/_static/nep0013_image3.png
new file mode 100644
index 000000000..87a354ad1
--- /dev/null
+++ b/doc/neps/_static/nep0013_image3.png
Binary files differ
diff --git a/doc/neps/conf.py b/doc/neps/conf.py
index aa11d37b3..6eed7d0c9 100644
--- a/doc/neps/conf.py
+++ b/doc/neps/conf.py
@@ -30,8 +30,7 @@ import os
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
-extensions = ['sphinx.ext.imgmath',
- 'sphinx.ext.graphviz']
+extensions = ['sphinx.ext.imgmath',]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['../source/_templates/']
@@ -47,7 +46,7 @@ master_doc = 'index'
# General information about the project.
project = u'NumPy Enhancement Proposals'
-copyright = u'2017, NumPy Developers'
+copyright = u'2017-2018, NumPy Developers'
author = u'NumPy Developers'
# The version info for the project you're documenting, acts as replacement for
@@ -100,7 +99,7 @@ todo_include_todos = False
## to template names.
##
## This is required for the alabaster theme
-## refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
+## refs: https://alabaster.readthedocs.io/en/latest/installation.html#sidebars
#html_sidebars = {
# '**': [
# 'relations.html', # needs 'show_related': True theme option to display
@@ -127,8 +126,8 @@ if True:
"edit_link": True,
"sidebar": "right",
"scipy_org_logo": True,
- "rootlinks": [("http://scipy.org/", "Scipy.org"),
- ("http://docs.scipy.org/", "Docs")]
+ "rootlinks": [("https://scipy.org/", "Scipy.org"),
+ ("https://docs.scipy.org/", "Docs")]
}
else:
# Default build
diff --git a/doc/neps/index.rst.tmpl b/doc/neps/index.rst.tmpl
index 6c988014f..0ad8e0f80 100644
--- a/doc/neps/index.rst.tmpl
+++ b/doc/neps/index.rst.tmpl
@@ -1,12 +1,21 @@
-===========================
-NumPy Enhancement Proposals
-===========================
-
-NumPy Enhancement Proposals (NEPs) describe proposed changes to NumPy.
-NEPs are modeled on Python Enhancement Proposals (PEPs), and are typically
-written up when large changes to NumPy are proposed.
+=====================================
+Roadmap & NumPy Enhancement Proposals
+=====================================
+
+This page provides an overview of development priorities for NumPy.
+Specifically, it contains a roadmap with a higher-level overview, as
+well as NumPy Enhancement Proposals (NEPs)—suggested changes
+to the library—in various stages of discussion or completion (see `NEP
+0 <nep-0000>`__).
+
+Roadmap
+-------
+.. toctree::
+ :maxdepth: 1
-This page provides an overview of all NEPs.
+ The Scope of NumPy <scope>
+ Current roadmap <roadmap>
+ Wish list <https://github.com/numpy/numpy/issues?q=is%3Aopen+is%3Aissue+label%3A%2223+-+Wish+List%22>
Meta-NEPs (NEPs about NEPs or Processes)
----------------------------------------
@@ -15,19 +24,30 @@ Meta-NEPs (NEPs about NEPs or Processes)
:maxdepth: 1
{% for nep, tags in neps.items() if tags['Type'] == 'Process' %}
- NEP {{ nep }} — {{ tags['Title'] }} <{{ tags['Filename'] }}>
+ {{ tags['Title'] }} <{{ tags['Filename'] }}>
{% endfor %}
nep-template
-Accepted NEPs, implementation in progress
------------------------------------------
+Provisional NEPs (provisionally accepted; interface may change)
+---------------------------------------------------------------
+
+.. toctree::
+ :maxdepth: 1
+
+{% for nep, tags in neps.items() if tags['Status'] == 'Provisional' %}
+ {{ tags['Title'] }} <{{ tags['Filename'] }}>
+{% endfor %}
+
+
+Accepted NEPs (implementation in progress)
+------------------------------------------
.. toctree::
:maxdepth: 1
{% for nep, tags in neps.items() if tags['Status'] == 'Accepted' %}
- NEP {{ nep }} — {{ tags['Title'] }} <{{ tags['Filename'] }}>
+ {{ tags['Title'] }} <{{ tags['Filename'] }}>
{% endfor %}
@@ -38,37 +58,38 @@ Open NEPs (under consideration)
:maxdepth: 1
{% for nep, tags in neps.items() if tags['Status'] == 'Draft' %}
- NEP {{ nep }} — {{ tags['Title'] }} <{{ tags['Filename'] }}>
+ {{ tags['Title'] }} <{{ tags['Filename'] }}>
{% endfor %}
-Implemented NEPs
+Finished NEPs
----------------
.. toctree::
:maxdepth: 1
{% for nep, tags in neps.items() if tags['Status'] == 'Final' %}
- NEP {{ nep }} — {{ tags['Title'] }} <{{ tags['Filename'] }}>
+ {{ tags['Title'] }} <{{ tags['Filename'] }}>
{% endfor %}
-Deferred NEPs
--------------
+Deferred and Superseded NEPs
+----------------------------
.. toctree::
:maxdepth: 1
-{% for nep, tags in neps.items() if tags['Status'] == 'Deferred' %}
- NEP {{ nep }} — {{ tags['Title'] }} <{{ tags['Filename'] }}>
+{% for nep, tags in neps.items() if tags['Status'] in ('Deferred', 'Superseded') %}
+ {{ tags['Title'] }} <{{ tags['Filename'] }}>
{% endfor %}
-Rejected NEPs
--------------
+Rejected and Withdrawn NEPs
+---------------------------
.. toctree::
:maxdepth: 1
-{% for nep, tags in neps.items() if tags['Status'] == 'Rejected' %}
- NEP {{ nep }} — {{ tags['Title'] }} <{{ tags['Filename'] }}>
+{% for nep, tags in neps.items() if tags['Status'] in ('Rejected', 'Withdrawn') %}
+ {{ tags['Title'] }} <{{ tags['Filename'] }}>
{% endfor %}
+
diff --git a/doc/neps/nep-0000.rst b/doc/neps/nep-0000.rst
index 9c6646db2..89ba177cb 100644
--- a/doc/neps/nep-0000.rst
+++ b/doc/neps/nep-0000.rst
@@ -1,6 +1,6 @@
-===================
-Purpose and Process
-===================
+===========================
+NEP 0 — Purpose and Process
+===========================
:Author: Jarrod Millman <millman@berkeley.edu>
:Status: Active
@@ -31,12 +31,18 @@ feature proposal [1]_.
Types
^^^^^
-There are two kinds of NEP:
+There are three kinds of NEPs:
1. A **Standards Track** NEP describes a new feature or implementation
for NumPy.
-2. A **Process** NEP describes a process surrounding NumPy, or
+2. An **Informational** NEP describes a NumPy design issue, or provides
+   general guidelines or information to the NumPy community, but does not
+ propose a new feature. Informational NEPs do not necessarily represent a
+ NumPy community consensus or recommendation, so users and implementers are
+ free to ignore Informational NEPs or follow their advice.
+
+3. A **Process** NEP describes a process surrounding NumPy, or
proposes a change to (or an event in) a process. Process NEPs are
like Standards Track NEPs but apply to areas other than the NumPy
language itself. They may propose an implementation, but not to
@@ -97,21 +103,28 @@ status of NEPs are as follows:
All NEPs should be created with the ``Draft`` status.
-Normally, a NEP is ``Accepted`` by consensus of all interested
-Contributors. To verify that consensus has been reached, the NEP
-author or another interested party should make a post on the
-numpy-discussion mailing list proposing it for acceptance; if there
-are no substantive objections after one week, the NEP can officially
-be marked ``Accepted``, and a link to this post should be added to the
-NEP for reference.
-
-In unusual cases, the `NumPy Steering Council`_ may be asked to decide whether
-a controversial NEP is ``Accepted``.
+Eventually, after discussion, there may be a consensus that the NEP
+should be accepted – see the next section for details. At this point
+the status becomes ``Accepted``.
Once a NEP has been ``Accepted``, the reference implementation must be
completed. When the reference implementation is complete and incorporated
into the main source code repository, the status will be changed to ``Final``.
+To allow gathering of additional design and interface feedback before
+committing to long-term stability for a NumPy feature or API, a NEP may
+also be marked as "Provisional". This is short for
+"Provisionally Accepted", and indicates that the proposal has been accepted for
+inclusion in the reference implementation, but additional user feedback is
+needed before the full design can be considered "Final". Unlike regular
+accepted NEPs, provisionally accepted NEPs may still be Rejected or Withdrawn
+even after the related changes have been included in a NumPy release.
+
+Wherever possible, it is considered preferable to reduce the scope of a
+proposal to avoid the need to rely on the "Provisional" status (e.g. by
+deferring some features to later NEPs), as this status can lead to version
+compatibility challenges in the wider NumPy ecosystem.
+
A NEP can also be assigned status ``Deferred``. The NEP author or a
core developer can assign the NEP this status when no progress is being made
on the NEP.
@@ -135,6 +148,61 @@ Process NEPs may also have a status of ``Active`` if they are never
meant to be completed, e.g. NEP 0 (this NEP).
+How a NEP becomes Accepted
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+A NEP is ``Accepted`` by consensus of all interested contributors. We
+need a concrete way to tell whether consensus has been reached. When
+you think a NEP is ready to accept, send an email to the
+numpy-discussion mailing list with a subject like:
+
+ Proposal to accept NEP #<number>: <title>
+
+In the body of your email, you should:
+
+* link to the latest version of the NEP,
+
+* briefly describe any major points of contention and how they were
+ resolved,
+
+* include a sentence like: "If there are no substantive objections
+ within 7 days from this email, then the NEP will be accepted; see
+ NEP 0 for more details."
+
+For an example, see: https://mail.python.org/pipermail/numpy-discussion/2018-June/078345.html
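+
+For illustration, a complete proposal email (hypothetical NEP number,
+title, and links) might look like::
+
+    Subject: Proposal to accept NEP #999: Example proposal
+
+    The latest version of NEP 999 is available at <link to the NEP>.
+    The main point of contention was X, which was resolved by Y. If
+    there are no substantive objections within 7 days from this email,
+    then the NEP will be accepted; see NEP 0 for more details.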
+
+After you send the email, you should make sure to link to the email
+thread from the ``Discussion`` section of the NEP, so that people can
+find it later.
+
+Generally the NEP author will be the one to send this email, but
+anyone can do it – the important thing is to make sure that everyone
+knows when a NEP is on the verge of acceptance, and give them a final
+chance to respond. If there's some special reason to extend this final
+comment period beyond 7 days, then that's fine, just say so in the
+email. You shouldn't do less than 7 days, because sometimes people are
+travelling or similar and need some time to respond.
+
+In general, the goal is to make sure that the community has consensus,
+not provide a rigid policy for people to try to game. When in doubt,
+err on the side of asking for more feedback and looking for
+opportunities to compromise.
+
+If the final comment period passes without any substantive objections,
+then the NEP can officially be marked ``Accepted``. You should send a
+followup email notifying the list (celebratory emoji optional but
+encouraged 🎉✨), and then update the NEP by setting its ``:Status:``
+to ``Accepted``, and its ``:Resolution:`` header to a link to your
+followup email.
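+
+For example, the preamble of an accepted NEP would then contain fields
+like the following (hypothetical resolution link)::
+
+    :Status: Accepted
+    :Resolution: https://mail.python.org/pipermail/numpy-discussion/<thread>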
+
+If there *are* substantive objections, then the NEP remains in
+``Draft`` state, discussion continues as normal, and it can be
+proposed for acceptance again later once the objections are resolved.
+
+In unusual cases, the `NumPy Steering Council`_ may be asked to decide
+whether a controversial NEP is ``Accepted``.
+
+
Maintenance
^^^^^^^^^^^
@@ -203,7 +271,7 @@ References and Footnotes
`GitHub <https://github.com/numpy/numpy/tree/master/doc/neps>`_.
.. [2] The URL for viewing NEPs on the web is
- http://numpy.github.io/neps/.
+ https://www.numpy.org/neps/.
.. _repo: https://github.com/numpy/numpy
@@ -220,7 +288,7 @@ References and Footnotes
.. _reStructuredTextPrimer: http://www.sphinx-doc.org/en/stable/rest.html
-.. _Sphinx: www.sphinx-doc.org/en/stable
+.. _Sphinx: http://www.sphinx-doc.org/en/stable/
Copyright
diff --git a/doc/neps/nep-0001-npy-format.rst b/doc/neps/nep-0001-npy-format.rst
index 2057aed83..4eded02ff 100644
--- a/doc/neps/nep-0001-npy-format.rst
+++ b/doc/neps/nep-0001-npy-format.rst
@@ -1,6 +1,6 @@
-=====================================
-A Simple File Format for NumPy Arrays
-=====================================
+=============================================
+NEP 1 — A Simple File Format for NumPy Arrays
+=============================================
:Author: Robert Kern <robert.kern@gmail.com>
:Status: Final
@@ -290,15 +290,15 @@ included in the 1.9.0 release of numpy.
Specifically, the file format.py in this directory implements the
format as described here.
- http://github.com/numpy/numpy/blob/master/numpy/lib/format.py
+ https://github.com/numpy/numpy/blob/master/numpy/lib/format.py
References
----------
-[1] http://docs.python.org/lib/module-pickle.html
+[1] https://docs.python.org/library/pickle.html
-[2] http://hdf.ncsa.uiuc.edu/products/hdf5/index.html
+[2] https://support.hdfgroup.org/HDF5/
Copyright
diff --git a/doc/neps/nep-0002-warnfix.rst b/doc/neps/nep-0002-warnfix.rst
index 60dc885b2..207dfa3d4 100644
--- a/doc/neps/nep-0002-warnfix.rst
+++ b/doc/neps/nep-0002-warnfix.rst
@@ -1,6 +1,6 @@
-=========================================================================
-A proposal to build numpy without warning with a big set of warning flags
-=========================================================================
+=================================================================================
+NEP 2 — A proposal to build numpy without warning with a big set of warning flags
+=================================================================================
:Author: David Cournapeau
:Contact: david@ar.media.kyoto-u.ac.jp
diff --git a/doc/neps/nep-0003-math_config_clean.rst b/doc/neps/nep-0003-math_config_clean.rst
index 5af907437..ebd32b124 100644
--- a/doc/neps/nep-0003-math_config_clean.rst
+++ b/doc/neps/nep-0003-math_config_clean.rst
@@ -1,6 +1,6 @@
-===========================================================
-Cleaning the math configuration of numpy.core
-===========================================================
+=====================================================
+NEP 3 — Cleaning the math configuration of numpy.core
+=====================================================
:Author: David Cournapeau
:Contact: david@ar.media.kyoto-u.ac.jp
diff --git a/doc/neps/nep-0004-datetime-proposal3.rst b/doc/neps/nep-0004-datetime-proposal3.rst
index 46d8e314b..b32964e88 100644
--- a/doc/neps/nep-0004-datetime-proposal3.rst
+++ b/doc/neps/nep-0004-datetime-proposal3.rst
@@ -1,6 +1,6 @@
-====================================================================
- A (third) proposal for implementing some date/time types in NumPy
-====================================================================
+=========================================================================
+NEP 4 — A (third) proposal for implementing some date/time types in NumPy
+=========================================================================
:Author: Francesc Alted i Abad
:Contact: faltet@pytables.com
@@ -562,9 +562,9 @@ examples of other derived units, and we find this a bit too overwhelming
for this proposal purposes.
-.. [1] http://docs.python.org/lib/module-datetime.html
-.. [2] http://www.egenix.com/products/python/mxBase/mxDateTime
-.. [3] http://en.wikipedia.org/wiki/Unix_time
+.. [1] https://docs.python.org/library/datetime.html
+.. [2] https://www.egenix.com/products/python/mxBase/mxDateTime
+.. [3] https://en.wikipedia.org/wiki/Unix_time
.. Local Variables:
diff --git a/doc/neps/nep-0005-generalized-ufuncs.rst b/doc/neps/nep-0005-generalized-ufuncs.rst
index 54b2b370e..366e26ffd 100644
--- a/doc/neps/nep-0005-generalized-ufuncs.rst
+++ b/doc/neps/nep-0005-generalized-ufuncs.rst
@@ -1,6 +1,6 @@
-===============================
-Generalized Universal Functions
-===============================
+=======================================
+NEP 5 — Generalized Universal Functions
+=======================================
:Status: Final
diff --git a/doc/neps/nep-0006-newbugtracker.rst b/doc/neps/nep-0006-newbugtracker.rst
index 2b9344ed0..8dc7a1d8e 100644
--- a/doc/neps/nep-0006-newbugtracker.rst
+++ b/doc/neps/nep-0006-newbugtracker.rst
@@ -1,6 +1,6 @@
-===========================================
-Replacing Trac with a different bug tracker
-===========================================
+===================================================
+NEP 6 — Replacing Trac with a different bug tracker
+===================================================
:Author: David Cournapeau, Stefan van der Walt
:Status: Deferred
diff --git a/doc/neps/nep-0007-datetime-proposal.rst b/doc/neps/nep-0007-datetime-proposal.rst
index 72d48d244..5547a4306 100644
--- a/doc/neps/nep-0007-datetime-proposal.rst
+++ b/doc/neps/nep-0007-datetime-proposal.rst
@@ -1,6 +1,6 @@
-====================================================================
- A proposal for implementing some date/time types in NumPy
-====================================================================
+==================================================================
+NEP 7 — A proposal for implementing some date/time types in NumPy
+==================================================================
:Author: Travis Oliphant
:Contact: oliphant@enthought.com
@@ -662,9 +662,9 @@ operations mixing business days with other time units will not be
allowed.
-.. [1] http://docs.python.org/lib/module-datetime.html
-.. [2] http://www.egenix.com/products/python/mxBase/mxDateTime
-.. [3] http://en.wikipedia.org/wiki/Unix_time
+.. [1] https://docs.python.org/library/datetime.html
+.. [2] https://www.egenix.com/products/python/mxBase/mxDateTime
+.. [3] https://en.wikipedia.org/wiki/Unix_time
.. Local Variables:
diff --git a/doc/neps/nep-0008-groupby_additions.rst b/doc/neps/nep-0008-groupby_additions.rst
index fa02f2f9c..3189fcf41 100644
--- a/doc/neps/nep-0008-groupby_additions.rst
+++ b/doc/neps/nep-0008-groupby_additions.rst
@@ -1,6 +1,6 @@
-====================================================================
- A proposal for adding groupby functionality to NumPy
-====================================================================
+=============================================================
+NEP 8 — A proposal for adding groupby functionality to NumPy
+=============================================================
:Author: Travis Oliphant
:Contact: oliphant@enthought.com
diff --git a/doc/neps/nep-0009-structured_array_extensions.rst b/doc/neps/nep-0009-structured_array_extensions.rst
index 695d0d516..8b81a308d 100644
--- a/doc/neps/nep-0009-structured_array_extensions.rst
+++ b/doc/neps/nep-0009-structured_array_extensions.rst
@@ -1,6 +1,6 @@
-===========================
-Structured array extensions
-===========================
+===================================
+NEP 9 — Structured array extensions
+===================================
:Status: Deferred
diff --git a/doc/neps/nep-0010-new-iterator-ufunc.rst b/doc/neps/nep-0010-new-iterator-ufunc.rst
index 7b388a974..8601b4a4c 100644
--- a/doc/neps/nep-0010-new-iterator-ufunc.rst
+++ b/doc/neps/nep-0010-new-iterator-ufunc.rst
@@ -1,6 +1,6 @@
-=====================================
-Optimizing Iterator/UFunc Performance
-=====================================
+==============================================
+NEP 10 — Optimizing Iterator/UFunc Performance
+==============================================
:Author: Mark Wiebe <mwwiebe@gmail.com>
:Content-Type: text/x-rst
diff --git a/doc/neps/nep-0011-deferred-ufunc-evaluation.rst b/doc/neps/nep-0011-deferred-ufunc-evaluation.rst
index 5f5de3518..a7143c6ee 100644
--- a/doc/neps/nep-0011-deferred-ufunc-evaluation.rst
+++ b/doc/neps/nep-0011-deferred-ufunc-evaluation.rst
@@ -1,6 +1,6 @@
-=========================
-Deferred UFunc Evaluation
-=========================
+==================================
+NEP 11 — Deferred UFunc Evaluation
+==================================
:Author: Mark Wiebe <mwwiebe@gmail.com>
:Content-Type: text/x-rst
diff --git a/doc/neps/nep-0012-missing-data.rst b/doc/neps/nep-0012-missing-data.rst
index 1553339f4..dbcf1b579 100644
--- a/doc/neps/nep-0012-missing-data.rst
+++ b/doc/neps/nep-0012-missing-data.rst
@@ -1,10 +1,10 @@
-===================================
-Missing Data Functionality in NumPy
-===================================
+============================================
+NEP 12 — Missing Data Functionality in NumPy
+============================================
:Author: Mark Wiebe <mwwiebe@gmail.com>
:Copyright: Copyright 2011 by Enthought, Inc
-:License: CC By-SA 3.0 (http://creativecommons.org/licenses/by-sa/3.0/)
+:License: CC By-SA 3.0 (https://creativecommons.org/licenses/by-sa/3.0/)
:Date: 2011-06-23
:Status: Deferred
@@ -224,7 +224,7 @@ but with semantics reflecting its status as a missing value. In particular,
trying to treat it as a boolean will raise an exception, and comparisons
with it will produce numpy.NA instead of True or False. These basics are
adopted from the behavior of the NA value in the R project. To dig
-deeper into the ideas, http://en.wikipedia.org/wiki/Ternary_logic#Kleene_logic
+deeper into the ideas, https://en.wikipedia.org/wiki/Ternary_logic#Kleene_logic
provides a starting point.
For example,::
@@ -857,7 +857,7 @@ Shared Masks
One feature of numpy.ma is called 'shared masks'.
-http://docs.scipy.org/doc/numpy/reference/maskedarray.baseclass.html#numpy.ma.MaskedArray.sharedmask
+https://docs.scipy.org/doc/numpy/reference/maskedarray.baseclass.html#numpy.ma.MaskedArray.sharedmask
This feature cannot be supported by a masked implementation of
missing values without directly violating the missing value abstraction.
@@ -888,7 +888,7 @@ found from doing google searches of numpy C API array access.
NumPy Documentation - How to extend NumPy
-----------------------------------------
-http://docs.scipy.org/doc/numpy/user/c-info.how-to-extend.html#dealing-with-array-objects
+https://docs.scipy.org/doc/numpy/user/c-info.how-to-extend.html#dealing-with-array-objects
This page has a section "Dealing with array objects" which has some advice for how
to access numpy arrays from C. When accepting arrays, the first step it suggests is
@@ -898,7 +898,7 @@ advice will properly fail when given an NA-masked array it doesn't know how to h
The way this is handled is that PyArray_FromAny requires a special flag, NPY_ARRAY_ALLOWNA,
before it will allow NA-masked arrays to flow through.
-http://docs.scipy.org/doc/numpy/reference/c-api.array.html#NPY_ARRAY_ALLOWNA
+https://docs.scipy.org/doc/numpy/reference/c-api.array.html#NPY_ARRAY_ALLOWNA
Code which does not follow this advice, and instead just calls PyArray_Check() to verify
its an ndarray and checks some flags, will silently produce incorrect results. This style
diff --git a/doc/neps/nep-0013-ufunc-overrides.rst b/doc/neps/nep-0013-ufunc-overrides.rst
index c97b69023..0888c7559 100644
--- a/doc/neps/nep-0013-ufunc-overrides.rst
+++ b/doc/neps/nep-0013-ufunc-overrides.rst
@@ -1,6 +1,6 @@
-=================================
-A Mechanism for Overriding Ufuncs
-=================================
+==========================================
+NEP 13 — A Mechanism for Overriding Ufuncs
+==========================================
.. currentmodule:: numpy
@@ -53,7 +53,7 @@ changes in 3rd party code.
.. [1] http://docs.python.org/doc/numpy/user/basics.subclassing.html
.. [2] https://github.com/scipy/scipy/issues/2123
.. [3] https://github.com/scipy/scipy/issues/1569
-.. [4] http://technicaldiscovery.blogspot.com/2013/07/thoughts-after-scipy-2013-and-specific.html
+.. [4] https://technicaldiscovery.blogspot.com/2013/07/thoughts-after-scipy-2013-and-specific.html
Motivation
@@ -134,7 +134,7 @@ which have multiplication semantics incompatible with numpy arrays.
However, the aim is to enable writing other custom array types that have
strictly ndarray compatible semantics.
-.. [5] http://mail.python.org/pipermail/numpy-discussion/2011-June/056945.html
+.. [5] https://mail.python.org/pipermail/numpy-discussion/2011-June/056945.html
.. [6] https://github.com/numpy/numpy/issues/5844
@@ -261,16 +261,7 @@ consider carefully if any surprising behavior results.
Type casting hierarchy.
- .. graphviz::
-
- digraph array_ufuncs {
- rankdir=BT;
- A -> C [label="C"];
- B -> C [label="C"];
- D -> B [label="B"];
- ndarray -> C [label="A"];
- ndarray -> B [label="B"];
- }
+ .. image:: _static/nep0013_image1.png
The ``__array_ufunc__`` of type A can handle ndarrays returning C,
B can handle ndarray and D returning B, and C can handle A and B returning C,
@@ -286,14 +277,7 @@ consider carefully if any surprising behavior results.
One-cycle in the ``__array_ufunc__`` graph.
- .. graphviz::
-
- digraph array_ufuncs {
- rankdir=BT;
- A -> B [label="B"];
- B -> A [label="A"];
- }
-
+ .. image:: _static/nep0013_image2.png
In this case, the ``__array_ufunc__`` relations have a cycle of length 1,
and a type casting hierarchy does not exist. Binary operations are not
@@ -303,15 +287,7 @@ consider carefully if any surprising behavior results.
Longer cycle in the ``__array_ufunc__`` graph.
- .. graphviz::
-
- digraph array_ufuncs {
- rankdir=BT;
- A -> B [label="B"];
- B -> C [label="C"];
- C -> A [label="A"];
- }
-
+ .. image:: _static/nep0013_image3.png
In this case, the ``__array_ufunc__`` relations have a longer cycle, and a
type casting hierarchy does not exist. Binary operations are still
@@ -635,7 +611,7 @@ simplify the dispatch logic for binary operations with NumPy arrays
as much as possible, by making it possible to use Python's dispatch rules
or NumPy's dispatch rules, but not some mixture of both at the same time.
-.. [9] http://bugs.python.org/issue30140
+.. [9] https://bugs.python.org/issue30140
.. _neps.ufunc-overrides.list-of-operators:
diff --git a/doc/neps/nep-0014-dropping-python2.7-proposal.rst b/doc/neps/nep-0014-dropping-python2.7-proposal.rst
index 6cfd4707f..3adf3b407 100644
--- a/doc/neps/nep-0014-dropping-python2.7-proposal.rst
+++ b/doc/neps/nep-0014-dropping-python2.7-proposal.rst
@@ -1,6 +1,6 @@
-====================================
-Plan for dropping Python 2.7 support
-====================================
+=============================================
+NEP 14 — Plan for dropping Python 2.7 support
+=============================================
:Status: Accepted
:Resolution: https://mail.python.org/pipermail/numpy-discussion/2017-November/077419.html
@@ -50,6 +50,6 @@ to Python3 only, see the python3-statement_.
For more information on porting your code to run on Python 3, see the
python3-howto_.
-.. _python3-statement: http://www.python3statement.org/
+.. _python3-statement: https://python3statement.org/
.. _python3-howto: https://docs.python.org/3/howto/pyporting.html
diff --git a/doc/neps/nep-0015-merge-multiarray-umath.rst b/doc/neps/nep-0015-merge-multiarray-umath.rst
new file mode 100644
index 000000000..576a21e23
--- /dev/null
+++ b/doc/neps/nep-0015-merge-multiarray-umath.rst
@@ -0,0 +1,157 @@
+=====================================
+NEP 15 — Merging multiarray and umath
+=====================================
+
+:Author: Nathaniel J. Smith <njs@pobox.com>
+:Status: Final
+:Type: Standards Track
+:Created: 2018-02-22
+:Resolution: https://mail.python.org/pipermail/numpy-discussion/2018-June/078345.html
+
+Abstract
+--------
+
+Let's merge ``numpy.core.multiarray`` and ``numpy.core.umath`` into a
+single extension module, and deprecate ``np.set_numeric_ops``.
+
+
+Background
+----------
+
+Currently, numpy's core C code is split between two separate extension
+modules.
+
+``numpy.core.multiarray`` is built from
+``numpy/core/src/multiarray/*.c``, and contains the core array
+functionality (in particular, the ``ndarray`` object).
+
+``numpy.core.umath`` is built from ``numpy/core/src/umath/*.c``, and
+contains the ufunc machinery.
+
+These two modules each expose their own separate C API, accessed via
+``import_multiarray()`` and ``import_umath()`` respectively. The idea
+is that they're supposed to be independent modules, with
+``multiarray`` as a lower-level layer and ``umath`` built on top. In
+practice this has turned out to be problematic.
+
+First, the layering isn't perfect: when you write ``ndarray +
+ndarray``, this invokes ``ndarray.__add__``, which then calls the
+ufunc ``np.add``. This means that ``ndarray`` needs to know about
+ufuncs – so instead of a clean layering, we have a circular
+dependency. To solve this, ``multiarray`` exports a somewhat
+terrifying function called ``set_numeric_ops``. The bootstrap
+procedure each time you ``import numpy`` is:
+
+1. ``multiarray`` and its ``ndarray`` object are loaded, but
+ arithmetic operations on ndarrays are broken.
+
+2. ``umath`` is loaded.
+
+3. ``set_numeric_ops`` is used to monkeypatch all the methods like
+ ``ndarray.__add__`` with objects from ``umath``.
+
+In addition, ``set_numeric_ops`` is exposed as a public API,
+``np.set_numeric_ops``.
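+
+As a rough sketch (not the actual bootstrap code), the monkeypatching
+step amounts to something like::
+
+    import numpy as np
+    import numpy.core.umath as umath
+
+    # Install ufuncs as the implementations behind ndarray.__add__,
+    # ndarray.__mul__, etc.; the previous ops are returned as a dict.
+    old_ops = np.set_numeric_ops(add=umath.add, multiply=umath.multiply)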
+
+Furthermore, even when this layering does work, it ends up distorting
+the shape of our public ABI. In recent years, the most common reason
+for adding new functions to ``multiarray``\'s "public" ABI is not that
+they really need to be public or that we expect other projects to use
+them, but rather just that we need to call them from ``umath``. This
+is extremely unfortunate, because it makes our public ABI
+unnecessarily large, and since we can never remove things from it,
+it creates an ongoing maintenance burden. The way C works, you can
+have internal API that's visible to everything inside the same
+extension module, or you can have a public API that everyone can use;
+you can't (easily) have an API that's visible to multiple extension
+modules inside numpy, but not to external users.
+
+We've also increasingly been putting utility code into
+``numpy/core/src/private/``, which now contains a bunch of files which
+are ``#include``\d twice, once into ``multiarray`` and once into
+``umath``. This is pretty gross, and is purely a workaround for these
+being separate C extensions. The ``npymath`` library is also
+included in both extension modules.
+
+
+Proposed changes
+----------------
+
+This NEP proposes three changes:
+
+1. We should start building ``numpy/core/src/multiarray/*.c`` and
+ ``numpy/core/src/umath/*.c`` together into a single extension
+ module.
+
+2. Instead of ``set_numeric_ops``, we should use some new, private API
+ to set up ``ndarray.__add__`` and friends.
+
+3. We should deprecate, and eventually remove, ``np.set_numeric_ops``.
+
+
+Non-proposed changes
+--------------------
+
+We don't necessarily propose to throw away the distinction between
+multiarray/ and umath/ in terms of our source code organization:
+internal organization is useful! We just want to build them together
+into a single extension module. Of course, this does open the door for
+potential future refactorings, which we can then evaluate based on
+their merits as they come up.
+
+It also doesn't propose that we break the public C ABI. We should
+continue to provide ``import_multiarray()`` and ``import_umath()``
+functions – it's just that now both ABIs will ultimately be loaded
+from the same C library. Due to how ``import_multiarray()`` and
+``import_umath()`` are written, we'll also still need to have modules
+called ``numpy.core.multiarray`` and ``numpy.core.umath``, and they'll
+need to continue to export ``_ARRAY_API`` and ``_UFUNC_API`` objects –
+but we can make one or both of these modules be tiny shims that simply
+re-export the magic API object from wherever it's actually defined.
+(See ``numpy/core/code_generators/generate_{numpy,ufunc}_api.py`` for
+details of how these imports work.)
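+
+For instance, assuming the merged extension module were called
+``_multiarray_umath`` (a hypothetical name for this sketch), such a
+shim could be as small as::
+
+    # numpy/core/umath.py -- tiny shim re-exporting the magic API object
+    from numpy.core._multiarray_umath import _UFUNC_API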
+
+
+Backward compatibility
+----------------------
+
+The only compatibility break is the deprecation of ``np.set_numeric_ops``.
+
+
+Rejected alternatives
+---------------------
+
+Preserve ``set_numeric_ops`` for monkeypatching
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In discussing this NEP, one additional use case was raised for
+``set_numeric_ops``: if you have an optimized vector math library
+(e.g. Intel's MKL VML, Sleef, or Yeppp), then ``set_numeric_ops`` can
+be used to monkeypatch numpy to use these operations instead of
+numpy's built-in vector operations. But, even if we grant that this is
+a great idea, using ``set_numeric_ops`` isn't actually the best way to
+do it. All ``set_numeric_ops`` allows you to do is take over Python's
+syntactic operators (``+``, ``*``, etc.) on ndarrays; it doesn't let
+you affect operations called via other APIs (e.g., ``np.add``), or
+operations that don't have built-in syntax (e.g., ``np.exp``). Also,
+you have to reimplement the whole ufunc machinery, instead of just the
+core loop. On the other hand, the `PyUFunc_ReplaceLoopBySignature
+<https://docs.scipy.org/doc/numpy/reference/c-api.ufunc.html#c.PyUFunc_ReplaceLoopBySignature>`__
+API – which was added in 2006 – allows replacement of the inner loops
+of arbitrary ufuncs. This is both simpler and more powerful – e.g.
+replacing the inner loop of ``np.add`` means your code will
+automatically be used for both ``ndarray + ndarray`` as well as direct
+calls to ``np.add``. So this doesn't seem like a good reason to not
+deprecate ``set_numeric_ops``.
+
+
+Discussion
+----------
+
+* https://mail.python.org/pipermail/numpy-discussion/2018-March/077764.html
+* https://mail.python.org/pipermail/numpy-discussion/2018-June/078345.html
+
+Copyright
+---------
+
+This document has been placed in the public domain.
diff --git a/doc/neps/nep-0016-abstract-array.rst b/doc/neps/nep-0016-abstract-array.rst
new file mode 100644
index 000000000..86d164d8e
--- /dev/null
+++ b/doc/neps/nep-0016-abstract-array.rst
@@ -0,0 +1,328 @@
+=============================================================
+NEP 16 — An abstract base class for identifying "duck arrays"
+=============================================================
+
+:Author: Nathaniel J. Smith <njs@pobox.com>
+:Status: Withdrawn
+:Type: Standards Track
+:Created: 2018-03-06
+:Resolution: https://github.com/numpy/numpy/pull/12174
+
+.. note::
+
+ This NEP has been withdrawn in favor of the protocol based approach
+ described in
+ `NEP 22 <nep-0022-ndarray-duck-typing-overview.html>`__
+
+Abstract
+--------
+
+We propose to add an abstract base class ``AbstractArray`` so that
+third-party classes can declare their ability to "quack like" an
+``ndarray``, and an ``asabstractarray`` function that performs
+similarly to ``asarray`` except that it passes through
+``AbstractArray`` instances unchanged.
+
+
+Detailed description
+--------------------
+
+Many functions, in NumPy and in third-party packages, start with some
+code like::
+
+ def myfunc(a, b):
+ a = np.asarray(a)
+ b = np.asarray(b)
+ ...
+
+This ensures that ``a`` and ``b`` are ``np.ndarray`` objects, so
+``myfunc`` can carry on assuming that they'll act like ndarrays both
+semantically (at the Python level), and also in terms of how they're
+stored in memory (at the C level). But many of these functions only
+work with arrays at the Python level, which means that they don't
+actually need ``ndarray`` objects *per se*: they could work just as
+well with any Python object that "quacks like" an ndarray, such as
+sparse arrays, dask's lazy arrays, or xarray's labeled arrays.
+
+However, currently, there's no way for these libraries to express that
+their objects can quack like an ndarray, and there's no way for
+functions like ``myfunc`` to express that they'd be happy with
+anything that quacks like an ndarray. The purpose of this NEP is to
+provide those two features.
+
+Sometimes people suggest using ``np.asanyarray`` for this purpose, but
+unfortunately its semantics are exactly backwards: it guarantees that
+the object it returns uses the same memory layout as an ``ndarray``,
+but tells you nothing at all about its semantics, which makes it
+essentially impossible to use safely in practice. Indeed, the two
+``ndarray`` subclasses distributed with NumPy – ``np.matrix`` and
+``np.ma.masked_array`` – do have incompatible semantics, and if they
+were passed to a function like ``myfunc`` that doesn't check for them
+as a special case, then it may silently return incorrect results.
+
+
+Declaring that an object can quack like an array
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There are two basic approaches we could use for checking whether an
+object quacks like an array. We could check for a special attribute on
+the class::
+
+ def quacks_like_array(obj):
+ return bool(getattr(type(obj), "__quacks_like_array__", False))
+
+Or, we could define an `abstract base class (ABC)
+<https://docs.python.org/3/library/collections.abc.html>`__::
+
+ def quacks_like_array(obj):
+ return isinstance(obj, AbstractArray)
+
+If you look at how ABCs work, this is essentially equivalent to
+keeping a global set of types that have been declared to implement the
+``AbstractArray`` interface, and then checking it for membership.
+
+Between these, the ABC approach seems to have a number of advantages:
+
+* It's Python's standard, "one obvious way" of doing this.
+
+* ABCs can be introspected (e.g. ``help(np.AbstractArray)`` does
+ something useful).
+
+* ABCs can provide useful mixin methods.
+
+* ABCs integrate with other features like mypy type-checking,
+  ``functools.singledispatch``, etc. (see the sketch below).
+
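+To illustrate the last point, here is a small sketch (using a stand-in
+for the proposed ABC) showing that ``functools.singledispatch``
+dispatches correctly even for ``register()``-ed virtual subclasses::
+
+    import abc
+    import functools
+
+    class AbstractArray(abc.ABC):  # stand-in for the proposed ABC
+        pass
+
+    @functools.singledispatch
+    def describe(obj):
+        return "not an array"
+
+    @describe.register(AbstractArray)
+    def _(obj):
+        return "quacks like an array"
+
+    class MyArray:
+        pass
+
+    AbstractArray.register(MyArray)  # virtual subclass, no inheritance
+
+    print(describe(MyArray()))  # -> "quacks like an array"
+    print(describe(42))         # -> "not an array"
+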
+One obvious thing to check is whether this choice affects speed. Using
+the attached benchmark script on a CPython 3.7 prerelease (revision
+c4d77a661138d, self-compiled, no PGO), on a Thinkpad T450s running
+Linux, we find::
+
+ np.asarray(ndarray_obj) 330 ns
+ np.asarray([]) 1400 ns
+
+ Attribute check, success 80 ns
+ Attribute check, failure 80 ns
+
+ ABC, success via subclass 340 ns
+ ABC, success via register() 700 ns
+ ABC, failure 370 ns
+
+Notes:
+
+* The first two lines are included to put the other lines in context.
+
+* This used 3.7 because both ``getattr`` and ABCs are receiving
+ substantial optimizations in this release, and it's more
+ representative of the long-term future of Python. (Failed
+ ``getattr`` doesn't necessarily construct an exception object
+ anymore, and ABCs were reimplemented in C.)
+
+* The "success" lines refer to cases where ``quacks_like_array`` would
+ return True. The "failure" lines are cases where it would return
+ False.
+
+* The first measurement for ABCs is subclasses defined like::
+
+ class MyArray(AbstractArray):
+ ...
+
+ The second is for subclasses defined like::
+
+ class MyArray:
+ ...
+
+ AbstractArray.register(MyArray)
+
+ I don't know why there's such a large difference between these.
+
+In practice, either way we'd only do the full test after first
+checking for well-known types like ``ndarray``, ``list``, etc. `This
+is how NumPy currently checks for other double-underscore attributes
+<https://github.com/numpy/numpy/blob/master/numpy/core/src/private/get_attr_string.h>`__
+and the same idea applies here to either approach. So these numbers
+won't affect the common case, just the case where we actually have an
+``AbstractArray``, or else another third-party object that will end up
+going through ``__array__`` or ``__array_interface__`` or end up as an
+object array.
+
+So in summary, using an ABC will be slightly slower than using an
+attribute, but this doesn't affect the most common paths, and the
+magnitude of slowdown is fairly small (~250 ns on an operation that
+already takes longer than that). Furthermore, we can potentially
+optimize this further (e.g. by keeping a tiny LRU cache of types that
+are known to be AbstractArray subclasses, on the assumption that most
+code will only use one or two of these types at a time), and it's very
+unclear that this even matters – if the speed of ``asarray`` no-op
+pass-throughs were a bottleneck that showed up in profiles, then
+probably we would have made them faster already! (It would be trivial
+to fast-path this, but we don't.)
+
+Given the semantic and usability advantages of ABCs, this seems like
+an acceptable trade-off.
+
+..
+ CPython 3.6 (from Debian)::
+
+ Attribute check, success 110 ns
+ Attribute check, failure 370 ns
+
+ ABC, success via subclass 690 ns
+ ABC, success via register() 690 ns
+ ABC, failure 1220 ns
+
+
+Specification of ``asabstractarray``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Given ``AbstractArray``, the definition of ``asabstractarray`` is simple::
+
+ def asabstractarray(a, dtype=None):
+ if isinstance(a, AbstractArray):
+ if dtype is not None and dtype != a.dtype:
+ return a.astype(dtype)
+ return a
+ return asarray(a, dtype=dtype)
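+
+A function like ``myfunc`` above could then opt in to duck arrays by
+swapping one call::
+
+    def myfunc(a, b):
+        a = np.asabstractarray(a)
+        b = np.asabstractarray(b)
+        ...  # a and b may now be ndarrays *or* any AbstractArray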
+
+Things to note:
+
+* ``asarray`` also accepts an ``order=`` argument, but we don't
+ include that here because it's about details of memory
+ representation, and the whole point of this function is that you use
+ it to declare that you don't care about details of memory
+ representation.
+
+* Using the ``astype`` method allows the ``a`` object to decide how to
+ implement casting for its particular type.
+
+* For strict compatibility with ``asarray``, we skip calling
+ ``astype`` when the dtype is already correct. Compare::
+
+ >>> a = np.arange(10)
+
+ # astype() always returns a view:
+ >>> a.astype(a.dtype) is a
+ False
+
+ # asarray() returns the original object if possible:
+ >>> np.asarray(a, dtype=a.dtype) is a
+ True
+
+
+What exactly are you promising if you inherit from ``AbstractArray``?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This will presumably be refined over time. The ideal of course is that
+your class should be indistinguishable from a real ``ndarray``, but
+nothing enforces that except the expectations of users. In practice,
+declaring that your class implements the ``AbstractArray`` interface
+simply means that it will start passing through ``asabstractarray``,
+and so by subclassing it you're saying that if some code works for
+``ndarray``\s but breaks for your class, then you're willing to accept
+bug reports on that.
+
+To start with, we should declare ``__array_ufunc__`` to be an abstract
+method, and add the ``NDArrayOperatorsMixin`` methods as mixin
+methods.
+
+Declaring ``astype`` as an ``@abstractmethod`` probably makes sense as
+well, since it's used by ``asabstractarray``. We might also want to go
+ahead and add some basic attributes like ``ndim``, ``shape``,
+``dtype``.
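+
+A minimal sketch of such an ABC, with the method set assumed rather
+than final, might be::
+
+    import abc
+    from numpy.lib.mixins import NDArrayOperatorsMixin
+
+    class AbstractArray(NDArrayOperatorsMixin, abc.ABC):
+        @abc.abstractmethod
+        def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+            ...
+
+        @abc.abstractmethod
+        def astype(self, dtype):
+            ...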
+
+Adding new abstract methods will be a bit tricky, because ABCs enforce
+these at subclass time; therefore, simply adding a new
+``@abstractmethod`` will be a backwards compatibility break. If this
+becomes a problem then we can use some hacks to implement an
+``@upcoming_abstractmethod`` decorator that only issues a warning if the
+method is missing, and treat it like a regular deprecation cycle. (In
+this case, the thing we'd be deprecating is "support for abstract
+arrays that are missing feature X".)
+
+
+Naming
+~~~~~~
+
+The name of the ABC doesn't matter too much, because it will only be
+referenced rarely and in relatively specialized situations. The name
+of the function matters a lot, because most existing instances of
+``asarray`` should be replaced by this, and in the future it's what
+everyone should be reaching for by default unless they have a specific
+reason to use ``asarray`` instead. This suggests that its name really
+should be *shorter* and *more memorable* than ``asarray``... which
+is difficult. I've used ``asabstractarray`` in this draft, but I'm not
+really happy with it, because it's too long and people are unlikely to
+start using it by habit without endless exhortations.
+
+One option would be to actually change ``asarray``\'s semantics so
+that *it* passes through ``AbstractArray`` objects unchanged. But I'm
+worried that there may be a lot of code out there that calls
+``asarray`` and then passes the result into some C function that
+doesn't do any further type checking (because it knows that its caller
+has already used ``asarray``). If we allow ``asarray`` to return
+``AbstractArray`` objects, and then someone calls one of these C
+wrappers and passes it an ``AbstractArray`` object like a sparse
+array, then they'll get a segfault. Right now, in the same situation,
+``asarray`` will instead invoke the object's ``__array__`` method, or
+use the buffer interface to make a view, or pass through an array with
+object dtype, or raise an error, or similar. Probably none of these
+outcomes are actually desirable in most cases, so maybe making it a
+segfault instead would be OK? But it's dangerous given that we don't
+know how common such code is. OTOH, if we were starting from scratch
+then this would probably be the ideal solution.
+
+We can't use ``asanyarray`` or ``array``, since those are already
+taken.
+
+Any other ideas? ``np.cast``, ``np.coerce``?
+
+
+Implementation
+--------------
+
+1. Rename ``NDArrayOperatorsMixin`` to ``AbstractArray`` (leaving
+ behind an alias for backwards compatibility) and make it an ABC.
+
+2. Add ``asabstractarray`` (or whatever we end up calling it), and
+ probably a C API equivalent.
+
+3. Begin migrating NumPy internal functions to using
+ ``asabstractarray`` where appropriate.
+
+
+Backward compatibility
+----------------------
+
+This is purely a new feature, so there are no compatibility issues.
+(Unless we decide to change the semantics of ``asarray`` itself.)
+
+
+Rejected alternatives
+---------------------
+
+One suggestion that has come up is to define multiple abstract classes
+for different subsets of the array interface. Nothing in this proposal
+stops either NumPy or third-parties from doing this in the future, but
+it's very difficult to guess ahead of time which subsets would be
+useful. Also, "the full ndarray interface" is something that existing
+libraries are written to expect (because they work with actual
+ndarrays) and test (because they test with actual ndarrays), so it's
+by far the easiest place to start.
+
+
+Links to discussion
+-------------------
+
+* https://mail.python.org/pipermail/numpy-discussion/2018-March/077767.html
+
+
+Appendix: Benchmark script
+--------------------------
+
+.. literalinclude:: nep-0016-benchmark.py
+
+
+Copyright
+---------
+
+This document has been placed in the public domain.
diff --git a/doc/neps/nep-0016-benchmark.py b/doc/neps/nep-0016-benchmark.py
new file mode 100644
index 000000000..ec8e44726
--- /dev/null
+++ b/doc/neps/nep-0016-benchmark.py
@@ -0,0 +1,48 @@
+import perf
+import abc
+import numpy as np
+
+class NotArray:
+ pass
+
+class AttrArray:
+ __array_implementer__ = True
+
+class ArrayBase(abc.ABC):
+ pass
+
+class ABCArray1(ArrayBase):
+ pass
+
+class ABCArray2:
+ pass
+
+ArrayBase.register(ABCArray2)
+
+not_array = NotArray()
+attr_array = AttrArray()
+abc_array_1 = ABCArray1()
+abc_array_2 = ABCArray2()
+
+# Make sure ABC cache is primed
+isinstance(not_array, ArrayBase)
+isinstance(abc_array_1, ArrayBase)
+isinstance(abc_array_2, ArrayBase)
+
+runner = perf.Runner()
+def t(name, statement):
+ runner.timeit(name, statement, globals=globals())
+
+t("np.asarray([])", "np.asarray([])")
+arrobj = np.array([])
+t("np.asarray(arrobj)", "np.asarray(arrobj)")
+
+t("attr, False",
+ "getattr(not_array, '__array_implementer__', False)")
+t("attr, True",
+ "getattr(attr_array, '__array_implementer__', False)")
+
+t("ABC, False", "isinstance(not_array, ArrayBase)")
+t("ABC, True, via inheritance", "isinstance(abc_array_1, ArrayBase)")
+t("ABC, True, via register", "isinstance(abc_array_2, ArrayBase)")
+
diff --git a/doc/neps/nep-0017-split-out-maskedarray.rst b/doc/neps/nep-0017-split-out-maskedarray.rst
index d6dcc1def..7ef949763 100644
--- a/doc/neps/nep-0017-split-out-maskedarray.rst
+++ b/doc/neps/nep-0017-split-out-maskedarray.rst
@@ -1,6 +1,6 @@
-=======================
-Split Out Masked Arrays
-=======================
+================================
+NEP 17 — Split Out Masked Arrays
+================================
:Author: Stéfan van der Walt <stefanv@berkeley.edu>
:Status: Rejected
diff --git a/doc/neps/nep-0018-array-function-protocol.rst b/doc/neps/nep-0018-array-function-protocol.rst
index 943ca4cbf..988c9086a 100644
--- a/doc/neps/nep-0018-array-function-protocol.rst
+++ b/doc/neps/nep-0018-array-function-protocol.rst
@@ -1,35 +1,41 @@
-==================================================
-NEP: Dispatch Mechanism for NumPy's high level API
-==================================================
+====================================================================
+NEP 18 — A dispatch mechanism for NumPy's high level array functions
+====================================================================
:Author: Stephan Hoyer <shoyer@google.com>
:Author: Matthew Rocklin <mrocklin@gmail.com>
-:Status: Draft
+:Author: Marten van Kerkwijk <mhvk@astro.utoronto.ca>
+:Author: Hameer Abbasi <hameerabbasi@yahoo.com>
+:Author: Eric Wieser <wieser.eric@gmail.com>
+:Status: Provisional
:Type: Standards Track
:Created: 2018-05-29
+:Resolution: https://mail.python.org/pipermail/numpy-discussion/2018-August/078493.html
Abstract
--------
-We propose a protocol to allow arguments of numpy functions to define
-how that function operates on them. This allows other libraries that
-implement NumPy's high level API to reuse Numpy functions. This allows
-libraries that extend NumPy's high level API to apply to more NumPy-like
-libraries.
+We propose the ``__array_function__`` protocol, to allow arguments of NumPy
+functions to define how that function operates on them. This will allow
+using NumPy as a high level API for efficient multi-dimensional array
+operations, even with array implementations that differ greatly from
+``numpy.ndarray``.
Detailed description
--------------------
-Numpy's high level ndarray API has been implemented several times
+NumPy's high level ndarray API has been implemented several times
outside of NumPy itself for different architectures, such as for GPU
arrays (CuPy), Sparse arrays (scipy.sparse, pydata/sparse) and parallel
-arrays (Dask array) as well as various Numpy-like implementations in the
+arrays (Dask array) as well as various NumPy-like implementations in the
deep learning frameworks, like TensorFlow and PyTorch.
-Similarly there are several projects that build on top of the Numpy API
-for labeled and indexed arrays (XArray), automatic differentation
-(Autograd, Tangent), higher order array factorizations (TensorLy), etc.
-that add additional functionality on top of the Numpy API.
+Similarly there are many projects that build on top of the NumPy API
+for labeled and indexed arrays (XArray), automatic differentiation
+(Autograd, Tangent), masked arrays (numpy.ma), physical units (astropy.units,
+pint, unyt), etc. that add additional functionality on top of the NumPy API.
+Most of these projects also implement a close variation of NumPy's high
+level API.
We would like to be able to use these libraries together, for example we
would like to be able to place a CuPy array within XArray, or perform
@@ -38,7 +44,7 @@ accomplish if code written for NumPy ndarrays could also be used by
other NumPy-like projects.
For example, we would like for the following code example to work
-equally well with any Numpy-like array object:
+equally well with any NumPy-like array object:
.. code:: python
@@ -47,7 +53,7 @@ equally well with any Numpy-like array object:
return np.mean(np.exp(y))
Some of this is possible today with various protocol mechanisms within
-Numpy.
+NumPy.
- The ``np.exp`` function checks the ``__array_ufunc__`` protocol
- The ``.T`` method works using Python's method dispatch
@@ -55,10 +61,10 @@ Numpy.
the argument
However other functions, like ``np.tensordot`` do not dispatch, and
-instead are likely to coerce to a Numpy array (using the ``__array__``)
+instead are likely to coerce to a NumPy array (using the ``__array__``)
protocol, or err outright. To achieve enough coverage of the NumPy API
to support downstream projects like XArray and autograd we want to
-support *almost all* functions within Numpy, which calls for a more
+support *almost all* functions within NumPy, which calls for a more
reaching protocol than just ``__array_ufunc__``. We would like a
protocol that allows arguments of a NumPy function to take control and
divert execution to another function (for example a GPU or parallel
@@ -71,10 +77,32 @@ We propose adding support for a new protocol in NumPy,
``__array_function__``.
This protocol is intended to be a catch-all for NumPy functionality that
-is not covered by existing protocols, like reductions (like ``np.sum``)
-or universal functions (like ``np.exp``). The semantics are very similar
-to ``__array_ufunc__``, except the operation is specified by an
-arbitrary callable object rather than a ufunc instance and method.
+is not covered by the ``__array_ufunc__`` protocol for universal functions
+(like ``np.exp``). The semantics are very similar to ``__array_ufunc__``, except
+the operation is specified by an arbitrary callable object rather than a ufunc
+instance and method.
+
+A prototype implementation can be found in
+`this notebook <https://nbviewer.jupyter.org/gist/shoyer/1f0a308a06cd96df20879a1ddb8f0006>`_.
+
+.. warning::
+
+ The ``__array_function__`` protocol, and its use on particular functions,
+ is *experimental*. We plan to retain an interface that makes it possible
+ to override NumPy functions, but the way to do so for particular functions
+ **can and will change** with little warning. If such reduced backwards
+ compatibility guarantees are not acceptable to you, do not rely upon overrides
+ of NumPy functions for non-NumPy arrays. See "Non-goals" below for more
+ details.
+
+.. note::
+
+ Dispatch with the ``__array_function__`` protocol has been implemented on
+ NumPy's master branch but is not yet enabled by default. In NumPy 1.16,
+ you will need to set the environment variable
+ ``NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=1`` before importing NumPy to test
+ NumPy function overrides. We anticipate the protocol will be enabled by
+ default in NumPy 1.17.
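+
+   For example, overrides can be enabled for a single run by setting the
+   variable before NumPy is first imported::
+
+       import os
+       os.environ['NUMPY_EXPERIMENTAL_ARRAY_FUNCTION'] = '1'
+       import numpy as np  # function overrides are now active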
The interface
~~~~~~~~~~~~~
@@ -88,23 +116,26 @@ We propose the following signature for implementations of
- ``func`` is an arbitrary callable exposed by NumPy's public API,
which was called in the form ``func(*args, **kwargs)``.
-- ``types`` is a list of types for all arguments to the original NumPy
- function call that will be checked for an ``__array_function__``
- implementation.
-- The tuple ``args`` and dict ``**kwargs`` are directly passed on from the
+- ``types`` is a `collection <https://docs.python.org/3/library/collections.abc.html#collections.abc.Collection>`_
+ of unique argument types from the original NumPy function call that
+ implement ``__array_function__``.
+- The tuple ``args`` and dict ``kwargs`` are directly passed on from the
original call.
Unlike ``__array_ufunc__``, there are no high-level guarantees about the
type of ``func``, or about which of ``args`` and ``kwargs`` may contain objects
-implementing the array API. As a convenience for ``__array_function__``
-implementors of the NumPy API, the ``types`` keyword contains a list of all
-types that implement the ``__array_function__`` protocol. This allows
-downstream implementations to quickly determine if they are likely able to
-support the operation.
-
-Still be determined: what guarantees can we offer for ``types``? Should
-we promise that types are unique, and appear in the order in which they
-are checked?
+implementing the array API.
+
+As a convenience for ``__array_function__`` implementors, ``types`` provides all
+argument types with an ``'__array_function__'`` attribute. This
+allows implementors to quickly identify cases where they should defer to
+``__array_function__`` implementations on other arguments.
+The type of ``types`` is intentionally vague:
+``frozenset`` would most closely match the intended use, but we may use ``tuple``
+instead for performance reasons. In any case, ``__array_function__``
+implementations should not rely on the iteration order of ``types``, which
+would violate a well-defined "Type casting hierarchy" (as described in
+`NEP-13 <https://www.numpy.org/neps/nep-0013-ufunc-overrides.html>`_).
Example for a project implementing the NumPy API
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -118,45 +149,78 @@ checks:
If these conditions hold, ``__array_function__`` should return
the result from calling its implementation for ``func(*args, **kwargs)``.
Otherwise, it should return the sentinel value ``NotImplemented``, indicating
-that the function is not implemented by these types.
+that the function is not implemented by these types. This is preferable to
+raising ``TypeError`` directly, because it gives *other* arguments the
+opportunity to define the operations.
+
+There are no general requirements on the return value from
+``__array_function__``, although most sensible implementations should probably
+return array(s) with the same type as one of the function's arguments.
+If/when Python gains
+`typing support for protocols <https://www.python.org/dev/peps/pep-0544/>`_
+and NumPy adds static type annotations, the ``@overload`` implementation
+for ``SupportsArrayFunction`` will indicate a return type of ``Any``.
+
+It may also be convenient to define a custom decorator (``implements`` below)
+for registering ``__array_function__`` implementations.
.. code:: python
+ HANDLED_FUNCTIONS = {}
+
class MyArray:
def __array_function__(self, func, types, args, kwargs):
if func not in HANDLED_FUNCTIONS:
return NotImplemented
+ # Note: this allows subclasses that don't override
+ # __array_function__ to handle MyArray objects
if not all(issubclass(t, MyArray) for t in types):
return NotImplemented
return HANDLED_FUNCTIONS[func](*args, **kwargs)
- HANDLED_FUNCTIONS = {
- np.concatenate: my_concatenate,
- np.broadcast_to: my_broadcast_to,
- np.sum: my_sum,
- ...
- }
+ def implements(numpy_function):
+ """Register an __array_function__ implementation for MyArray objects."""
+ def decorator(func):
+ HANDLED_FUNCTIONS[numpy_function] = func
+ return func
+ return decorator
+
+ @implements(np.concatenate)
+ def concatenate(arrays, axis=0, out=None):
+ ... # implementation of concatenate for MyArray objects
+
+ @implements(np.broadcast_to)
+ def broadcast_to(array, shape):
+ ... # implementation of broadcast_to for MyArray objects
-Necessary changes within the Numpy codebase itself
+Note that it is not required for ``__array_function__`` implementations to
+include *all* of the corresponding NumPy function's optional arguments
+(e.g., ``broadcast_to`` above omits the irrelevant ``subok`` argument).
+Optional arguments are only passed in to ``__array_function__`` if they
+were explicitly used in the NumPy function call.
+
+Necessary changes within the NumPy codebase itself
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This will require two changes within the Numpy codebase:
+This will require two changes within the NumPy codebase:
1. A function to inspect available inputs, look for the
``__array_function__`` attribute on those inputs, and call those
methods appropriately until one succeeds. This needs to be fast in the
- common all-NumPy case.
+ common all-NumPy case, and have acceptable performance (no worse than
+ linear time) even if the number of overloaded inputs is large (e.g.,
+ as might be the case for ``np.concatenate``).
This is one additional function of moderate complexity.
-2. Calling this function within all relevant Numpy functions.
+2. Calling this function within all relevant NumPy functions.
- This affects many parts of the Numpy codebase, although with very low
+ This affects many parts of the NumPy codebase, although with very low
complexity.
Finding and calling the right ``__array_function__``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Given a Numpy function, ``*args`` and ``**kwargs`` inputs, we need to
+Given a NumPy function, ``*args`` and ``**kwargs`` inputs, we need to
search through ``*args`` and ``**kwargs`` for all appropriate inputs
that might have the ``__array_function__`` attribute. Then we need to
select among those possible methods and execute the right one.
@@ -171,13 +235,46 @@ be nested within lists or dictionaries, such as in the case of
``np.concatenate([x, y, z])``. This can be problematic for two reasons:
1. Some functions are given long lists of values, and traversing them
- might be prohibitively expensive
-2. Some function may have arguments that we don't want to inspect, even
- if they have the ``__array_function__`` method
+ might be prohibitively expensive.
+2. Some functions may have arguments that we don't want to inspect, even
+ if they have the ``__array_function__`` method.
+
+To resolve these issues, NumPy functions should explicitly indicate which
+of their arguments may be overloaded, and how these arguments should be
+checked. As a rule, this should include all arguments documented as either
+``array_like`` or ``ndarray``.
+
+We propose to do so by writing "dispatcher" functions for each overloaded
+NumPy function:
+
+- These functions will be called with the exact same arguments that were passed
+ into the NumPy function (i.e., ``dispatcher(*args, **kwargs)``), and should
+ return an iterable of arguments to check for overrides.
+- Dispatcher functions are required to share the exact same positional,
+ optional and keyword-only arguments as their corresponding NumPy functions.
+ Otherwise, valid invocations of a NumPy function could result in an error when
+ calling its dispatcher.
+- Because default *values* for keyword arguments do not have
+ ``__array_function__`` attributes, by convention we set all default argument
+ values to ``None``. This reduces the likelihood of signatures falling out
+ of sync, and minimizes extraneous information in the dispatcher.
+ The only exception should be cases where the argument value in some way
+ affects dispatching, which should be rare.
+
+An example of the dispatcher for ``np.concatenate`` may be instructive:
+
+.. code:: python
+
+ def _concatenate_dispatcher(arrays, axis=None, out=None):
+ for array in arrays:
+ yield array
+ if out is not None:
+ yield out
-To resolve these we ask the functions to provide an explicit list of
-arguments that should be traversed. This is the ``relevant_arguments=``
-keyword in the examples below.
+The concatenate dispatcher is written as a generator function, which allows it
+to potentially include the value of the optional ``out`` argument without
+needing to create a new sequence with the (potentially long) list of objects
+to be concatenated.
Trying ``__array_function__`` methods until the right one works
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
@@ -187,15 +284,15 @@ of these may decide that, given the available inputs, they are unable to
determine the correct result. How do we call the right one? If several
are valid then which has precedence?
-The rules for dispatch with ``__array_function__`` match those for
-``__array_ufunc__`` (see
-`NEP-13 <http://www.numpy.org/neps/nep-0013-ufunc-overrides.html>`_).
+For the most part, the rules for dispatch with ``__array_function__``
+match those for ``__array_ufunc__`` (see
+`NEP-13 <https://www.numpy.org/neps/nep-0013-ufunc-overrides.html>`_).
In particular:
- NumPy will gather implementations of ``__array_function__`` from all
specified inputs and call them in order: subclasses before
- superclasses, and otherwise left to right. Note that in some edge cases,
- this differs slightly from the
+ superclasses, and otherwise left to right. Note that in some edge cases
+ involving subclasses, this differs slightly from the
`current behavior <https://bugs.python.org/issue30140>`_ of Python.
- Implementations of ``__array_function__`` indicate that they can
handle the operation by returning any value other than
@@ -203,69 +300,197 @@ In particular:
- If all ``__array_function__`` methods return ``NotImplemented``,
NumPy will raise ``TypeError``.
-Changes within Numpy functions
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+One deviation from the current behavior of ``__array_ufunc__`` is that NumPy
+will only call ``__array_function__`` on the *first* argument of each unique
+type. This matches Python's
+`rule for calling reflected methods <https://docs.python.org/3/reference/datamodel.html#object.__ror__>`_,
+and this ensures that checking overloads has acceptable performance even when
+there are a large number of overloaded arguments. To avoid long-term divergence
+between these two dispatch protocols, we should
+`also update <https://github.com/numpy/numpy/issues/11306>`_
+``__array_ufunc__`` to match this behavior.
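+
+A sketch of this deduplication step (a hypothetical helper, not NumPy's
+actual code) might look like::
+
+    def first_of_each_type(relevant_args):
+        seen_types = []
+        for arg in relevant_args:
+            if type(arg) not in seen_types:
+                seen_types.append(type(arg))
+                # only this argument's __array_function__ will be tried
+                yield arg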
-Given a function defined above, for now call it
-``do_array_function_dance``, we now need to call that function from
-within every relevant Numpy function. This is a pervasive change, but of
-fairly simple and innocuous code that should complete quickly and
-without effect if no arguments implement the ``__array_function__``
-protocol. Let us consider a few examples of NumPy functions and how they
-might be affected by this change:
+Special handling of ``numpy.ndarray``
+'''''''''''''''''''''''''''''''''''''
+
+The use cases for subclasses with ``__array_function__`` are the same as those
+with ``__array_ufunc__``, so ``numpy.ndarray`` should also define a
+``__array_function__`` method mirroring ``ndarray.__array_ufunc__``:
.. code:: python
- def broadcast_to(array, shape, subok=False):
- success, value = do_array_function_dance(
- func=broadcast_to,
- relevant_arguments=[array],
- args=(array,),
- kwargs=dict(shape=shape, subok=subok))
- if success:
- return value
+ def __array_function__(self, func, types, args, kwargs):
+ # Cannot handle items that have __array_function__ other than our own.
+ for t in types:
+ if (hasattr(t, '__array_function__') and
+ t.__array_function__ is not ndarray.__array_function__):
+ return NotImplemented
- ... # continue with the definition of broadcast_to
+ # Arguments contain no overrides, so we can safely call the
+ # overloaded function again.
+ return func(*args, **kwargs)
- def concatenate(arrays, axis=0, out=None)
- success, value = do_array_function_dance(
- func=concatenate,
- relevant_arguments=[arrays, out],
- args=(arrays,),
- kwargs=dict(axis=axis, out=out))
- if success:
- return value
+To avoid infinite recursion, the dispatch rules for ``__array_function__``
+need the same special case as those for ``__array_ufunc__``: any arguments with
+an ``__array_function__`` method that is identical to
+``numpy.ndarray.__array_function__`` are not called as
+``__array_function__`` implementations.
- ... # continue with the definition of concatenate
+Changes within NumPy functions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-The list of objects passed to ``relevant_arguments`` are those that should
-be inspected for ``__array_function__`` implementations.
+Given a function defining the above behavior, for now call it
+``array_function_implementation_or_override``, we now need to call that
+function from within every relevant NumPy function. This is a pervasive change,
+but of fairly simple and innocuous code that should complete quickly and
+without effect if no arguments implement the ``__array_function__``
+protocol.
-Alternatively, we could write these overloads with a decorator, e.g.,
+In most cases, these functions should be written using the
+``array_function_dispatch`` decorator, which also associates dispatcher
+functions:
.. code:: python
- @overload_for_array_function(['array'])
+ def array_function_dispatch(dispatcher):
+ """Wrap a function for dispatch with the __array_function__ protocol."""
+ def decorator(implementation):
+ @functools.wraps(implementation)
+ def public_api(*args, **kwargs):
+ relevant_args = dispatcher(*args, **kwargs)
+ return array_function_implementation_or_override(
+ implementation, public_api, relevant_args, args, kwargs)
+ return public_api
+ return decorator
+
+ # example usage
+ def _broadcast_to_dispatcher(array, shape, subok=None):
+ return (array,)
+
+ @array_function_dispatch(_broadcast_to_dispatcher)
def broadcast_to(array, shape, subok=False):
- ... # continue with the definition of broadcast_to
+ ... # existing definition of np.broadcast_to
+
+Using a decorator is great! We don't need to change the definitions of
+existing NumPy functions, and only need to write a few additional lines
+for the dispatcher function. We could even reuse a single dispatcher for
+families of functions with the same signature (e.g., ``sum`` and ``prod``).
+For such functions, the largest change could be adding a few lines to the
+docstring to note which arguments are checked for overloads.
+
+It's particularly worth calling out the decorator's use of
+``functools.wraps``:
+
+- This ensures that the wrapper has the same name and docstring as the
+ wrapped NumPy function.
+- On Python 3, it also ensures that the wrapper copies the original
+ function signature, which is important for introspection-based tools such as
+ auto-complete. If we care about preserving function signatures on Python 2,
+ for the `short while longer <http://www.numpy.org/neps/nep-0014-dropping-python2.7-proposal.html>`_
+ that NumPy supports Python 2.7, we could do so by adding a vendored
+ dependency on the (single-file, BSD licensed)
+ `decorator library <https://github.com/micheles/decorator>`_.
+- Finally, it ensures that the wrapped function
+ `can be pickled <http://gael-varoquaux.info/programming/decoration-in-python-done-right-decorating-and-pickling.html>`_.
+
+In a few cases, it would not make sense to use the ``array_function_dispatch``
+decorator directly, but writing the override in terms of
+``array_function_implementation_or_override`` should still be straightforward.
+
+- Functions written entirely in C (e.g., ``np.concatenate``) can't use
+ decorators, but they could still use a C equivalent of
+ ``array_function_implementation_or_override``. If performance is not a
+ concern, they could also be easily wrapped with a small Python wrapper, as
+ sketched below.
+- ``np.einsum`` does complicated argument parsing to handle two different
+ function signatures. It would probably be best to avoid the overhead of
+ parsing it twice in the typical case of no overrides.
+
+Fortunately, in each of these cases so far, the functions already have a generic
+signature of the form ``*args, **kwargs``, which means we don't need to worry
+about potential inconsistency between how functions are called and what we pass
+to ``__array_function__``. (In C, arguments for all Python functions are parsed
+from a tuple ``*args`` and dict ``**kwargs``.) This shouldn't stop us from
+writing overrides for functions with non-generic signatures that can't use the
+decorator, but we should consider these cases carefully.
+
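+As a sketch of the small-Python-wrapper option, ``np.concatenate`` could be
+re-exported as a thin wrapper around its C implementation (the dispatcher
+shown here is illustrative):
+
+.. code:: python
+
+    from numpy.core import multiarray
+
+    def _concatenate_dispatcher(arrays, axis=None, out=None):
+        for array in arrays:
+            yield array
+        if out is not None:
+            yield out
+
+    @array_function_dispatch(_concatenate_dispatcher)
+    def concatenate(arrays, axis=0, out=None):
+        # delegate to the existing C implementation
+        return multiarray.concatenate(arrays, axis=axis, out=out)
+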
+.. note::
+
+ The code for ``array_function_dispatch`` above has been updated from the
+ original version of this NEP to match the actual
+ `implementation in NumPy <https://github.com/numpy/numpy/blob/e104f03ac8f65ae5b92a9b413b0fa639f39e6de2/numpy/core/overrides.py>`_.
+
+Extensibility
+~~~~~~~~~~~~~
- @overload_for_array_function(['arrays', 'out'])
- def concatenate(arrays, axis=0, out=None):
- ... # continue with the definition of concatenate
+An important virtue of this approach is that it allows for adding new
+optional arguments to NumPy functions without breaking code that already
+relies on ``__array_function__``.
+
+This is not a theoretical concern. The implementation of overrides *within*
+functions like ``np.sum()`` rather than defining a new function capturing
+``*args`` and ``**kwargs`` necessitated some awkward gymnastics to ensure that
+the new ``keepdims`` argument is only passed in cases where it is used, e.g.,
+
+.. code:: python
+
+    def sum(array, ..., keepdims=np._NoValue):
+        kwargs = {}
+        if keepdims is not np._NoValue:
+            kwargs['keepdims'] = keepdims
+        return array.sum(..., **kwargs)
-The decorator ``overload_for_array_function`` would be written in terms
-of ``do_array_function_dance``.
+This also makes it possible to add optional arguments to ``__array_function__``
+implementations incrementally and only in cases where it makes sense. For
+example, a library implementing immutable arrays would not be required to
+explicitly include an unsupported ``out`` argument. Doing this properly for all
+optional arguments is somewhat onerous, e.g.,
-The downside of this approach would be a loss of introspection capability
-for NumPy functions on Python 2, since this requires the use of
-``inspect.Signature`` (only available on Python 3). However, NumPy won't
-be supporting Python 2 for `very much longer <http://www.numpy.org/neps/nep-0014-dropping-python2.7-proposal.html>`_.
+.. code:: python
+
+    def my_sum(array, ..., out=None):
+        if out is not None:
+            raise TypeError('out argument is not supported')
+        ...
+
+We thus avoid encouraging the tempting shortcut of adding catch-all
+``**ignored_kwargs`` to the signatures of functions called by NumPy, which fails
+silently for misspelled or ignored arguments.
+
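+To see why that shortcut fails silently, consider this hedged illustration
+(``my_sum`` and ``x`` are placeholders):
+
+.. code:: python
+
+    def my_sum(array, axis=None, **ignored_kwargs):
+        return np.sum(array, axis=axis)
+
+    # a typo for ``axis`` is silently swallowed instead of raising TypeError
+    my_sum(x, axes=0)
+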
+Performance
+~~~~~~~~~~~
+
+Performance is always a concern with NumPy, even though NumPy users have
+already prioritized usability over pure speed with their choice of the Python
+language itself. It's important that this new ``__array_function__`` protocol
+not impose a significant cost in the typical case of NumPy functions acting
+on NumPy arrays.
+
+Our `microbenchmark results <https://nbviewer.jupyter.org/gist/shoyer/1f0a308a06cd96df20879a1ddb8f0006>`_
+show that a pure Python implementation of the override machinery described
+above adds roughly 2-3 microseconds of overhead to each NumPy function call
+without any overloaded arguments. For context, typical NumPy functions on small
+arrays have a runtime of 1-10 microseconds, mostly determined by what fraction
+of the function's logic is written in C. For example, one microsecond is about
+the difference in speed between the ``ndarray.sum()`` method (1.6 us) and
+``numpy.sum()`` function (2.6 us).
+
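+A measurement along these lines can be reproduced with ``timeit``; the exact
+numbers depend on hardware and NumPy version:
+
+.. code:: python
+
+    import timeit
+    import numpy as np
+
+    x = np.arange(100)
+    # each result is the total time in seconds for 100000 calls;
+    # divide by 100000 for the approximate per-call cost
+    print(timeit.timeit(x.sum, number=100000))              # method
+    print(timeit.timeit(lambda: np.sum(x), number=100000))  # function
+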
+Fortunately, we expect significantly less overhead with a C implementation of
+``array_function_implementation_or_override``, which is where the bulk of the
+runtime is. This would leave the ``array_function_dispatch`` decorator and
+dispatcher function on their own adding about 0.5 microseconds of overhead,
+for perhaps ~1 microsecond of overhead in the typical case.
+
+In our view, this level of overhead is reasonable to accept for code written
+in Python. We're pretty sure that the vast majority of NumPy users aren't
+concerned about performance differences measured in microsecond(s) on NumPy
+functions, because it's difficult to do *anything* in Python in less than a
+microsecond.
Use outside of NumPy
~~~~~~~~~~~~~~~~~~~~
Nothing about this protocol is particular to NumPy itself. Should
-we enourage use of the same ``__array_function__`` protocol third-party
+we encourage use of the same ``__array_function__`` protocol by third-party
libraries for overloading non-NumPy functions, e.g., for making
array-implementation generic functionality in SciPy?
@@ -276,8 +501,9 @@ to be explicitly recognized. Libraries like Dask, CuPy, and Autograd
already wrap a limited subset of SciPy functionality (e.g.,
``scipy.linalg``) similarly to how they wrap NumPy.
-If we want to do this, we should consider exposing the helper function
-``do_array_function_dance()`` above as a public API.
+If we want to do this, we should expose at least the decorator
+``array_function_dispatch()`` and possibly also the lower level
+``array_function_implementation_or_override()`` as part of NumPy's public API.
Non-goals
---------
@@ -293,7 +519,10 @@ wait for an optimal implementation. The price of moving fast is that for
now **this protocol should be considered strictly experimental**. We
reserve the right to change the details of this protocol and how
specific NumPy functions use it at any time in the future -- even in
-otherwise bug-fix only releases of NumPy.
+otherwise bug-fix only releases of NumPy. In practice, once initial
+issues with ``__array_function__`` are worked out, we will use abbreviated
+deprecation cycles as short as a single major NumPy release (e.g., as
+little as four months).
In particular, we don't plan to write additional NEPs that list all
specific functions to overload, with exactly how they should be
@@ -311,11 +540,13 @@ own protocols:
- dispatch for methods of any kind, e.g., methods on
``np.random.RandomState`` objects.
-As a concrete example of how we expect to break behavior in the future,
-some functions such as ``np.where`` are currently not NumPy universal
-functions, but conceivably could become universal functions in the
-future. When/if this happens, we will change such overloads from using
-``__array_function__`` to the more specialized ``__array_ufunc__``.
+We also expect that the mechanism for overriding specific functions
+that will initially use the ``__array_function__`` protocol can and will
+change in the future. As a concrete example of how we expect to break
+behavior in the future, some functions such as ``np.where`` are currently
+not NumPy universal functions, but conceivably could become universal
+functions in the future. When/if this happens, we will change such overloads
+from using ``__array_function__`` to the more specialized ``__array_ufunc__``.
Backward compatibility
@@ -332,13 +563,24 @@ Specialized protocols
~~~~~~~~~~~~~~~~~~~~~
We could (and should) continue to develop protocols like
-``__array_ufunc__`` for cohesive subsets of Numpy functionality.
+``__array_ufunc__`` for cohesive subsets of NumPy functionality.
As mentioned above, if this means that some functions that we overload
with ``__array_function__`` should switch to a new protocol instead,
that is explicitly OK for as long as ``__array_function__`` retains its
experimental status.
+Switching to a new protocol should use an abbreviated version of NumPy's
+normal deprecation cycle:
+
+- For a single major release, after checking for any new protocols, NumPy
+ should still check for ``__array_function__`` methods that implement the
+ given function. If any argument's ``__array_function__`` method returns a
+ value other than ``NotImplemented``, a descriptive ``FutureWarning`` should
+ be issued.
+- In the next major release, the checks for ``__array_function__`` will be
+ removed.
+
Separate namespace
~~~~~~~~~~~~~~~~~~
@@ -347,7 +589,7 @@ either inside or outside of NumPy.
This has the advantage of alleviating any possible concerns about
backwards compatibility and would provide the maximum freedom for quick
-experimentation. In the long term, it would provide a clean abstration
+experimentation. In the long term, it would provide a clean abstraction
layer, separating NumPy's high level API from default implementations on
``numpy.ndarray`` objects.
@@ -358,6 +600,11 @@ functions from ``numpy`` itself are already overloaded (but
inadequately), so confusion about high vs. low level APIs in NumPy would
still persist.
+Alternatively, a separate namespace, e.g., ``numpy.array_only``, could be
+created for a non-overloaded version of NumPy's high level API, for cases
+where performance with NumPy arrays is a critical concern. This has most
+of the same downsides as the separate namespace.
+
Multiple dispatch
~~~~~~~~~~~~~~~~~
@@ -370,7 +617,7 @@ don't think this approach makes sense for NumPy in the near term.
The main reason is that NumPy already has a well-proven dispatching
mechanism with ``__array_ufunc__``, based on Python's own dispatching
-system for arithemtic, and it would be confusing to add another
+system for arithmetic, and it would be confusing to add another
mechanism that works in a very different way. This would also be a more
invasive change to NumPy itself, which would need to gain a multiple
dispatch implementation.
@@ -384,36 +631,45 @@ would be straightforward to write a shim for a default
Implementations in terms of a limited core API
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The internal implemenations of some NumPy functions is extremely simple.
-For example: - ``np.stack()`` is implemented in only a few lines of code
-by combining indexing with ``np.newaxis``, ``np.concatenate`` and the
-``shape`` attribute. - ``np.mean()`` is implemented internally in terms
-of ``np.sum()``, ``np.divide()``, ``.astype()`` and ``.shape``.
+The internal implementations of some NumPy functions are extremely simple.
+For example:
+
+- ``np.stack()`` is implemented in only a few lines of code by combining
+ indexing with ``np.newaxis``, ``np.concatenate`` and the ``shape`` attribute.
+- ``np.mean()`` is implemented internally in terms of ``np.sum()``,
+ ``np.divide()``, ``.astype()`` and ``.shape``.
This suggests the possibility of defining a minimal "core" ndarray
interface, and relying upon it internally in NumPy to implement the full
API. This is an attractive option, because it could significantly reduce
the work required for new array implementations.
-However, this also comes with several downsides: 1. The details of how
-NumPy implements a high-level function in terms of overloaded functions
-now becomes an implicit part of NumPy's public API. For example,
-refactoring ``stack`` to use ``np.block()`` instead of
-``np.concatenate()`` internally would now become a breaking change. 2.
-Array libraries may prefer to implement high level functions differently
-than NumPy. For example, a library might prefer to implement a
-fundamental operations like ``mean()`` directly rather than relying on
-``sum()`` followed by division. More generally, it's not clear yet what
-exactly qualifies as core functionality, and figuring this out could be
-a large project. 3. We don't yet have an overloading system for
-attributes and methods on array objects, e.g., for accessing ``.dtype``
-and ``.shape``. This should be the subject of a future NEP, but until
-then we should be reluctant to rely on these properties.
-
-Given these concerns, we encourage relying on this approach only in
-limited cases.
-
-Coersion to a NumPy array as a catch-all fallback
+However, this also comes with several downsides:
+
+1. The details of how NumPy implements a high-level function in terms of
+ overloaded functions now becomes an implicit part of NumPy's public API. For
+ example, refactoring ``stack`` to use ``np.block()`` instead of
+ ``np.concatenate()`` internally would now become a breaking change.
+2. Array libraries may prefer to implement high level functions differently than
+ NumPy. For example, a library might prefer to implement a fundamental
+ operation like ``mean()`` directly rather than relying on ``sum()`` followed
+ by division. More generally, it's not clear yet what exactly qualifies as
+ core functionality, and figuring this out could be a large project.
+3. We don't yet have an overloading system for attributes and methods on array
+ objects, e.g., for accessing ``.dtype`` and ``.shape``. This should be the
+ subject of a future NEP, but until then we should be reluctant to rely on
+ these properties.
+
+Given these concerns, we think it's valuable to support explicit overloading of
+nearly every public function in NumPy's API. This does not preclude the future
+possibility of rewriting NumPy functions in terms of simplified core
+functionality with ``__array_function__`` and a protocol and/or base class for
+ensuring that arrays expose methods and properties like ``numpy.ndarray``.
+However, to work well this would require the possibility of implementing
+*some* but not all functions with ``__array_function__``, e.g., as described
+in the next section.
+
+Coercion to a NumPy array as a catch-all fallback
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
With the current design, classes that implement ``__array_function__``
@@ -438,106 +694,195 @@ make it impossible to implement this generic fallback behavior for
``__array_function__``.
We could resolve this issue by changing the handling of return values in
-``__array_function__`` in either of two possible ways: 1. Change the
-meaning of all arguments returning ``NotImplemented`` to indicate that
-all arguments should be coerced to NumPy arrays instead. However, many
-array libraries (e.g., scipy.sparse) really don't want implicit
-conversions to NumPy arrays, and often avoid implementing ``__array__``
-for exactly this reason. Implicit conversions can result in silent bugs
-and performance degradation. 2. Use another sentinel value of some sort
-to indicate that a class implementing part of the higher level array API
-is coercible as a fallback, e.g., a return value of
-``np.NotImplementedButCoercible`` from ``__array_function__``.
-
-If we take this second approach, we would need to define additional
-rules for how coercible array arguments are coerced, e.g., - Would we
-try for ``__array_function__`` overloads again after coercing coercible
-arguments? - If so, would we coerce coercible arguments one-at-a-time,
-or all-at-once?
-
-These are slightly tricky design questions, so for now we propose to
-defer this issue. We can always implement
-``np.NotImplementedButCoercible`` at some later time if it proves
-critical to the numpy community in the future. Importantly, we don't
-think this will stop critical libraries that desire to implement most of
-the high level NumPy API from adopting this proposal.
-
-NOTE: If you are reading this NEP in its draft state and disagree,
-please speak up on the mailing list!
-
-Drawbacks of this approach
---------------------------
-
-Future difficulty extending NumPy's API
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+``__array_function__`` in either of two possible ways:
+
+1. Change the meaning of all arguments returning ``NotImplemented`` to indicate
+ that all arguments should be coerced to NumPy arrays and the operation
+ should be retried. However, many array libraries (e.g., scipy.sparse) really
+ don't want implicit conversions to NumPy arrays, and often avoid implementing
+ ``__array__`` for exactly this reason. Implicit conversions can result in
+ silent bugs and performance degradation.
+
+ Potentially, we could enable this behavior only for types that implement
+ ``__array__``, which would resolve the most problematic cases like
+ scipy.sparse. But in practice, a large fraction of classes that present a
+ high level API like NumPy arrays already implement ``__array__``. This would
+ preclude reliable use of NumPy's high level API on these objects.
+2. Use another sentinel value of some sort, e.g.,
+ ``np.NotImplementedButCoercible``, to indicate that a class implementing part
+ of NumPy's higher level array API is coercible as a fallback. This is a more
+ appealing option (a sketch follows this list).
+
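+A hedged sketch of how option 2 might look from a downstream library's
+perspective (``MyMaskedArray`` and ``_implemented`` are hypothetical):
+
+.. code:: python
+
+    class MyMaskedArray:
+        _implemented = {}  # NumPy functions this class handles itself
+
+        def __array_function__(self, func, types, args, kwargs):
+            if func in self._implemented:
+                return self._implemented[func](*args, **kwargs)
+            # ask NumPy to coerce this argument (via __array__) and retry
+            return np.NotImplementedButCoercible
+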
+With either approach, we would need to define additional rules for *how*
+coercible array arguments are coerced. The only sane rule would be to treat
+these return values as equivalent to not defining an
+``__array_function__`` method at all, which means that NumPy functions would
+fall back to their current behavior of coercing all array-like arguments.
+
+It is not yet clear to us if we need an option like
+``NotImplementedButCoercible``, so for now we propose to defer this issue.
+We can always implement ``np.NotImplementedButCoercible`` at some later time if
+it proves critical to the NumPy community in the future. Importantly, we don't
+think this will stop critical libraries that desire to implement most of the
+high level NumPy API from adopting this proposal.
+
+A magic decorator that inspects type annotations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In principle, Python 3 type annotations contain sufficient information to
+automatically create most ``dispatcher`` functions. It would be convenient to
+use these annotations to dispense with the need for manually writing
+dispatchers, e.g.,
+
+.. code:: python
+
+    @array_function_dispatch
+    def broadcast_to(array: ArrayLike,
+                     shape: Tuple[int, ...],
+                     subok: bool = False):
+        ... # existing definition of np.broadcast_to
+
+This would require some form of automatic code generation, either at compile or
+import time.
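+
+For illustration, a hedged sketch of import-time generation using ``inspect``,
+keying off the ``ArrayLike`` annotation from the example above (none of the
+names here are part of this proposal):
+
+.. code:: python
+
+    import inspect
+
+    def make_dispatcher(implementation):
+        sig = inspect.signature(implementation)
+        array_params = [name for name, param in sig.parameters.items()
+                        if param.annotation is ArrayLike]
+        def dispatcher(*args, **kwargs):
+            # bind the call and pull out the array-like arguments
+            bound = sig.bind(*args, **kwargs)
+            return tuple(bound.arguments[name] for name in array_params
+                         if name in bound.arguments)
+        return dispatcher
+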
-One downside of passing on all arguments directly on to
-``__array_function__`` is that it makes it hard to extend the signatures
-of overloaded NumPy functions with new arguments, because adding even an
-optional keyword argument would break existing overloads.
+We think this is an interesting possible extension to consider in the future. We
+don't think it makes sense to do so now, because code generation involves
+tradeoffs and NumPy's experience with type annotations is still
+`quite limited <https://github.com/numpy/numpy-stubs>`_. Even if NumPy
+were Python 3 only (which will happen
+`sometime in 2019 <http://www.numpy.org/neps/nep-0014-dropping-python2.7-proposal.html>`_),
+we aren't ready to annotate NumPy's codebase directly yet.
-This is not a new problem for NumPy. NumPy has occasionally changed the
-signature for functions in the past, including functions like
-``numpy.sum`` which support overloads.
+Support for implementation-specific arguments
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-For adding new keyword arguments that do not change default behavior, we
-would only include these as keyword arguments when they have changed
-from default values. This is similar to `what NumPy already has
-done <https://github.com/numpy/numpy/blob/v1.14.2/numpy/core/fromnumeric.py#L1865-L1867>`_,
-e.g., for the optional ``keepdims`` argument in ``sum``:
+We could allow ``__array_function__`` implementations to add their own
+optional keyword arguments by including ``**ignored_kwargs`` in dispatcher
+functions, e.g.,
.. code:: python
- def sum(array, ..., keepdims=np._NoValue):
- kwargs = {}
- if keepdims is not np._NoValue:
- kwargs['keepdims'] = keepdims
- return array.sum(..., **kwargs)
+    def _concatenate_dispatcher(arrays, axis=None, out=None, **ignored_kwargs):
+        ... # same implementation of _concatenate_dispatcher as above
+
+Implementation-specific arguments are somewhat common in libraries that
+otherwise emulate NumPy's higher level API (e.g., ``dask.array.sum()`` adds
+``split_every`` and ``tensorflow.reduce_sum()`` adds ``name``). Supporting
+them in NumPy would be particularly useful for libraries that implement new
+high-level array functions on top of NumPy functions, e.g.,
+
+.. code:: python
-In other cases, such as deprecated arguments, preserving the existing
-behavior of overloaded functions may not be possible. Libraries that use
-``__array_function__`` should be aware of this risk: we don't propose to
-freeze NumPy's API in stone any more than it already is.
+    def mean_squared_error(x, y, **kwargs):
+        return np.mean((x - y) ** 2, **kwargs)
-Difficulty adding implementation specific arguments
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Otherwise, we would need separate versions of ``mean_squared_error`` for each
+array implementation in order to pass implementation-specific arguments to
+``mean()``.
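+
+With such pass-through in place, an implementation-specific option could flow
+through the generic wrapper, e.g. (``split_every`` is a ``dask.array``
+reduction argument; the call below is illustrative):
+
+.. code:: python
+
+    mean_squared_error(x_dask, y_dask, split_every=4)
+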
-Some array implementations generally follow NumPy's API, but have
-additional optional keyword arguments (e.g., ``dask.array.sum()`` has
-``split_every`` and ``tensorflow.reduce_sum()`` has ``name``). A generic
-dispatching library could potentially pass on all unrecognized keyword
-argument directly to the implementation, but extending ``np.sum()`` to
-pass on ``**kwargs`` would entail public facing changes in NumPy.
-Customizing the detailed behavior of array libraries will require using
-library specific functions, which could be limiting in the case of
-libraries that consume the NumPy API such as xarray.
+We wouldn't allow adding optional positional arguments, because these are
+reserved for future use by NumPy itself, but conflicts between keyword arguments
+should be relatively rare.
+However, this flexibility would come with a cost. In particular, it implicitly
+adds ``**kwargs`` to the signature for all wrapped NumPy functions without
+actually including it (because we use ``functools.wraps``). This means it is
+unlikely to work well with static analysis tools, which could report invalid
+arguments. Likewise, there is a price in readability: these optional arguments
+won't be included in the docstrings for NumPy functions.
+
+It's not clear that this tradeoff is worth it, so we propose to leave this out
+for now. Adding implementation-specific arguments will require using those
+libraries directly.
+
+Other possible choices for the protocol
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ``__array_function__`` method includes only two arguments, ``func``
+and ``types``, that provide information about the context of the function call.
+
+``func`` is part of the protocol because there is no way to avoid it:
+implementations need to be able to dispatch by matching a function to NumPy's
+public API.
+
+``types`` is included because we can compute it almost for free as part of
+collecting ``__array_function__`` implementations to call in
+``array_function_implementation_or_override``. We also think it will be used
+by many ``__array_function__`` methods, which otherwise would need to extract
+this information themselves. It would be equivalently easy to provide single
+instances of each type, but providing only types seemed cleaner.
+
+Taking this even further, it was suggested that ``__array_function__`` should be
+a ``classmethod``. We agree that it would be a little cleaner to remove the
+redundant ``self`` argument, but feel that this minor clean-up would not be
+worth breaking from the precedent of ``__array_ufunc__``.
+
+There are two other arguments that we think *might* be important to pass to
+``__array_function__`` implementations:
+
+- Access to the non-dispatched implementation (i.e., before wrapping with
+ ``array_function_dispatch``) in ``ndarray.__array_function__`` would allow
+ us to drop special case logic for that method from
+ ``array_function_implementation_or_override``.
+- Access to the ``dispatcher`` function passed into
+ ``array_function_dispatch()`` would allow ``__array_function__``
+ implementations to determine the list of "array-like" arguments in a generic
+ way by calling ``dispatcher(*args, **kwargs)``. This *could* be useful for
+ ``__array_function__`` implementations that dispatch based on the value of an
+ array attribute (e.g., ``dtype`` or ``units``) rather than directly on the
+ array type.
+
+We have left these out for now, because we don't know that they are necessary.
+If we want to include them in the future, the easiest way to do so would be to
+update the ``array_function_dispatch`` decorator to add them as function
+attributes.
+
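+Such an update might look like the following sketch (the attribute names are
+hypothetical):
+
+.. code:: python
+
+    def array_function_dispatch(dispatcher):
+        def decorator(implementation):
+            @functools.wraps(implementation)
+            def public_api(*args, **kwargs):
+                relevant_args = dispatcher(*args, **kwargs)
+                return array_function_implementation_or_override(
+                    implementation, public_api, relevant_args, args, kwargs)
+            # expose both pieces for use by __array_function__ methods
+            public_api._implementation = implementation
+            public_api._dispatcher = dispatcher
+            return public_api
+        return decorator
+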
+Callable objects generated at runtime
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+NumPy has some APIs that define callable objects *dynamically*, such as
+``vectorize`` and methods on ``random.RandomState`` objects. Examples can
+also be found in other core libraries in the scientific Python stack, e.g.,
+distribution objects in scipy.stats and model objects in scikit-learn. It would
+be nice to be able to write overloads for such callables, too. This presents a
+challenge for the ``__array_function__`` protocol, because unlike the case for
+functions there is no public object in the ``numpy`` namespace to pass into
+the ``func`` argument.
+
+We could potentially handle this by establishing an alternative convention
+for how the ``func`` argument could be inspected, e.g., by using
+``func.__self__`` to obtain the object the method is bound to and
+``func.__func__`` to return the underlying function object. However, some
+caution is in order, because this would enmesh what are currently
+implementation details as permanent features of the interface, such as the
+fact that ``vectorize`` is implemented as a class rather than a closure, or
+whether a method is implemented directly or using a descriptor.
+
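+Under such a convention, an ``__array_function__`` implementation might
+inspect a bound method roughly as follows (a sketch only; none of this is
+part of the proposal):
+
+.. code:: python
+
+    def __array_function__(self, func, types, args, kwargs):
+        if getattr(func, '__self__', None) is not None:
+            owner = func.__self__    # e.g., a RandomState instance
+            unbound = func.__func__  # the underlying function object
+            ...
+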
+Given the complexity and the limited use cases, we are also deferring on this
+issue for now, but we are confident that ``__array_function__`` could be
+expanded to accommodate these use cases in the future if need be.
Discussion
----------
-Various alternatives to this proposal were discussed in a few Github issues:
+Various alternatives to this proposal were discussed in a few GitHub issues:
-1. `pydata/sparse #1 <https://github.com/pydata/sparse/issues/1>`_
-2. `numpy/numpy #11129 <https://github.com/numpy/numpy/issues/11129>`_
+1. `pydata/sparse #1 <https://github.com/pydata/sparse/issues/1>`_
+2. `numpy/numpy #11129 <https://github.com/numpy/numpy/issues/11129>`_
Additionally it was the subject of `a blogpost
-<http://matthewrocklin.com/blog/work/2018/05/27/beyond-numpy>`_ Following this
+<http://matthewrocklin.com/blog/work/2018/05/27/beyond-numpy>`_. Following this
it was discussed at a `NumPy developer sprint
<https://scisprints.github.io/#may-numpy-developer-sprint>`_ at the `UC
Berkeley Institute for Data Science (BIDS) <https://bids.berkeley.edu/>`_.
-
-References and Footnotes
-------------------------
-
-.. [1] Each NEP must either be explicitly labeled as placed in the public domain (see
- this NEP as an example) or licensed under the `Open Publication License`_.
-
-.. _Open Publication License: http://www.opencontent.org/openpub/
-
+Detailed discussion of this proposal itself can be found on
+`the mailing list <https://mail.python.org/pipermail/numpy-discussion/2018-June/078127.html>`_ and relevant pull requests
+(`1 <https://github.com/numpy/numpy/pull/11189>`_,
+`2 <https://github.com/numpy/numpy/pull/11303#issuecomment-396638175>`_,
+`3 <https://github.com/numpy/numpy/pull/11374>`_).
Copyright
---------
-This document has been placed in the public domain. [1]_
+This document has been placed in the public domain.
diff --git a/doc/neps/nep-0019-rng-policy.rst b/doc/neps/nep-0019-rng-policy.rst
index de9164bba..f50897b0f 100644
--- a/doc/neps/nep-0019-rng-policy.rst
+++ b/doc/neps/nep-0019-rng-policy.rst
@@ -1,12 +1,12 @@
-==============================
-Random Number Generator Policy
-==============================
+=======================================
+NEP 19 — Random Number Generator Policy
+=======================================
:Author: Robert Kern <robert.kern@gmail.com>
-:Status: Draft
+:Status: Accepted
:Type: Standards Track
:Created: 2018-05-24
-
+:Resolution: https://mail.python.org/pipermail/numpy-discussion/2018-June/078126.html
Abstract
--------
@@ -91,23 +91,12 @@ those contributors simply walked away.
Implementation
--------------
-We propose first freezing ``RandomState`` as it is and developing a new RNG
-subsystem alongside it. This allows anyone who has been relying on our old
-stream-compatibility guarantee to have plenty of time to migrate.
-``RandomState`` will be considered deprecated, but with a long deprecation
-cycle, at least a few years. Deprecation warnings will start silent but become
-increasingly noisy over time. Bugs in the current state of the code will *not*
-be fixed if fixing them would impact the stream. However, if changes in the
-rest of ``numpy`` would break something in the ``RandomState`` code, we will
-fix ``RandomState`` to continue working (for example, some change in the
-C API). No new features will be added to ``RandomState``. Users should
-migrate to the new subsystem as they are able to.
-
-Work on a proposed `new PRNG subsystem
-<https://github.com/bashtage/randomgen>`_ is already underway. The specifics
-of the new design are out of scope for this NEP and up for much discussion, but
-we will discuss general policies that will guide the evolution of whatever code
-is adopted.
+Work on a proposed new PRNG subsystem is already underway in the randomgen_
+project. The specifics of the new design are out of scope for this NEP and up
+for much discussion, but we will discuss general policies that will guide the
+evolution of whatever code is adopted. We will also outline just a few of the
+requirements that such a new system must have to support the policy proposed in
+this NEP.
First, we will maintain API source compatibility just as we do with the rest of
``numpy``. If we *must* make a breaking change, we will only do so with an
@@ -116,66 +105,158 @@ appropriate deprecation period and warnings.
Second, breaking stream-compatibility in order to introduce new features or
improve performance will be *allowed* with *caution*. Such changes will be
considered features, and as such will be no faster than the standard release
-cadence of features (i.e. on ``X.Y`` releases, never ``X.Y.Z``). Slowness is
-not a bug. Correctness bug fixes that break stream-compatibility can happen on
-bugfix releases, per usual, but developers should consider if they can wait
-until the next feature release. We encourage developers to strongly weight
-user’s pain from the break in stream-compatibility against the improvements.
-One example of a worthwhile improvement would be to change algorithms for
-a significant increase in performance, for example, moving from the `Box-Muller
-transform <https://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform>`_ method
-of Gaussian variate generation to the faster `Ziggurat algorithm
-<https://en.wikipedia.org/wiki/Ziggurat_algorithm>`_. An example of an
-unworthy improvement would be tweaking the Ziggurat tables just a little bit.
+cadence of features (i.e. on ``X.Y`` releases, never ``X.Y.Z``). Slowness will
+not be considered a bug for this purpose. Correctness bug fixes that break
+stream-compatibility can happen on bugfix releases, per usual, but developers
+should consider if they can wait until the next feature release. We encourage
+developers to strongly weigh users’ pain from the break in
+stream-compatibility against the improvements. One example of a worthwhile
+improvement would be to change algorithms for a significant increase in
+performance, for example, moving from the `Box-Muller transform
+<https://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform>`_ method of
+Gaussian variate generation to the faster `Ziggurat algorithm
+<https://en.wikipedia.org/wiki/Ziggurat_algorithm>`_. An example of a
+discouraged improvement would be tweaking the Ziggurat tables just a little bit
+for a small performance improvement.
Any new design for the RNG subsystem will provide a choice of different core
-uniform PRNG algorithms. We will be more strict about a select subset of
-methods on these core PRNG objects. They MUST guarantee stream-compatibility
-for a minimal, specified set of methods which are chosen to make it easier to
-compose them to build other distributions. Namely,
+uniform PRNG algorithms. A promising design choice is to make these core
+uniform PRNGs their own lightweight objects with a minimal set of methods
+(randomgen_ calls them “basic RNGs”). The broader set of non-uniform
+distributions will be its own class that holds a reference to one of these core
+uniform PRNG objects and simply delegates to the core uniform PRNG object when
+it needs uniform random numbers. To borrow an example from randomgen_, the
+class ``MT19937`` is a basic RNG that implements the classic Mersenne Twister
+algorithm. The class ``RandomGenerator`` wraps around the basic RNG to provide
+all of the non-uniform distribution methods::
+
+    # This is not the only way to instantiate this object.
+    # This is just handy for demonstrating the delegation.
+    >>> brng = MT19937(seed)
+    >>> rg = RandomGenerator(brng)
+    >>> x = rg.standard_normal(10)
+
+We will be more strict about a select subset of methods on these basic RNG
+objects. They MUST guarantee stream-compatibility for a specified set
+of methods which are chosen to make it easier to compose them to build other
+distributions and which are needed to abstract over the implementation details
+of the variety of core PRNG algorithms. Namely,
* ``.bytes()``
* ``.random_uintegers()``
* ``.random_sample()``
-Furthermore, the new design should also provide one generator class (we shall
-call it ``StableRandom`` for discussion purposes) that provides a slightly
-broader subset of distribution methods for which stream-compatibility is
-*guaranteed*. The point of ``StableRandom`` is to provide something that can
-be used in unit tests so projects that currently have tests which rely on the
-precise stream can be migrated off of ``RandomState``. For the best
-transition, ``StableRandom`` should use as its core uniform PRNG the current
-MT19937 algorithm. As best as possible, the API for the distribution methods
-that are provided on ``StableRandom`` should match their counterparts on
-``RandomState``. They should provide the same stream that the current version
-of ``RandomState`` does. Because their intended use is for unit tests, we do
-not need the performance improvements from the new algorithms that will be
-introduced by the new subsystem.
-
-The list of ``StableRandom`` methods should be chosen to support unit tests:
-
- * ``.randint()``
- * ``.uniform()``
- * ``.normal()``
- * ``.standard_normal()``
- * ``.choice()``
- * ``.shuffle()``
- * ``.permutation()``
-
-
-Not Versioning
---------------
+The distributions class (``RandomGenerator``) SHOULD have all of the same
+distribution methods as ``RandomState`` with close-enough function signatures
+such that almost all code that currently works with ``RandomState`` instances
+will work with ``RandomGenerator`` instances (ignoring the precise stream
+values). Some variance will be allowed for integer distributions: in order to
+avoid some of the cross-platform problems described above, these SHOULD be
+rewritten to work with ``uint64`` numbers on all platforms.
+
+.. _randomgen: https://github.com/bashtage/randomgen
+
+
+Supporting Unit Tests
+:::::::::::::::::::::
+
+Because we did make a strong stream-compatibility guarantee early in numpy’s
+life, reliance on stream-compatibility has grown beyond reproducible
+simulations. One use case that remains for stream-compatibility across numpy
+versions is to use pseudorandom streams to generate test data in unit tests.
+With care, many of the cross-platform instabilities can be avoided in the
+context of small unit tests.
+
+The new PRNG subsystem MUST provide a second, legacy distributions class that
+uses the same implementations of the distribution methods as the current
+version of ``numpy.random.RandomState``. The methods of this class will have
+strict stream-compatibility guarantees, even stricter than the current policy.
+It is intended that this class will no longer be modified, except to keep it
+working when numpy internals change. All new development should go into the
+primary distributions class. Bug fixes that change the stream SHALL NOT be
+made to ``RandomState``; instead, buggy distributions should be made to warn
+when they are buggy. The purpose of ``RandomState`` will be documented as
+providing certain fixed functionality for backwards compatibility and stable
+numbers for the limited purpose of unit testing, and not making whole programs
+reproducible across numpy versions.
+
+This legacy distributions class MUST be accessible under the name
+``numpy.random.RandomState`` for backwards compatibility. All current ways of
+instantiating ``numpy.random.RandomState`` with a given state should
+instantiate the Mersenne Twister basic RNG with the same state. The legacy
+distributions class MUST be capable of accepting other basic RNGs. The purpose
+here is to ensure that one can write a program with a consistent basic RNG
+state with a mixture of libraries that may or may not have upgraded from
+``RandomState``. Instances of the legacy distributions class MUST respond
+``True`` to ``isinstance(rg, numpy.random.RandomState)`` because there is
+current utility code that relies on that check. Similarly, old pickles of
+``numpy.random.RandomState`` instances MUST unpickle correctly.
+
+
+``numpy.random.*``
+::::::::::::::::::
+
+The preferred best practice for getting reproducible pseudorandom numbers is to
+instantiate a generator object with a seed and pass it around. The implicit
+global ``RandomState`` behind the ``numpy.random.*`` convenience functions can
+cause problems, especially when threads or other forms of concurrency are
+involved. Global state is always problematic. We categorically recommend
+avoiding using the convenience functions when reproducibility is involved.
+
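+For example, the recommended pattern is already expressible with today's
+``RandomState``::
+
+    from numpy.random import RandomState
+
+    def simulate(rng):
+        # all randomness flows through the explicitly passed generator
+        return rng.normal(size=10)
+
+    rng = RandomState(12345)
+    result = simulate(rng)
+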
+That said, people do use them and use ``numpy.random.seed()`` to control the
+state underneath them. It can be hard to categorize and count API usages
+consistently and usefully, but a very common usage is in unit tests where many
+of the problems of global state are less likely.
+
+This NEP does not propose removing these functions or changing them to use the
+less-stable ``RandomGenerator`` distribution implementations. Future NEPs
+might.
+
+Specifically, the initial release of the new PRNG subsystem SHALL leave these
+convenience functions as aliases to the methods on a global ``RandomState``
+that is initialized with a Mersenne Twister basic RNG object. A call to
+``numpy.random.seed()`` will be forwarded to that basic RNG object. In
+addition, the global ``RandomState`` instance MUST be accessible in this
+initial release by the name ``numpy.random.mtrand._rand``: Robert Kern long ago
+promised ``scikit-learn`` that this name would be stable. Whoops.
+
+In order to allow certain workarounds, it MUST be possible to replace the basic
+RNG underneath the global ``RandomState`` with any other basic RNG object (we
+leave the precise API details up to the new subsystem). Calling
+``numpy.random.seed()`` thereafter SHOULD just pass the given seed to the
+current basic RNG object and not attempt to reset the basic RNG to the Mersenne
+Twister. The set of ``numpy.random.*`` convenience functions SHALL remain the
+same as they currently are. They SHALL be aliases to the ``RandomState``
+methods and not the new less-stable distributions class (``RandomGenerator``,
+in the examples above). Users who want to get the fastest, best distributions
+can follow best practices and instantiate generator objects explicitly.
+
+This NEP does not propose that these requirements remain in perpetuity. After
+we have experience with the new PRNG subsystem, we can and should revisit these
+issues in future NEPs.
+
+
+Alternatives
+------------
+
+Versioning
+::::::::::
For a long time, we considered that the way to allow algorithmic improvements
while maintaining the stream was to apply some form of versioning. That is,
every time we make a stream change in one of the distributions, we increment
some version number somewhere. ``numpy.random`` would keep all past versions
-of the code, and there would be a way to get the old versions. Proposals of
-how to do this exactly varied widely, but we will not exhaustively list them
-here. We spent years going back and forth on these designs and were not able
-to find one that sufficed. Let that time lost, and more importantly, the
-contributors that we lost while we dithered, serve as evidence against the
-notion.
+of the code, and there would be a way to get the old versions.
+
+We will not be doing this. If one needs to get the exact bit-for-bit results
+from a given version of ``numpy``, whether one uses random numbers or not, one
+should use the exact version of ``numpy``.
+
+Proposals of how to do RNG versioning varied widely, and we will not
+exhaustively list them here. We spent years going back and forth on these
+designs and were not able to find one that sufficed. Let that time lost, and
+more importantly, the contributors that we lost while we dithered, serve as
+evidence against the notion.
Concretely, adding in versioning makes maintenance of ``numpy.random``
difficult. Necessarily, we would be keeping lots of versions of the same code
@@ -195,11 +276,49 @@ is to pin the release of ``numpy`` as a whole, versioning ``RandomState`` alone
is superfluous.
+``StableRandom``
+::::::::::::::::
+
+A previous version of this NEP proposed to leave ``RandomState`` completely
+alone for a deprecation period and build the new subsystem alongside with new
+names. To satisfy the unit testing use case, it proposed introducing a small
+distributions class nominally called ``StableRandom``. It would have provided
+a small subset of distribution methods that were considered most useful in unit
+testing, but deliberately not the full set, so that it would be less likely to
+be used outside of the testing context.
+
+During discussion about this proposal, it became apparent that there was no
+satisfactory subset. At least some projects used a fairly broad selection of
+the ``RandomState`` methods in unit tests.
+
+Downstream project owners would have been forced to modify their code to
+accommodate the new PRNG subsystem. Some modifications might be simply
+mechanical, but the bulk of the work would have been tedious churn for no
+positive improvement to the downstream project, just avoiding being broken.
+
+Furthermore, under this old proposal, we would have had a quite lengthy
+deprecation period where ``RandomState`` existed alongside the new system of
+basic RNGs and distribution classes. Leaving the implementation of
+``RandomState`` fixed meant that it could not use the new basic RNG state
+objects. Developing programs that use a mixture of libraries that have and
+have not upgraded would require managing two sets of PRNG states. This would
+notionally have been time-limited, but we intended the deprecation to be very
+long.
+
+The current proposal solves all of these problems. All current usages of
+``RandomState`` will continue to work in perpetuity, though some may be
+discouraged through documentation. Unit tests can continue to use the full
+complement of ``RandomState`` methods. Mixed ``RandomState/RandomGenerator``
+code can safely share the common basic RNG state. Unmodified ``RandomState``
+code can make use of the new features of alternative basic RNGs like settable
+streams.
+
+
Discussion
----------
-- https://mail.python.org/pipermail/numpy-discussion/2018-January/077608.html
-- https://github.com/numpy/numpy/pull/10124#issuecomment-350876221
+- `NEP discussion <https://mail.python.org/pipermail/numpy-discussion/2018-June/078126.html>`_
+- `Earlier discussion <https://mail.python.org/pipermail/numpy-discussion/2018-January/077608.html>`_
Copyright
diff --git a/doc/neps/nep-0020-gufunc-signature-enhancement.rst b/doc/neps/nep-0020-gufunc-signature-enhancement.rst
new file mode 100644
index 000000000..38a9fd53b
--- /dev/null
+++ b/doc/neps/nep-0020-gufunc-signature-enhancement.rst
@@ -0,0 +1,257 @@
+===============================================================
+NEP 20 — Expansion of Generalized Universal Function Signatures
+===============================================================
+
+:Author: Marten van Kerkwijk <mhvk@astro.utoronto.ca>
+:Status: Accepted
+:Type: Standards Track
+:Created: 2018-06-10
+:Resolution: https://mail.python.org/pipermail/numpy-discussion/2018-April/077959.html,
+ https://mail.python.org/pipermail/numpy-discussion/2018-May/078078.html
+
+.. note:: The proposal to add fixed (i) and flexible (ii) dimensions
+ was accepted, while that to add broadcastable (iii) ones was deferred.
+
+Abstract
+--------
+
+Generalized universal functions are, as their name indicates, a generalization
+of universal functions: they operate on non-scalar elements. Their signature
+describes the structure of the elements they operate on, with names linking
+dimensions of the operands that should be the same. Here, it is proposed to
+extend the signature to allow the signature to indicate that a dimension (i)
+has fixed size; (ii) can be absent; and (iii) can be broadcast.
+
+Detailed description
+--------------------
+
+Each part of the proposal is driven by specific needs [1]_.
+
+1. Fixed-size dimensions. Code working with spatial vectors is often written
+ explicitly for 2- or 3-dimensional space (e.g., the code from the `Standards Of
+ Fundamental Astronomy <http://www.iausofa.org/>`_, which the author hopes
+ to wrap using gufuncs for astropy [2]_). The signature should be able to
+ indicate that. E.g., the signature of a function that converts a polar
+ angle to a two-dimensional cartesian unit vector would currently have to be
+ ``()->(n)``, with there being no way to indicate that ``n`` has to equal 2.
+ Indeed, this signature is particularly annoying since without putting in an
+ output argument, the current gufunc wrapper code fails because it cannot
+ determine ``n``. Similarly, the signature for a cross product of two
+ 3-dimensional vectors has to be ``(n),(n)->(n)``, with again no way to
+ indicate that ``n`` has to equal 3. Hence, the proposal here is to allow one
+ to give numerical values in addition to variable names. Thus, angle to
+ two-dimensional unit vector would be ``()->(2)``; two angles to
+ three-dimensional unit vector ``(),()->(3)``; and that for the cross
+ product of two three-dimensional vectors would be ``(3),(3)->(3)``.
+
+2. Possibly missing dimensions. This part is almost entirely driven by the
+ wish to wrap ``matmul`` in a gufunc. ``matmul`` stands for matrix
+ multiplication, and if it did only that, it could be covered with the
+ signature ``(m,n),(n,p)->(m,p)``. However, it has special cases for when a
+ dimension is missing, allowing either argument to be treated as a single
+ vector, with the function thus becoming, effectively, vector-matrix,
+ matrix-vector, or vector-vector multiplication (but with no
+ broadcasting). To support this, it is suggested to allow postfixing a
+ dimension name with a question mark to indicate that the dimension does not
+ necessarily have to be present.
+
+ With this addition, the signature for ``matmul`` can be expressed as
+ ``(m?,n),(n,p?)->(m?,p?)``. This indicates that if, e.g., the second
+ operand has only one dimension, for the purposes of the elementary function
+ it will be treated as if that input has core shape ``(n, 1)``, and the
+ output has the corresponding core shape of ``(m, 1)``. The actual output
+ array, however, has the flexible dimension removed, i.e., it will have
+ shape ``(..., m)``. Similarly, if both arguments have only a single
+ dimension, the inputs will be presented as having shapes ``(1, n)`` and
+ ``(n, 1)`` to the elementary function, and the output as ``(1, 1)``, while
+ the actual output array returned will have shape ``()``. In this way, the
+ signature allows one to use a single elementary function for four related
+ but different signatures, ``(m,n),(n,p)->(m,p)``, ``(n),(n,p)->(p)``,
+ ``(m,n),(n)->(m)`` and ``(n),(n)->()`` (a short demonstration follows this
+ list).
+
+3. Dimensions that can be broadcast. For some applications, broadcasting
+ between operands makes sense. For instance, an ``all_equal`` function that
+ compares vectors in arrays could have a signature ``(n),(n)->()``, but this
+ forces both operands to be arrays, while it would be useful also to check
+ that, e.g., all parts of a vector are constant (maybe zero). The proposal
+ is to allow the implementer of a gufunc to indicate that a dimension can be
+ broadcast by post-fixing the dimension name with ``|1``. Hence, the
+ signature for ``all_equal`` would become ``(n|1),(n|1)->()``. The
+ signature seems handy more generally for "chained ufuncs"; e.g., another
+ application might be in a putative ufunc implementing ``sumproduct``.
+
+ Another example that arose in the discussion, is of a weighted mean, which
+ might look like ``weighted_mean(y, sigma[, axis, ...])``, returning the
+ mean and its uncertainty. With a signature of ``(n),(n)->(),()``, one
+ would be forced to always give as many sigmas as there are data points,
+ while broadcasting would allow one to give a single sigma for all points
+ (which is still useful to calculate the uncertainty on the mean).
+
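+As a short demonstration of the flexible dimensions in item 2, ``matmul``
+already behaves as the ``(m?,n),(n,p?)->(m?,p?)`` signature describes::
+
+    >>> import numpy as np
+    >>> np.matmul(np.ones((2, 3)), np.ones((3, 4))).shape  # (m,n),(n,p)->(m,p)
+    (2, 4)
+    >>> np.matmul(np.ones((2, 3)), np.ones(3)).shape       # (m,n),(n)->(m)
+    (2,)
+    >>> np.matmul(np.ones(3), np.ones(3)).shape            # (n),(n)->()
+    ()
+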
+Implementation
+--------------
+
+The proposed changes have all been implemented [3]_, [4]_, [5]_. These PRs
+extend the ufunc structure with two new fields, each of size equal to the
+number of distinct dimensions, with ``core_dim_sizes`` holding possibly fixed
+sizes, and ``core_dim_flags`` holding flags indicating whether a dimension can
+be missing or broadcast. To ensure we can distinguish between this new
+version and previous versions, an unused entry ``reserved1`` is repurposed as
+a version number.
+
+In the implementation, care is taken that, as far as the elementary function
+is concerned, flagged dimensions are not treated any differently from
+non-flagged ones: for instance, sizes of fixed-size dimensions are still
+passed on to the elementary function (but the loop can now count on that size
+being equal to the fixed one given in the signature).
+
+An implementation detail to be decided upon is whether it might be handy to
+have a summary of all flags. This could possibly be stored in ``core_enabled``
+(which currently is a bool), with non-zero continuing to indicate a gufunc,
+but specific flags indicating whether or not a gufunc uses fixed, flexible, or
+broadcastable dimensions.
+
+With the above, the formal definition of the syntax would become [4]_::
+
+    <Signature>            ::= <Input arguments> "->" <Output arguments>
+    <Input arguments>      ::= <Argument list>
+    <Output arguments>     ::= <Argument list>
+    <Argument list>        ::= nil | <Argument> | <Argument> "," <Argument list>
+    <Argument>             ::= "(" <Core dimension list> ")"
+    <Core dimension list>  ::= nil | <Core dimension> |
+                               <Core dimension> "," <Core dimension list>
+    <Core dimension>       ::= <Dimension name> <Dimension modifier>
+    <Dimension name>       ::= valid Python variable name | valid integer
+    <Dimension modifier>   ::= nil | "|1" | "?"
+
+#. All quotes are for clarity.
+#. Unmodified core dimensions that share the same name must have the same size.
+ Each dimension name typically corresponds to one level of looping in the
+ elementary function's implementation.
+#. White spaces are ignored.
+#. An integer as a dimension name freezes that dimension to the value.
+#. If a name is suffixed with the ``|1`` modifier, it is allowed to broadcast
+ against other dimensions with the same name. All input dimensions
+ must share this modifier, while no output dimensions should have it.
+#. If the name is suffixed with the ``?`` modifier, the dimension is a core
+ dimension only if it exists on all inputs and outputs that share it;
+ otherwise it is ignored (and replaced by a dimension of size 1 for the
+ elementary function).
+
+Examples of signatures [4]_:
+
++----------------------------+-----------------------------------+
+| Signature                  | Possible use                      |
++----------------------------+-----------------------------------+
+| ``(),()->()``              | Addition                          |
++----------------------------+-----------------------------------+
+| ``(i)->()``                | Sum over last axis                |
++----------------------------+-----------------------------------+
+| ``(i|1),(i|1)->()``        | Test for equality along axis,     |
+|                            | allowing comparison with a scalar |
++----------------------------+-----------------------------------+
+| ``(i),(i)->()``            | inner vector product              |
++----------------------------+-----------------------------------+
+| ``(m,n),(n,p)->(m,p)``     | matrix multiplication             |
++----------------------------+-----------------------------------+
+| ``(n),(n,p)->(p)``         | vector-matrix multiplication      |
++----------------------------+-----------------------------------+
+| ``(m,n),(n)->(m)``         | matrix-vector multiplication      |
++----------------------------+-----------------------------------+
+| ``(m?,n),(n,p?)->(m?,p?)`` | all four of the above at once,    |
+|                            | except vectors cannot have loop   |
+|                            | dimensions (i.e., like ``matmul``)|
++----------------------------+-----------------------------------+
+| ``(3),(3)->(3)``           | cross product for 3-vectors       |
++----------------------------+-----------------------------------+
+| ``(i,t),(j,t)->(i,j)``     | inner over the last dimension,    |
+|                            | outer over the second to last,    |
+|                            | and loop/broadcast over the rest. |
++----------------------------+-----------------------------------+
+
+Backward compatibility
+----------------------
+
+One possible worry is the change in ufunc structure. For most applications,
+which call ``PyUFunc_FromDataAndSignature``, this is entirely transparent.
+Furthermore, by repurposing ``reserved1`` as a version number, code compiled
+against older versions of numpy will continue to work (though one will get a
+warning upon import of that code with a newer version of numpy), except if
+code explicitly changes the ``reserved1`` entry.
+
+Alternatives
+------------
+
+It was suggested instead of extending the signature, to have multiple
+dispatch, so that, e.g., ``matmul`` would simply have the multiple signatures
+it supports, i.e., instead of ``(m?,n),(n,p?)->(m?,p?)`` one would have
+``(m,n),(n,p)->(m,p) | (n),(n,p)->(p) | (m,n),(n)->(m) | (n),(n)->()``. A
+disadvantage of this is that the developer now has to make sure that the
+elementary function can deal with these different signatures. Furthermore,
+the expansion quickly becomes cumbersome. For instance, for the ``all_equal``
+signature of ``(n|1),(n|1)->()``, one would have to have five entries:
+``(n),(n)->() | (n),(1)->() | (1),(n)->() | (n),()->() | (),(n)->()``. For
+signatures like ``(m|1,n|1,o|1),(m|1,n|1,o|1)->()`` (from the ``cube_equal``
+test case in [4]_), it is not even worth writing out the expansion.
+
+For broadcasting, the alternative suffix of ``^`` was suggested (as
+broadcasting can be thought of as increasing the size of the array). This
+seems less clear. Furthermore, it was wondered whether it should not just be
+an all-or-nothing flag. This could be the case, though given the postfix
+for flexible dimensions, arguably another postfix is clearer (as is the
+implementation).
+
+Discussion
+----------
+
+The proposals here were discussed at fair length on the mailing list [6]_,
+[7]_. The main points of contention were whether the use cases were
+sufficiently strong. In particular, for frozen dimensions, it was argued that
+checks on the right number could instead be put in loop selection code. That
+alternative, however, seems much less clear, for no real benefit.
+
+For broadcasting, the lack of examples of elementary functions that might need
+it was noted, and it was questioned whether something like ``all_equal``
+was best done with a gufunc rather than as a special method on ``np.equal``.
+One counter-argument is that there is an actual PR for ``all_equal`` [8]_.
+Another is that, even if one were to use a method, it would be good to be able
+to express its signature (just as is possible at least for ``reduce`` and
+``accumulate``).
+
+A final argument was that we were making gufuncs too complex. This arguably
+holds for the dimensions that can be omitted, but those also have the
+strongest use case. Frozen dimensions have a very simple implementation and
+their meaning is obvious. The ability to broadcast is simple too, once the
+flexible dimensions are supported.
+
+References and Footnotes
+------------------------
+
+.. [1] Identified needs and suggestions for the implementation are not all by
+ the author. In particular, the suggestion for fixed dimensions and
+ initial implementation was by Jaime Frio (`gh-5015
+ <https://github.com/numpy/numpy/pull/5015>`_), the suggestion of ``?``
+ to indicate dimensions can be omitted was by Nathaniel Smith, and the
+ initial implementation of that by Matti Picus (`gh-11132
+ <https://github.com/numpy/numpy/pull/11132>`_).
+.. [2] `wrap ERFA functions in gufuncs
+ <https://github.com/astropy/astropy/pull/7502>`_ (`ERFA
+ <https://github.com/liberfa/erfa>`_) is the less stringently licensed
+ version of `Standards Of Fundamental Astronomy
+ <http://www.iausofa.org/>`_
+.. [3] `fixed-size and flexible dimensions
+ <https://github.com/numpy/numpy/pull/11175>`_
+.. [4] `broadcastable dimensions
+ <https://github.com/numpy/numpy/pull/11179>`_
+.. [5] `use in matmul <https://github.com/numpy/numpy/pull/11133>`_
+.. [6] Discusses implementations for ``matmul``:
+ https://mail.python.org/pipermail/numpy-discussion/2018-May/077972.html,
+ https://mail.python.org/pipermail/numpy-discussion/2018-May/078021.html
+.. [7] Broadcasting:
+ https://mail.python.org/pipermail/numpy-discussion/2018-May/078078.html
+.. [8] `Logical gufuncs <https://github.com/numpy/numpy/pull/8528>`_ (includes
+ ``all_equal``)
+
+Copyright
+---------
+
+This document has been placed in the public domain.
diff --git a/doc/neps/nep-0021-advanced-indexing.rst b/doc/neps/nep-0021-advanced-indexing.rst
new file mode 100644
index 000000000..5acabbf16
--- /dev/null
+++ b/doc/neps/nep-0021-advanced-indexing.rst
@@ -0,0 +1,661 @@
+==================================================
+NEP 21 — Simplified and explicit advanced indexing
+==================================================
+
+:Author: Sebastian Berg
+:Author: Stephan Hoyer <shoyer@google.com>
+:Status: Draft
+:Type: Standards Track
+:Created: 2015-08-27
+
+
+Abstract
+--------
+
+NumPy's "advanced" indexing support for indexing arrays with other arrays is
+one of its most powerful and popular features. Unfortunately, the existing
+rules for advanced indexing with multiple array indices are typically confusing
+to both new, and in many cases even old, users of NumPy. Here we propose an
+overhaul and simplification of advanced indexing, including two new "indexer"
+attributes ``oindex`` and ``vindex`` to facilitate explicit indexing.
+
+Background
+----------
+
+Existing indexing operations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+NumPy arrays currently support a flexible range of indexing operations:
+
+- "Basic" indexing involving only slices, integers, ``np.newaxis`` and ellipsis
+ (``...``), e.g., ``x[0, :3, np.newaxis]`` for selecting the first element
+ from the 0th axis, the first three elements from the 1st axis and inserting a
+  new axis of size 1 at the end. Basic indexing always returns a view of the
+ indexed array's data.
+- "Advanced" indexing, also called "fancy" indexing, includes all cases where
+ arrays are indexed by other arrays. Advanced indexing always makes a copy:
+
+ - "Boolean" indexing by boolean arrays, e.g., ``x[x > 0]`` for
+ selecting positive elements.
+ - "Vectorized" indexing by one or more integer arrays, e.g., ``x[[0, 1]]``
+ for selecting the first two elements along the first axis. With multiple
+ arrays, vectorized indexing uses broadcasting rules to combine indices along
+ multiple dimensions. This allows for producing a result of arbitrary shape
+ with arbitrary elements from the original arrays.
+  - "Mixed" indexing involving any combination of the other advanced types.
+ This is no more powerful than vectorized indexing, but is sometimes more
+ convenient.
+
+For clarity, we will refer to these existing rules as "legacy indexing".
+This is only a high-level summary; for more details, see NumPy's documentation
+and `Examples` below.
+
+Outer indexing
+~~~~~~~~~~~~~~
+
+One broadly useful class of indexing operations is not supported:
+
+- "Outer" or orthogonal indexing treats one-dimensional arrays equivalently to
+ slices for determining output shapes. The rule for outer indexing is that the
+ result should be equivalent to independently indexing along each dimension
+ with integer or boolean arrays as if both the indexed and indexing arrays
+ were one-dimensional. This form of indexing is familiar to many users of other
+ programming languages such as MATLAB, Fortran and R.
+
+The reason why NumPy omits support for outer indexing is that the rules for
+outer and vectorized indexing conflict. Consider indexing a 2D array by two 1D
+arrays, e.g., ``x[[0, 1], [0, 1]]``:
+
+- Outer indexing is equivalent to combining multiple integer indices with
+ ``itertools.product()``. The result in this case is another 2D array with
+ all combinations of indexed elements, e.g.,
+  ``np.array([[x[0, 0], x[0, 1]], [x[1, 0], x[1, 1]]])``.
+- Vectorized indexing is equivalent to combining multiple integer indices with
+ ``zip()``. The result in this case is a 1D array containing the diagonal
+ elements, e.g., ``np.array([x[0, 0], x[1, 1]])``.
+
+This difference is a frequent stumbling block for new NumPy users. The outer
+indexing model is easier to understand, and is a natural generalization of
+slicing rules. But NumPy instead chose to support vectorized indexing, because
+it is strictly more powerful.
+
+It is always possible to emulate outer indexing by vectorized indexing with
+the right indices. To make this easier, NumPy includes utility objects and
+functions such as ``np.ogrid`` and ``np.ix_``, e.g.,
+``x[np.ix_([0, 1], [0, 1])]``. However, there are no utilities for emulating
+fully general/mixed outer indexing, which could unambiguously allow for slices,
+integers, and 1D boolean and integer arrays.
+
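+For example, with a small 2D array the two interpretations give (using
+``np.ix_`` to emulate the outer behaviour)::
+
+    >>> x = np.arange(4).reshape(2, 2)
+    >>> x[[0, 1], [0, 1]]  # vectorized: the diagonal elements
+    array([0, 3])
+    >>> x[np.ix_([0, 1], [0, 1])]  # outer: all combinations
+    array([[0, 1],
+           [2, 3]])
+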
+Mixed indexing
+~~~~~~~~~~~~~~
+
+NumPy's existing rules for combining multiple types of indexing in the same
+operation are quite complex, involving a number of edge cases.
+
+One reason why mixed indexing is particularly confusing is that at first glance
+the result works deceptively like outer indexing. Returning to our example of a
+2D array, both ``x[:2, [0, 1]]`` and ``x[[0, 1], :2]`` return 2D arrays with
+axes in the same order as the original array.
+
+However, as soon as two or more non-slice objects (including integers) are
+introduced, vectorized indexing rules apply. The axes introduced by the array
+indices are at the front, unless all array indices are consecutive, in which
+case NumPy deduces where the user "expects" them to be. Consider indexing a 3D
+array ``arr`` with shape ``(X, Y, Z)``:
+
+1. ``arr[:, [0, 1], 0]`` has shape ``(X, 2)``.
+2. ``arr[[0, 1], 0, :]`` has shape ``(2, Z)``.
+3. ``arr[0, :, [0, 1]]`` has shape ``(2, Y)``, not ``(Y, 2)``!
+
+These first two cases are intuitive and consistent with outer indexing, but
+this last case is quite surprising, even to many highly experienced NumPy users.
+
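+These shapes can be checked directly, e.g., for ``(X, Y, Z) = (5, 6, 7)``::
+
+    >>> arr = np.ones((5, 6, 7))
+    >>> arr[:, [0, 1], 0].shape
+    (5, 2)
+    >>> arr[[0, 1], 0, :].shape
+    (2, 7)
+    >>> arr[0, :, [0, 1]].shape  # the surprising case
+    (2, 6)
+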
+Mixed cases involving multiple array indices are also surprising, and only
+less problematic because the current behavior is so useless that it is rarely
+encountered in practice. When a boolean array index is mixed with another
+boolean or integer array, the boolean array is converted to integer array
+indices (equivalent to ``np.nonzero()``) and then broadcast. For example,
+indexing a 2D array of size ``(2, 2)`` like ``x[[True, False], [True, False]]``
+produces a 1D vector with shape ``(1,)``, not a 2D sub-matrix with shape
+``(1, 1)``.
+
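+This conversion can be reproduced with current NumPy::
+
+    >>> x = np.arange(4).reshape(2, 2)
+    >>> mask = np.array([True, False])
+    >>> x[mask, mask]  # the boolean arrays become x[[0], [0]]
+    array([0])
+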
+Mixed indexing seems so tricky that it is tempting to say that it never should
+be used. However, it is not easy to avoid, because NumPy implicitly adds full
+slices if there are fewer indices than the full dimensionality of the indexed
+array. This means that indexing a 2D array like ``x[[0, 1]]`` is equivalent to
+``x[[0, 1], :]``. These cases are not surprising, but they constrain the
+behavior of mixed indexing.
+
+Indexing in other Python array libraries
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Indexing is a useful and widely recognized mechanism for accessing
+multi-dimensional array data, so it is no surprise that many other libraries in
+the scientific Python ecosystem also support array indexing.
+
+Unfortunately, the full complexity of NumPy's indexing rules means that it is
+both challenging and undesirable for other libraries to copy its behavior in
+all of its nuance. The only full implementation of NumPy-style indexing is
+NumPy itself. Even projects like dask.array and h5py, which otherwise attempt
+to copy NumPy's API exactly, support only *most* types of array indexing in
+some form.
+
+Vectorized indexing in particular can be challenging to implement with array
+storage backends not based on NumPy. In contrast, indexing by 1D arrays along
+at least one dimension in the style of outer indexing is much more achievable.
+This has led many libraries (including dask and h5py) to attempt to define a
+safe subset of NumPy-style indexing that is equivalent to outer indexing, e.g.,
+by only allowing indexing with an array along at most one dimension. However,
+this is quite challenging to do correctly in a general enough way to be useful.
+For example, the current versions of dask and h5py both handle mixed indexing
+in case 3 above inconsistently with NumPy. This is quite likely to lead to
+bugs.
+
+These inconsistencies, in addition to the broader challenge of implementing
+every type of indexing logic, make it challenging to write high-level array
+libraries like xarray or dask.array that can interchangeably index many types of
+array storage. In contrast, explicit APIs for outer and vectorized indexing in
+NumPy would provide a model that external libraries could reliably emulate, even
+if they don't support every type of indexing.
+
+High level changes
+------------------
+
+Inspired by multiple "indexer" attributes for controlling different types
+of indexing behavior in pandas, we propose to:
+
+1. Introduce ``arr.oindex[indices]`` which allows array indices, but
+ uses outer indexing logic.
+2. Introduce ``arr.vindex[indices]`` which uses the current
+ "vectorized"/broadcasted logic but with two differences from
+ legacy indexing:
+
+ * Boolean indices are not supported. All indices must be integers,
+ integer arrays or slices.
+ * The integer index result dimensions are always the first axes
+ of the result array. No transpose is done, even for a single
+ integer array index.
+
+3. Plain indexing on arrays will start to give warnings and eventually
+ errors in cases where one of the explicit indexers should be preferred:
+
+ * First, in all cases where legacy and outer indexing would give
+ different results.
+ * Later, potentially in all cases involving an integer array.
+
+These constraints are sufficient for making indexing generally consistent
+with expectations and providing a less surprising learning curve with
+``oindex``.
+
+Note that all things mentioned here apply to assignment as well as
+subscription.
+
+Understanding these details is *not* easy. The `Examples` section in the
+discussion gives code examples, and the hopefully easier
+`Motivational Example` provides some motivating use cases for the general
+ideas; it is likely a good start for anyone not intimately familiar with
+advanced indexing.
+
+
+Detailed Description
+--------------------
+
+Proposed rules
+~~~~~~~~~~~~~~
+
+From the three problems noted above some expectations for NumPy can
+be deduced:
+
+1. There should be a prominent outer/orthogonal indexing method such as
+ ``arr.oindex[indices]``.
+
+2. Considering how confusing vectorized/fancy indexing can be, it should
+   be possible to make it explicit (e.g. ``arr.vindex[indices]``).
+
+3. A new ``arr.vindex[indices]`` method would not be tied to the
+   confusing transpose rules of fancy indexing; this is needed, for
+   example, even for the simple case of a single advanced index. Thus,
+   no transposing should be done. The axes created by the integer array
+   indices are always inserted at the front, even for a single index.
+
+4. Boolean indexing is conceptually outer indexing. Broadcasting
+ together with other advanced indices in the manner of legacy
+ indexing is generally not helpful or well defined.
+ A user who wishes the "``nonzero``" plus broadcast behaviour can thus
+ be expected to do this manually. Thus, ``vindex`` does not need to
+ support boolean index arrays.
+
+5. An ``arr.legacy_index`` attribute should be implemented to support
+ legacy indexing. This gives a simple way to update existing codebases
+ using legacy indexing, which will make the deprecation of plain indexing
+ behavior easier. The longer name ``legacy_index`` is intentionally chosen
+ to be explicit and discourage its use in new code.
+
+6. Plain indexing ``arr[...]`` should return an error for ambiguous cases.
+   To begin with, this probably means that cases where ``arr[ind]`` and
+   ``arr.oindex[ind]`` return different results give deprecation warnings.
+   This includes every use of vectorized indexing with multiple integer arrays.
+   Due to the transposing behaviour, this means that ``arr[0, :, index_arr]``
+   will be deprecated, but ``arr[:, 0, index_arr]`` will not for the time being.
+
+7. To ensure that existing subclasses of `ndarray` that override indexing
+   do not inadvertently revert to default behavior for indexing attributes,
+   these attributes should have explicit checks that disable them if
+   ``__getitem__`` or ``__setitem__`` has been overridden.
+
+Since, unlike plain indexing, the new indexing attributes are explicitly aimed
+at higher dimensional indexing, several additional changes should be
+implemented:
+
+* The indexing attributes will enforce an exact match between the index and
+  the array's dimensions. This means that no implicit ellipsis (``...``) will
+  be added. Unless an ellipsis is present, the indexing expression will thus
+  only work for an array with a specific number of dimensions.
+ This makes the expression more explicit and safeguards against wrong
+ dimensionality of arrays.
+ There should be no implications for "duck typing" compatibility with
+ builtin Python sequences, because Python sequences only support a limited
+ form of "basic indexing" with integers and slices.
+
+* The current plain indexing allows for the use of non-tuples for
+ multi-dimensional indexing such as ``arr[[slice(None), 2]]``.
+ This creates some inconsistencies and thus the indexing attributes
+  should only allow plain Python tuples for this purpose.
+ (Whether or not this should be the case for plain indexing is a
+ different issue.)
+
+* The new attributes should not use ``__getitem__`` to implement
+  ``__setitem__``, since this is a kludge and not useful for vectorized
+  indexing (not yet implemented).
+
+
+Open Questions
+~~~~~~~~~~~~~~
+
+* The names ``oindex``, ``vindex`` and ``legacy_index`` are just suggestions
+  at the time of writing; another name NumPy has used for something like
+  ``oindex`` is ``np.ix_``. See also below.
+
+* ``oindex`` and ``vindex`` could always return copies, even when no array
+ operation occurs. One argument for allowing a view return is that this way
+ ``oindex`` can be used as a general index replacement.
+  However, there is one argument for returning copies: it is possible for
+  ``arr.vindex[array_scalar, ...]`` that ``array_scalar`` should be
+  a 0-D array but is not, since 0-D arrays tend to be converted to scalars.
+  Copying always "fixes" this possible inconsistency.
+
+* The final state into which plain indexing should morph is not fixed in this
+  NEP. It is, for example, possible that ``arr[index]`` will be equivalent to
+  ``arr.oindex`` at some point in the future.
+ Since such a change will take years, it seems unnecessary to make
+ specific decisions at this time.
+
+* The proposed changes to plain indexing could be postponed indefinitely or
+  abandoned, in order not to break or force major fixes to existing code bases.
+
+
+Alternative Names
+~~~~~~~~~~~~~~~~~
+
+Possible names that have been suggested (more suggestions may be added).
+
+============== ============ ========
+**Orthogonal** oindex oix
+**Vectorized** vindex vix
+**Legacy** legacy_index l/findex
+============== ============ ========
+
+
+Subclasses
+~~~~~~~~~~
+
+Subclasses are a bit problematic in the light of these changes. There are
+some possible solutions for this. For most subclasses (those which do not
+provide ``__getitem__`` or ``__setitem__``) the special attributes should
+just work. Subclasses that *do* provide them must be updated accordingly
+and should preferably not subclass ``oindex`` and ``vindex``.
+
+All subclasses will inherit the attributes; however, the implementation
+of ``__getitem__`` on these attributes should test
+``subclass.__getitem__ is ndarray.__getitem__``. If not, the
+subclass has special handling for indexing and ``NotImplementedError``
+should be raised, requiring that the indexing attributes are also explicitly
+overridden. Likewise, implementations of ``__setitem__`` should check whether
+``__setitem__`` is overridden.
+
+A further question is how to facilitate implementing the special attributes.
+Also there is the weird functionality where ``__setitem__`` calls
+``__getitem__`` for non-advanced indices. It might be good to avoid it for
+the new attributes, but on the other hand, that may make it even more
+confusing.
+
+To facilitate implementations we could provide functions similar to
+``operator.itemgetter`` and ``operator.setitem`` for the attributes.
+Possibly a mixin could be provided to help implementation. These improvements
+are not essential to the initial implementation, so they are saved for
+future work.
+
+Implementation
+--------------
+
+Implementation would start with writing special indexing objects available
+through ``arr.oindex``, ``arr.vindex``, and ``arr.legacy_index`` to allow these
+indexing operations. Also, we would need to start to deprecate those plain
+index operations which are now ambiguous.
+Furthermore, the NumPy code base will need to use the new attributes and
+tests will have to be adapted.
+
+
+Backward compatibility
+----------------------
+
+Since ``vindex`` and ``oindex`` are new attributes, their introduction raises
+no backward compatibility issues.
+
+To facilitate backwards compatibility as much as possible, we expect a long
+deprecation cycle for legacy indexing behavior and propose the new
+``legacy_index`` attribute.
+
+Some forward compatibility issues with subclasses that do not specifically
+implement the new methods may arise.
+
+
+Alternatives
+------------
+
+NumPy may not choose to offer these different types of indexing methods, or
+may choose to only offer them through specific functions instead of the
+proposed notation above.
+
+We don't think that new functions are a good alternative, because the indexing
+notation ``[]`` offers some syntactic advantages in Python (i.e., direct
+creation of slice objects) compared to functions.
+
+A more reasonable alternative would be to write new wrapper objects for
+alternative indexing with functions rather than methods (e.g.,
+``np.oindex(arr)[indices]`` instead of ``arr.oindex[indices]``). Functionally,
+this would be equivalent, but indexing is such a common operation that we
+think it is important to minimize syntax and worth implementing it directly
+on `ndarray` objects themselves. Indexing attributes also define a clear
+interface that is easier for alternative array implementations to copy,
+notwithstanding ongoing efforts to make it easier to override NumPy
+functions [2]_.
+
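+For illustration, a minimal sketch of such a wrapper object, here restricted
+to outer indexing with 1D integer or boolean arrays (the ``OuterIndexer``
+name is hypothetical, and ``np.ix_`` does the actual work)::
+
+    import numpy as np
+
+    class OuterIndexer:
+        """Function-style outer indexing: OuterIndexer(arr)[idx0, idx1]."""
+        def __init__(self, arr):
+            self.arr = arr
+
+        def __getitem__(self, key):
+            # np.ix_ only accepts 1D integer/boolean sequences, so this
+            # sketch handles neither slices nor scalars.
+            return self.arr[np.ix_(*key)]
+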
+Discussion
+----------
+
+The original discussion about vectorized vs outer/orthogonal indexing arose
+on the NumPy mailing list:
+
+ * https://mail.python.org/pipermail/numpy-discussion/2015-April/072550.html
+
+Some discussion can be found on the original pull request for this NEP:
+
+ * https://github.com/numpy/numpy/pull/6256
+
+Python implementations of the indexing operations can be found at:
+
+ * https://github.com/numpy/numpy/pull/5749
+ * https://gist.github.com/shoyer/c700193625347eb68fee4d1f0dc8c0c8
+
+
+Examples
+~~~~~~~~
+
+Since the various kinds of indexing are hard to grasp in many cases, these
+examples hopefully give some more insight. Note that they are all in terms
+of shape.
+In the examples, all original dimensions have 5 or more elements, while
+advanced indexing inserts smaller dimensions.
+These examples may be hard to grasp without working knowledge of advanced
+indexing as of NumPy 1.9.
+
+Example array::
+
+ >>> arr = np.ones((5, 6, 7, 8))
+
+
+Legacy fancy indexing
+---------------------
+
+Note that the same results can be achieved with ``arr.legacy_index``, with
+which the cases marked "future error" below will continue to work.
+
+A single index is transposed (this is the same for all indexing types)::
+
+ >>> arr[[0], ...].shape
+ (1, 6, 7, 8)
+ >>> arr[:, [0], ...].shape
+ (5, 1, 7, 8)
+
+
+Multiple indices are transposed *if* consecutive::
+
+ >>> arr[:, [0], [0], :].shape # future error
+ (5, 1, 8)
+ >>> arr[:, [0], :, [0]].shape # future error
+ (1, 5, 7)
+
+
+It is important to note that a scalar *is* an integer array index in this
+sense (and gets broadcast with the other advanced index)::
+
+ >>> arr[:, [0], 0, :].shape
+ (5, 1, 8)
+ >>> arr[:, [0], :, 0].shape # future error (scalar is "fancy")
+ (1, 5, 7)
+
+
+A single boolean index can act on multiple dimensions (even the whole
+array). It has to match the dimensions (as of 1.10, a mismatch gives a
+deprecation warning). The boolean index is otherwise identical to (multiple
+consecutive) integer array indices::
+
+ >>> # Create boolean index with one True value for the last two dimensions:
+ >>> bindx = np.zeros((7, 8), dtype=np.bool_)
+ >>> bindx[0, 0] = True
+ >>> arr[:, 0, bindx].shape
+ (5, 1)
+ >>> arr[0, :, bindx].shape
+ (1, 6)
+
+
+The combination with anything that is not a scalar is confusing, e.g.::
+
+ >>> arr[[0], :, bindx].shape # bindx result broadcasts with [0]
+ (1, 6)
+ >>> arr[:, [0, 1], bindx].shape # IndexError
+
+
+Outer indexing
+--------------
+
+Multiple indices are "orthogonal" and their result axes are inserted
+at the same place (they are not broadcasted)::
+
+ >>> arr.oindex[:, [0], [0, 1], :].shape
+ (5, 1, 2, 8)
+ >>> arr.oindex[:, [0], :, [0, 1]].shape
+ (5, 1, 7, 2)
+ >>> arr.oindex[:, [0], 0, :].shape
+ (5, 1, 8)
+ >>> arr.oindex[:, [0], :, 0].shape
+ (5, 1, 7)
+
+
+The results of boolean indices are always inserted where the index is::
+
+ >>> # Create boolean index with one True value for the last two dimensions:
+ >>> bindx = np.zeros((7, 8), dtype=np.bool_)
+ >>> bindx[0, 0] = True
+ >>> arr.oindex[:, 0, bindx].shape
+ (5, 1)
+ >>> arr.oindex[0, :, bindx].shape
+ (6, 1)
+
+
+This does not change in the presence of other advanced indices::
+
+ >>> arr.oindex[[0], :, bindx].shape
+ (1, 6, 1)
+ >>> arr.oindex[:, [0, 1], bindx].shape
+ (5, 2, 1)
+
+
+Vectorized/inner indexing
+-------------------------
+
+Multiple indices are broadcast against each other and iterated as one, as in
+fancy indexing, but the new axes are always inserted at the front::
+
+ >>> arr.vindex[:, [0], [0, 1], :].shape
+ (2, 5, 8)
+ >>> arr.vindex[:, [0], :, [0, 1]].shape
+ (2, 5, 7)
+ >>> arr.vindex[:, [0], 0, :].shape
+ (1, 5, 8)
+ >>> arr.vindex[:, [0], :, 0].shape
+ (1, 5, 7)
+
+
+The results of boolean indices are always inserted where the index is, exactly
+as in ``oindex``, given how specific they are to the axes they operate on::
+
+ >>> # Create boolean index with one True value for the last two dimensions:
+ >>> bindx = np.zeros((7, 8), dtype=np.bool_)
+ >>> bindx[0, 0] = True
+ >>> arr.vindex[:, 0, bindx].shape
+ (5, 1)
+ >>> arr.vindex[0, :, bindx].shape
+ (6, 1)
+
+
+But other advanced indices are again transposed to the front::
+
+ >>> arr.vindex[[0], :, bindx].shape
+ (1, 6, 1)
+ >>> arr.vindex[:, [0, 1], bindx].shape
+ (2, 5, 1)
+
+
+Motivational Example
+~~~~~~~~~~~~~~~~~~~~
+
+Imagine having data acquisition software storing ``D`` channels and
+``N`` datapoints sampled over time. It stores this into an ``(N, D)`` shaped
+array. During data analysis, we need to fetch a pool of channels, for example
+to calculate a mean over them.
+
+This data can be faked using::
+
+ >>> arr = np.random.random((100, 10))
+
+Now one may remember indexing with an integer array and find the correct code::
+
+ >>> group = arr[:, [2, 5]]
+    >>> mean_value = group.mean()
+
+However, assume that there were some specific time points (first dimension
+of the data) that need to be specially considered. These time points are
+already known and given by::
+
+ >>> interesting_times = np.array([1, 5, 8, 10], dtype=np.intp)
+
+Now to fetch them, we may try to modify the previous code::
+
+ >>> group_at_it = arr[interesting_times, [2, 5]]
+ IndexError: Ambiguous index, use `.oindex` or `.vindex`
+
+An error such as this will point the user to the indexing documentation.
+This should make it clear that ``oindex`` behaves more like slicing.
+So, out of the different methods it is the obvious choice
+(for now, this gives a shape mismatch error, but that error could possibly
+also mention ``oindex``)::
+
+ >>> group_at_it = arr.oindex[interesting_times, [2, 5]]
+
+Now of course one could also have used ``vindex``, but it is much less
+obvious how to achieve the same result::
+
+ >>> reshaped_times = interesting_times[:, np.newaxis]
+ >>> group_at_it = arr.vindex[reshaped_times, [2, 5]]
+
+
+One may find that, for example, the data is corrupt in some places.
+So, we need to replace these values by zero (or anything else) for these
+times. The first column may, for example, give the necessary information,
+so that the values can easily be changed using boolean indexing::
+
+ >>> bad_data = arr[:, 0] > 0.5
+ >>> arr[bad_data, :] = 0 # (corrupts further examples)
+
+Again, however, the columns may need to be handled more individually (but in
+groups), and the ``oindex`` attribute works well::
+
+ >>> arr.oindex[bad_data, [2, 5]] = 0
+
+Note that it would be very hard to do this using legacy fancy indexing.
+The only way would be to create an integer array first::
+
+ >>> bad_data_indx = np.nonzero(bad_data)[0]
+ >>> bad_data_indx_reshaped = bad_data_indx[:, np.newaxis]
+    >>> arr[bad_data_indx_reshaped, [2, 5]] = 0
+
+In any case, we can use ``oindex`` alone to do all of this without getting
+into any trouble or being confused by the whole complexity of advanced
+indexing.
+
+But now some new features are added to the data acquisition: different sensors
+have to be used depending on the time. Let us assume we have already
+created an array of indices::
+
+ >>> correct_sensors = np.random.randint(10, size=(100, 2))
+
+This lists, for each time, the two correct sensors in an ``(N, 2)`` array.
+
+A first try to achieve this may be ``arr[:, correct_sensors]``, but this does
+not work. It should quickly be clear that slicing cannot achieve the desired
+result. But hopefully users will remember that there is ``vindex`` as a more
+powerful and flexible approach to advanced indexing.
+One may, if trying ``vindex`` randomly, be confused by::
+
+ >>> new_arr = arr.vindex[:, correct_sensors]
+
+which is neither the same, nor the correct result (see the transposing rules)!
+This is because slicing still works the same in ``vindex``. However, reading
+the documentation and examples, one can hopefully quickly find the desired
+solution::
+
+ >>> rows = np.arange(len(arr))
+ >>> rows = rows[:, np.newaxis] # make shape fit with correct_sensors
+ >>> new_arr = arr.vindex[rows, correct_sensors]
+
+At this point we have left the straightforward world of ``oindex`` but can
+do random picking of any element from the array. Note that in the last example
+a method such as mentioned in the ``Related Questions`` section could be more
+straightforward. But this approach is even more flexible, since ``rows``
+does not have to be a simple ``arange``, but could be ``interesting_times``::
+
+ >>> interesting_times = np.array([0, 4, 8, 9, 10])
+ >>> correct_sensors_at_it = correct_sensors[interesting_times, :]
+ >>> interesting_times_reshaped = interesting_times[:, np.newaxis]
+    >>> new_arr_it = arr.vindex[interesting_times_reshaped, correct_sensors_at_it]
+
+A truly complex situation would arise if one were, for example, to pool ``L``
+experiments into an array shaped ``(L, N, D)``. But for ``oindex`` this should
+not result in surprises. ``vindex``, being more powerful, will quite
+certainly create some confusion in this case but also cover pretty much all
+eventualities.
+
+
+Copyright
+---------
+
+This document is placed under the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication [1]_.
+
+
+References and Footnotes
+------------------------
+
+.. [1] To the extent possible under law, the person who associated CC0
+ with this work has waived all copyright and related or neighboring
+ rights to this work. The CC0 license may be found at
+ https://creativecommons.org/publicdomain/zero/1.0/
+.. [2] e.g., see NEP 18,
+ http://www.numpy.org/neps/nep-0018-array-function-protocol.html
diff --git a/doc/neps/nep-0022-ndarray-duck-typing-overview.rst b/doc/neps/nep-0022-ndarray-duck-typing-overview.rst
new file mode 100644
index 000000000..077166453
--- /dev/null
+++ b/doc/neps/nep-0022-ndarray-duck-typing-overview.rst
@@ -0,0 +1,352 @@
+===========================================================
+NEP 22 — Duck typing for NumPy arrays – high level overview
+===========================================================
+
+:Author: Stephan Hoyer <shoyer@google.com>, Nathaniel J. Smith <njs@pobox.com>
+:Status: Final
+:Type: Informational
+:Created: 2018-03-22
+:Resolution: https://mail.python.org/pipermail/numpy-discussion/2018-September/078752.html
+
+Abstract
+--------
+
+We outline a high-level vision for how NumPy will approach handling
+“duck arrays”. This is an Informational-class NEP; it doesn’t
+prescribe full details for any particular implementation. In brief, we
+propose developing a number of new protocols for defining
+implementations of multi-dimensional arrays with high-level APIs
+matching NumPy.
+
+
+Detailed description
+--------------------
+
+Traditionally, NumPy’s ``ndarray`` objects have provided two things: a
+high level API for expression operations on homogenously-typed,
+arbitrary-dimensional, array-structured data, and a concrete
+implementation of the API based on strided in-RAM storage. The API is
+powerful, fairly general, and used ubiquitously across the scientific
+Python stack. The concrete implementation, on the other hand, is
+suitable for a wide range of uses, but has limitations: as data sets
+grow and NumPy becomes used in a variety of new environments, there
+are increasingly cases where the strided in-RAM storage strategy is
+inappropriate, and users find they need sparse arrays, lazily
+evaluated arrays (as in dask), compressed arrays (as in blosc), arrays
+stored in GPU memory, arrays stored in alternative formats such as
+Arrow, and so forth – yet users still want to work with these arrays
+using the familiar NumPy APIs, and re-use existing code with minimal
+(ideally zero) porting overhead. As a working shorthand, we call these
+“duck arrays”, by analogy with Python’s “duck typing”: a “duck array”
+is a Python object which “quacks like” a numpy array in the sense that
+it has the same or similar Python API, but doesn’t share the C-level
+implementation.
+
+This NEP doesn’t propose any specific changes to NumPy or other
+projects; instead, it gives an overview of how we hope to extend NumPy
+to support a robust ecosystem of projects implementing and relying
+upon its high level API.
+
+Terminology
+~~~~~~~~~~~
+
+“Duck array” works fine as a placeholder for now, but it’s pretty
+jargony and may confuse new users, so we may want to pick something
+else for the actual API functions. Unfortunately, “array-like” is
+already taken for the concept of “anything that can be coerced into an
+array” (including e.g. list objects), and “anyarray” is already taken
+for the concept of “something that shares ndarray’s implementation,
+but has different semantics”, which is the opposite of a duck array
+(e.g., np.matrix is an “anyarray”, but is not a “duck array”). This is
+a classic bike-shed so for now we’re just using “duck array”. Some
+possible options though include: arrayish, pseudoarray, nominalarray,
+ersatzarray, arraymimic, ...
+
+
+General approach
+~~~~~~~~~~~~~~~~
+
+At a high level, duck array support requires working through each of
+the API functions provided by NumPy, and figuring out how it can be
+extended to work with duck array objects. In some cases this is easy
+(e.g., methods/attributes on ndarray itself); in other cases it’s more
+difficult. Here are some principles we’ve found useful so far:
+
+
+Principle 1: Focus on “full” duck arrays, but don’t rule out “partial” duck arrays
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+We can distinguish between two classes:
+
+* “full” duck arrays, which aspire to fully implement np.ndarray’s
+ Python-level APIs and work essentially anywhere that np.ndarray
+ works
+
+* “partial” duck arrays, which intentionally implement only a subset
+ of np.ndarray’s API.
+
+Full duck arrays are, well, kind of boring. They have exactly the same
+semantics as ndarray, with differences being restricted to
+under-the-hood decisions about how the data is actually stored. The
+kind of people that are excited about making numpy more extensible are
+also, unsurprisingly, excited about changing or extending numpy’s
+semantics. So there’s been a lot of discussion of how to best support
+partial duck arrays. We've been guilty of this ourselves.
+
+At this point though, we think the best general strategy is to focus
+our efforts primarily on supporting full duck arrays, and only worry
+about partial duck arrays as much as we need to in order to make sure we don't
+accidentally rule them out for no reason.
+
+Why focus on full duck arrays? Several reasons:
+
+First, there are lots of very clear use cases. Potential consumers of
+the full duck array interface include almost every package that uses
+numpy (scipy, sklearn, astropy, ...), and in particular packages that
+provide array-wrapping-classes that handle multiple types of arrays,
+such as xarray and dask.array. Potential implementers of the full duck
+array interface include: distributed arrays, sparse arrays, masked
+arrays, arrays with units (unless they switch to using dtypes),
+labeled arrays, and so forth. Clear use cases lead to good and
+relevant APIs.
+
+Second, the Anna Karenina principle applies here: full duck arrays are
+all alike, but every partial duck array is partial in its own way:
+
+* ``xarray.DataArray`` is mostly a duck array, but has incompatible
+ broadcasting semantics.
+* ``xarray.Dataset`` wraps multiple arrays in one object; it still
+ implements some array interfaces like ``__array_ufunc__``, but
+ certainly not all of them.
+* ``pandas.Series`` has methods with similar behavior to numpy, but
+ unique null-skipping behavior.
+* scipy’s ``LinearOperator``\s support matrix multiplication and nothing else
+* h5py and similar libraries for accessing array storage have objects
+ that support numpy-like slicing and conversion into a full array,
+ but not computation.
+* Some classes may be similar to ndarray, but without supporting the
+ full indexing semantics.
+
+And so forth.
+
+Despite our best attempts, we haven't found any clear, unique way of
+slicing up the ndarray API into a hierarchy of related types that
+captures these distinctions; in fact, it’s unlikely that any single
+person even understands all the distinctions. And this is important,
+because we have a *lot* of APIs that we need to add duck array support
+to (both in numpy and in all the projects that depend on numpy!). By
+definition, these already work for ``ndarray``, so hopefully getting
+them to work for full duck arrays shouldn’t be so hard, since by
+definition full duck arrays act like ``ndarray``. It’d be very
+cumbersome to have to go through each function and identify the exact
+subset of the ndarray API that it needs, then figure out which partial
+array types can/should support it. Once we have things working for
+full duck arrays, we can go back later and refine the APIs needed
+further as needed. Focusing on full duck arrays allows us to start
+making progress immediately.
+
+In the future, it might be useful to identify specific use cases for
+duck arrays and standardize narrower interfaces targeted just at those
+use cases. For example, it might make sense to have a standard “array
+loader” interface that file access libraries like h5py, netcdf, pydap,
+zarr, ... all implement, to make it easy to switch between these
+libraries. But that’s something that we can do as we go, and it
+doesn’t necessarily have to involve the NumPy devs at all. For an
+example of what this might look like, see the documentation for
+`dask.array.from_array
+<http://dask.pydata.org/en/latest/array-api.html#dask.array.from_array>`__.
+
+
+Principle 2: Take advantage of duck typing
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``ndarray`` has a very large API surface area::
+
+ In [1]: len(set(dir(np.ndarray)) - set(dir(object)))
+ Out[1]: 138
+
+And this is a huge **under**\estimate, because there are also many
+free-standing functions in NumPy and other libraries which currently
+use the NumPy C API and thus only work on ``ndarray`` objects. In type
+theory, a type is defined by the operations you can perform on an
+object; thus, the actual type of ``ndarray`` includes not just its
+methods and attributes, but *all* of these functions. For duck arrays
+to be successful, they’ll need to implement a large proportion of the
+``ndarray`` API – but not all of it. (For example,
+``dask.array.Array`` does not provide an equivalent to the
+``ndarray.ptp`` method, presumably because no-one has ever noticed or
+cared about its absence. But this doesn’t seem to have stopped people
+from using dask.)
+
+This means that realistically, we can’t hope to define the whole duck
+array API up front, or that anyone will be able to implement it all in
+one go; this will be an incremental process. It also means that even
+the so-called “full” duck array interface is somewhat fuzzily defined
+at the borders; there are parts of the ``np.ndarray`` API that duck
+arrays won’t have to implement, but we aren’t entirely sure what those
+are.
+
+And ultimately, it isn’t really up to the NumPy developers to define
+what does or doesn’t qualify as a duck array. If we want scikit-learn
+functions to work on dask arrays (for example), then that’s going to
+require negotiation between those two projects to discover
+incompatibilities, and when an incompatibility is discovered it will
+be up to them to negotiate who should change and how. The NumPy
+project can provide technical tools and general advice to help resolve
+these disagreements, but we can’t force one group or another to take
+responsibility for any given bug.
+
+Therefore, even though we’re focusing on “full” duck arrays, we
+*don’t* attempt to define a normative “array ABC” – maybe this will be
+useful someday, but right now, it’s not. And as a convenient
+side-effect, the lack of a normative definition leaves partial duck
+arrays room to experiment.
+
+But, we do provide some more detailed advice for duck array
+implementers and consumers below.
+
+Principle 3: Focus on protocols
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Historically, numpy has had lots of success at interoperating with
+third-party objects by defining *protocols*, like ``__array__`` (asks
+an arbitrary object to convert itself into an array),
+``__array_interface__`` (a precursor to Python’s buffer protocol), and
+``__array_ufunc__`` (allows third-party objects to support ufuncs like
+``np.exp``).
+
+`NEP 16 <https://github.com/numpy/numpy/pull/10706>`_ took a
+different approach: we need a duck-array equivalent of
+``asarray``, and it proposed to do this by defining a version of
+``asarray`` that would let through objects which implemented a new
+AbstractArray ABC. As noted above, we now think that trying to define
+an ABC is a bad idea for other reasons. But when this NEP was
+discussed on the mailing list, we realized that even on its own
+merits, this idea is not so great. A better approach is to define a
+*method* that can be called on an arbitrary object to ask it to
+convert itself into a duck array, and then define a version of
+``asarray`` that calls this method.
+
+This is strictly more powerful: if an object is already a duck array,
+it can simply ``return self``. It allows more correct semantics: NEP
+16 assumed that ``asarray(obj, dtype=X)`` is the same as
+``asarray(obj).astype(X)``, but this isn’t true. And it supports more
+use cases: if h5py supported sparse arrays, it might want to provide
+an object which is not itself a sparse array, but which can be
+automatically converted into a sparse array. See NEP <XX, to be
+written> for full details.
+
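+In code, such a protocol might look roughly like the following sketch, in
+which the ``__duckarray__`` method name is purely illustrative::
+
+    import numpy as np
+
+    def duckarray(obj):
+        """Coerce obj to a duck array, falling back to np.asarray."""
+        method = getattr(type(obj), '__duckarray__', None)
+        if method is not None:
+            # An object that already is a duck array can return itself here.
+            return method(obj)
+        return np.asarray(obj)
+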
+The protocol approach is also more consistent with core Python
+conventions: for example, see the ``__iter__`` method for coercing
+objects to iterators, or the ``__index__`` protocol for safe integer
+coercion. And finally, focusing on protocols leaves the door open for
+partial duck arrays, which can pick and choose which subset of the
+protocols they want to participate in, each of which have well-defined
+semantics.
+
+Conclusion: protocols are one honking great idea – let’s do more of
+those.
+
+Principle 4: Reuse existing methods when possible
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+It’s tempting to try to define cleaned up versions of ndarray methods
+with a more minimal interface to allow for easier implementation. For
+example, ``__array_reshape__`` could drop some of the strange
+arguments accepted by ``reshape`` and ``__array_basic_getitem__``
+could drop all the `strange edge cases
+<http://www.numpy.org/neps/nep-0021-advanced-indexing.html>`__ of
+NumPy’s advanced indexing.
+
+But as discussed above, we don’t really know what APIs we need for
+duck-typing ndarray. We would inevitably end up with a very long list
+of new special methods. In contrast, existing methods like ``reshape``
+and ``__getitem__`` have the advantage of already being widely
+used/exercised by libraries that use duck arrays, and in practice, any
+serious duck array type is going to have to implement them anyway.
+
+Principle 5: Make it easy to do the right thing
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Making duck arrays work well is going to be a community effort.
+Documentation helps, but only goes so far. We want to make it easy to
+implement duck arrays that do the right thing.
+
+One way NumPy can help is by providing mixin classes for implementing
+large groups of related functionality at once.
+``NDArrayOperatorsMixin`` is a good example: it allows for
+implementing arithmetic operators implicitly via the
+``__array_ufunc__`` method. It’s not complete, and we’ll want more
+helpers like that (e.g. for reductions).
+
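+As a minimal sketch (the class is illustrative, not a proposed API), a duck
+array that delegates all ufuncs to a wrapped ndarray could look like::
+
+    import numpy as np
+    from numpy.lib.mixins import NDArrayOperatorsMixin
+
+    class Wrapped(NDArrayOperatorsMixin):
+        def __init__(self, data):
+            self.data = np.asarray(data)
+
+        def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+            # Unwrap Wrapped inputs, apply the ufunc, re-wrap the result.
+            arrays = [x.data if isinstance(x, Wrapped) else x for x in inputs]
+            return Wrapped(getattr(ufunc, method)(*arrays, **kwargs))
+
+With this, ``Wrapped([1, 2]) + Wrapped([3, 4])`` works: the mixin implements
+``__add__`` in terms of ``np.add``, which in turn calls ``__array_ufunc__``.
+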
+(We initially thought that the importance of these mixins might be an
+argument for providing an array ABC, since that’s the standard way to
+do mixins in modern Python. But in discussion around NEP 16 we
+realized that partial duck arrays also wanted to take advantage of
+these mixins in some cases, so even if we did have an array ABC then
+the mixins would still need some sort of separate existence. So never
+mind that argument.)
+
+Tentative duck array guidelines
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+As a general rule, libraries using duck arrays should insist upon the
+minimum possible requirements, and libraries implementing duck arrays
+should provide as complete of an API as possible. This will ensure
+maximum compatibility. For example, users should prefer to rely on
+``.transpose()`` rather than ``.swapaxes()`` (which can be implemented
+in terms of transpose), but duck array authors should ideally
+implement both.
+
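+For example, ``swapaxes`` can be expressed in terms of ``transpose`` alone
+(a minimal sketch, ignoring error checking)::
+
+    def swapaxes(arr, axis1, axis2):
+        # Build the axis permutation that exchanges axis1 and axis2.
+        order = list(range(arr.ndim))
+        order[axis1], order[axis2] = order[axis2], order[axis1]
+        return arr.transpose(order)
+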
+If you are trying to implement a duck array, then you should strive to
+implement everything. You certainly need ``.shape``, ``.ndim`` and
+``.dtype``, but also your dtype attribute should actually be a
+``numpy.dtype`` object, weird fancy indexing edge cases should ideally
+work, etc. Only details related to NumPy’s specific ``np.ndarray``
+implementation (e.g., ``strides``, ``data``, ``view``) are explicitly
+out of scope.
+
+A (very) rough sketch of future plans
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The proposals discussed so far – ``__array_ufunc__`` and some kind of
+``asarray`` protocol – are clearly necessary but not sufficient for
+full duck typing support. We expect the need for additional protocols
+to support (at least) these features:
+
+* **Concatenating** duck arrays, which would be used internally by other
+ array combining methods like stack/vstack/hstack. The implementation
+ of concatenate will need to be negotiated among the list of array
+ arguments. We expect to use an ``__array_concatenate__`` protocol
+ like ``__array_ufunc__`` instead of multiple dispatch.
+* **Ufunc-like functions** that currently aren’t ufuncs. Many NumPy
+ functions like median, percentile, sort, where and clip could be
+ written as generalized ufuncs but currently aren’t. Either these
+ functions should be written as ufuncs, or we should consider adding
+ another generic wrapper mechanism that works similarly to ufuncs but
+ makes fewer guarantees about how the implementation is done.
+* **Random number generation** with duck arrays, e.g.,
+ ``np.random.randn()``. For example, we might want to add new APIs
+ like ``random_like()`` for generating new arrays with a matching
+ shape *and* type – though we'll need to look at some real examples
+ of how these functions are used to figure out what would be helpful.
+* **Miscellaneous other functions** such as ``np.einsum``,
+ ``np.zeros_like``, and ``np.broadcast_to`` that don’t fall into any
+ of the above categories.
+* **Checking mutability** on duck arrays, which would imply that they
+ support assignment with ``__setitem__`` and the out argument to
+ ufuncs. Many otherwise fine duck arrays are not easily mutable (for
+ example, because they use some kinds of sparse or compressed
+ storage, or are in read-only shared memory), and it turns out that
+ frequently-used code like the default implementation of ``np.mean``
+ needs to check this (to decide whether it can re-use temporary
+ arrays).
+
+We intentionally do not describe exactly how to add support for these
+types of duck arrays here. These will be the subject of future NEPs.
+
+
+Copyright
+---------
+
+This document has been placed in the public domain.
diff --git a/doc/neps/nep-0023-backwards-compatibility.rst b/doc/neps/nep-0023-backwards-compatibility.rst
new file mode 100644
index 000000000..158b08f1f
--- /dev/null
+++ b/doc/neps/nep-0023-backwards-compatibility.rst
@@ -0,0 +1,288 @@
+=======================================================
+NEP 23 — Backwards compatibility and deprecation policy
+=======================================================
+
+:Author: Ralf Gommers <ralf.gommers@gmail.com>
+:Status: Draft
+:Type: Process
+:Created: 2018-07-14
+:Resolution: <url> (required for Accepted | Rejected | Withdrawn)
+
+Abstract
+--------
+
+In this NEP we describe NumPy's approach to backwards compatibility,
+its deprecation and removal policy, and the trade-offs and decision
+processes for individual cases where breaking backwards compatibility
+is considered.
+
+
+Detailed description
+--------------------
+
+NumPy has a very large user base. Those users rely on NumPy being stable,
+and on the code they write that uses NumPy functionality continuing to work.
+NumPy is also actively maintained and improved -- and sometimes improvements
+require, or are made much easier, by breaking backwards compatibility.
+Finally, there are trade-offs in stability for existing users vs. avoiding
+errors or having a better user experience for new users. These competing
+needs often give rise to heated debates and delays in accepting or rejecting
+contributions. This NEP tries to address that by providing a policy as well
+as examples and rationales for when it is or isn't a good idea to break
+backwards compatibility.
+
+General principles:
+
+- Aim not to break users' code unnecessarily.
+- Aim never to change code in ways that can result in users silently getting
+ incorrect results from their previously working code.
+- Backwards incompatible changes can be made, provided the benefits outweigh
+ the costs.
+- When assessing the costs, keep in mind that most users do not read the mailing
+  list, do not look at deprecation warnings, and sometimes wait more than one or
+  two years before upgrading from their old version. Keep in mind also that
+  NumPy has many hundreds of thousands or even a couple of million users, so
+  "no one will do or use this" is very likely incorrect.
+- Benefits include improved functionality, usability and performance (in order
+ of importance), as well as lower maintenance cost and improved future
+ extensibility.
+- Bug fixes are exempt from the backwards compatibility policy. However in case
+ of serious impact on users (e.g. a downstream library doesn't build anymore),
+ even bug fixes may have to be delayed for one or more releases.
+- The Python API and the C API will be treated in the same way.
+
+
+Examples
+^^^^^^^^
+
+We now discuss a number of concrete examples to illustrate typical issues
+and trade-offs.
+
+**Changing the behavior of a function**
+
+``np.histogram`` is probably the most infamous example.
+First, a new keyword ``new=False`` was introduced; its default was then
+switched to ``None`` one release later, and finally the keyword was removed
+again. Also, it has a ``normed`` keyword whose behavior could be considered
+either suboptimal or broken (depending on one's opinion on the statistics).
+A new keyword ``density`` was introduced to replace it; ``normed`` started
+giving ``DeprecationWarning`` only in v1.15.0. Evolution of ``histogram``::
+
+ def histogram(a, bins=10, range=None, normed=False): # v1.0.0
+
+ def histogram(a, bins=10, range=None, normed=False, weights=None, new=False): #v1.1.0
+
+ def histogram(a, bins=10, range=None, normed=False, weights=None, new=None): #v1.2.0
+
+ def histogram(a, bins=10, range=None, normed=False, weights=None): #v1.5.0
+
+ def histogram(a, bins=10, range=None, normed=False, weights=None, density=None): #v1.6.0
+
+ def histogram(a, bins=10, range=None, normed=None, weights=None, density=None): #v1.15.0
+ # v1.15.0 was the first release where `normed` started emitting
+ # DeprecationWarnings
+
+The ``new`` keyword was planned from the start to be temporary. Such a plan
+forces users to change their code more than once, which is almost never the
+right thing to do. Instead, a better approach here would have been to
+deprecate ``histogram`` and introduce a new function ``hist`` in its place.
+
+**Returning a view rather than a copy**
+
+The ``ndarray.diag`` method used to return a copy. A view would be better for
+both performance and design consistency. This change was warned about
+(``FutureWarning``) in v1.8.0, and in v1.9.0 ``diag`` was changed to return
+a *read-only* view. The planned change to a writeable view in v1.10.0 was
+postponed due to backwards compatibility concerns, and is still an open issue
+(gh-7661).
+
+What should have happened instead: nothing. This change resulted in a lot of
+discussions and wasted effort, did not achieve its final goal, and was not that
+important in the first place. Finishing the change to a *writeable* view in
+the future is not desired, because it will result in users silently getting
+different results if they upgraded multiple versions or simply missed the
+warnings.
+
+**Disallowing indexing with floats**
+
+Indexing an array with floats is asking for something ambiguous, and can be a
+sign of a bug in user code. After some discussion, it was deemed a good idea
+to deprecate indexing with floats. This was first tried for the v1.8.0
+release, however in pre-release testing it became clear that this would break
+many libraries that depend on NumPy. Therefore it was reverted before release,
+to give those libraries time to fix their code first. It was finally
+introduced for v1.11.0 and turned into a hard error for v1.12.0.
+
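+With NumPy >= 1.12 this is a hard error::
+
+    >>> np.arange(10)[1.5]
+    Traceback (most recent call last):
+      ...
+    IndexError: only integers, slices (`:`), ellipsis (`...`), numpy.newaxis (`None`) and integer or boolean arrays are valid indices
+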
+This change was disruptive; however, it did catch real bugs in, e.g., SciPy
+and scikit-learn. Overall the change was worth the cost, and introducing it in
+master first to allow testing, then removing it again before a release, is a
+useful strategy.
+
+Similar recent deprecations also look like good examples of
+cleanups/improvements:
+
+- removing deprecated boolean indexing (gh-8312)
+- deprecating truth testing on empty arrays (gh-9718)
+- deprecating ``np.sum(generator)`` (gh-10670; one issue with this one is that
+  its warning message is wrong -- this should error in the future).
+
+**Removing the financial functions**
+
+The financial functions (e.g. ``np.pmt``) are badly named, are present in the
+main NumPy namespace, and don't really fit well within NumPy's scope.
+They were added in 2008 after
+`a discussion <https://mail.python.org/pipermail/numpy-discussion/2008-April/032353.html>`_
+on the mailing list where opinion was divided (but a majority in favor).
+At the moment these functions don't cause a lot of overhead; however, there
+are multiple issues and PRs a year for them, which costs maintainer time to
+deal with. And they clutter up the ``numpy`` namespace. Removing them was
+discussed again in 2013 (gh-2880).
+
+This case is borderline, but given that they're clearly out of scope,
+deprecation and removal out of at least the main ``numpy`` namespace can be
+proposed. Alternatively, document clearly that new features for financial
+functions are unwanted, to keep the maintenance costs to a minimum.
+
+**Examples of features not added because of backwards compatibility**
+
+TODO: do we have good examples here? Possibly subclassing related?
+
+
+Removing complete submodules
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This year there have been suggestions to consider removing some or all of
+``numpy.distutils``, ``numpy.f2py``, ``numpy.linalg``, and ``numpy.random``.
+The motivation was that all of these cost maintenance effort, and that they
+slow down work on the core of NumPy (ndarrays, dtypes and ufuncs).
+
+The impact on downstream libraries and users would be very large, and
+maintenance of these modules would still have to happen. Therefore this is
+simply not a good idea; removing these submodules should not happen even for
+a new major version of NumPy.
+
+
+Subclassing of ndarray
+^^^^^^^^^^^^^^^^^^^^^^
+
+Subclassing of ``ndarray`` is a pain point. ``ndarray`` was not (or at least
+not well) designed to be subclassed. Despite that, a lot of subclasses have
+been created even within the NumPy code base itself, and some of those (e.g.
+``MaskedArray``, ``astropy.units.Quantity``) are quite popular. The main
+problems with subclasses are:
+
+- They make it hard to change ``ndarray`` in ways that would otherwise be
+ backwards compatible.
+- Some of them change the behavior of ndarray methods, making it difficult to
+ write code that accepts array duck-types.
+
+Subclassing ``ndarray`` has been officially discouraged for a long time. Of
+the most important subclasses, ``np.matrix`` will be deprecated (see gh-10142)
+and ``MaskedArray`` will be kept in NumPy (`NEP 17
+<http://www.numpy.org/neps/nep-0017-split-out-maskedarray.html>`_).
+``MaskedArray`` will ideally be rewritten in a way such that it uses only
+public NumPy APIs. For subclasses outside of NumPy, more work is needed to
+provide alternatives (e.g. mixins, see gh-9016 and gh-10446) or better support
+for custom dtypes (see gh-2899). Until that is done, subclasses need to be
+taken into account when making changes to the NumPy code base. A future change
+in NumPy to not support subclassing will certainly need a major version
+increase.
+
+
+Policy
+------
+
+1. Code changes that have the potential to silently change the results of users'
+   code must never be made (except in the case of clear bugs).
+2. Code changes that break users' code (i.e. the user will see a clear exception)
+ can be made, *provided the benefit is worth the cost* and suitable deprecation
+ warnings have been raised first.
+3. Deprecation warnings are in all cases warnings that functionality will be removed.
+   If there is no intent to remove functionality, then deprecation in documentation
+   only or other types of warnings shall be used.
+4. Deprecations for stylistic reasons (e.g. consistency between functions) are
+ strongly discouraged.
+
+Deprecations:
+
+- shall include the version numbers of both when the functionality was deprecated
+ and when it will be removed (either two releases after the warning is
+ introduced, or in the next major version).
+- shall include information on alternatives to the deprecated functionality, or a
+ reason for the deprecation if no clear alternative is available.
+- shall use ``VisibleDeprecationWarning`` rather than ``DeprecationWarning``
+ for cases of relevance to end users (as opposed to cases only relevant to
+ libraries building on top of NumPy).
+- shall be listed in the release notes of the release where the deprecation happened.
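+
+As an illustration, a deprecation following these rules might look like the
+sketch below (``old_behavior``, ``new_behavior`` and the version numbers are
+hypothetical, chosen only to show the policy in action)::
+
+    import warnings
+    import numpy as np
+
+    def old_behavior(arr):
+        # Deprecated in 1.16, scheduled for removal in 1.18 (two releases
+        # later); the message names the alternative, and the deprecation
+        # is listed in the 1.16 release notes.
+        warnings.warn(
+            "old_behavior was deprecated in NumPy 1.16 and will be removed "
+            "in NumPy 1.18; use new_behavior instead.",
+            np.VisibleDeprecationWarning, stacklevel=2)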
+
+Removal of deprecated functionality:
+
+- shall be done after 2 releases (assuming a 6-monthly release cycle; if that changes,
+ there shall be at least 1 year between deprecation and removal), unless the
+ impact of the removal is such that a major version number increase is
+ warranted.
+- shall be listed in the release notes of the release where the removal happened.
+
+Versioning:
+
+- removal of deprecated code can be done in any minor (but not bugfix) release.
+- for heavily used functionality (e.g. removal of ``np.matrix``, of a whole submodule,
+ or significant changes to behavior for subclasses) the major version number shall
+ be increased.
+
+In concrete cases where this policy needs to be applied, decisions are made according
+to the `NumPy governance model
+<https://docs.scipy.org/doc/numpy/dev/governance/index.html>`_.
+
+Functionality with more strict policies:
+
+- ``numpy.random`` has its own backwards compatibility policy,
+ see `NEP 19 <http://www.numpy.org/neps/nep-0019-rng-policy.html>`_.
+- The file format for ``.npy`` and ``.npz`` files must not be changed in a backwards
+ incompatible way.
+
+
+Alternatives
+------------
+
+**Being more aggressive with deprecations.**
+
+The goal of being more aggressive is to allow NumPy to move forward faster.
+This would avoid others inventing their own solutions (often in multiple
+places), and would benefit users without a legacy code base. We reject this
+alternative because of the place NumPy has in the scientific Python
+ecosystem: being fairly conservative is required in order not to increase the
+maintenance burden for downstream libraries and end users to an unacceptable
+level.
+
+**Semantic versioning.**
+
+This would change the versioning scheme for code removals; those could then
+only be done when the major version number is increased. Rationale for
+rejection: semantic versioning is relatively common in software engineering,
+however it is not at all common in the Python world. Also, it would mean that
+NumPy's version number simply starts to increase faster, which would be more
+confusing than helpful. gh-10156 contains more discussion on this alternative.
+
+
+Discussion
+----------
+
+TODO
+
+This section may just be a bullet list including links to any discussions
+regarding the NEP:
+
+- This includes links to mailing list threads or relevant GitHub issues.
+
+
+References and Footnotes
+------------------------
+
+.. [1] TODO
+
+
+Copyright
+---------
+
+This document has been placed in the public domain. [1]_
diff --git a/doc/neps/nep-0024-missing-data-2.rst b/doc/neps/nep-0024-missing-data-2.rst
new file mode 100644
index 000000000..c8b19561f
--- /dev/null
+++ b/doc/neps/nep-0024-missing-data-2.rst
@@ -0,0 +1,210 @@
+=============================================================
+NEP 24 — Missing Data Functionality - Alternative 1 to NEP 12
+=============================================================
+
+:Author: Nathaniel J. Smith <njs@pobox.com>, Matthew Brett <matthew.brett@gmail.com>
+:Status: Deferred
+:Type: Standards Track
+:Created: 2011-06-30
+
+
+Abstract
+--------
+
+*Context: this NEP was written as an alternative to NEP 12, which at the time of writing
+had an implementation that was merged into the NumPy master branch.*
+
+The principle of this NEP is to separate the APIs for masking and for missing values, contrasting:
+
+* The current implementation of masked arrays (NEP 12)
+* This proposal.
+
+This discussion is only of the API, and not of the implementation.
+
+Detailed description
+--------------------
+
+
+Rationale
+^^^^^^^^^
+
+The purpose of this aNEP (alternative NEP) is to define two interfaces --
+one for handling 'missing values', and one for handling 'masked arrays'.
+
+An ordinary value is something like an integer or a floating point number. A
+*missing* value is a placeholder for an ordinary value that is for some
+reason unavailable. For example, in working with statistical data, we often
+build tables in which each row represents one item, and each column
+represents properties of that item. For instance, we might take a group of
+people and for each one record height, age, education level, and income, and
+then stick these values into a table. But then we discover that our research
+assistant screwed up and forgot to record the age of one of our individuals.
+We could throw out the rest of their data as well, but this would be
+wasteful; even such an incomplete row is still perfectly usable for some
+analyses (e.g., we can compute the correlation of height and income). The
+traditional way to handle this would be to stick some particular meaningless
+value in for the missing data, e.g., recording this person's age as 0. But
+this is very error prone; we may later forget about these special values
+while running other analyses, and discover to our surprise that babies have
+higher incomes than teenagers. (In this case, the solution would be to just
+leave out all the items where we have no age recorded, but this isn't a
+general solution; many analyses require something more clever to handle
+missing values.) So instead of using an ordinary value like 0, we define a
+special "missing" value, written "NA" for "not available".
+
+Therefore, missing values have the following properties: Like any other
+value, they must be supported by your array's dtype -- you can't store a
+floating point number in an array with dtype=int32, and you can't store an NA
+in it either. You need an array with dtype=NAint32 or something (exact syntax
+to be determined). Otherwise, they act exactly like any other values. In
+particular, you can apply arithmetic functions and so forth to them. By
+default, any function which takes an NA as an argument always returns an NA
+as well, regardless of the values of the other arguments. This ensures that
+if we try to compute the correlation of income with age, we will get "NA",
+meaning "given that some of the entries could be anything, the answer could
+be anything as well". This reminds us to spend a moment thinking about how we
+should rephrase our question to be more meaningful. And as a convenience for
+those times when you do decide that you just want the correlation between the
+known ages and income, then you can enable this behavior by adding a single
+argument to your function call.
+
+For floating point computations, NAs and NaNs have (almost?) identical
+behavior. But they represent different things -- NaN an invalid computation
+like 0/0, NA a value that is not available -- and distinguishing between
+these things is useful because in some situations they should be treated
+differently. (For example, an imputation procedure should replace NAs with
+imputed values, but probably should leave NaNs alone.) And anyway, we can't
+use NaNs for integers, or strings, or booleans, so we need NA anyway, and
+once we have NA support for all these types, we might as well support it for
+floating point too for consistency.
+
+A masked array is, conceptually, an ordinary rectangular numpy array, which
+has had an arbitrarily-shaped mask placed over it. The result is,
+essentially, a non-rectangular view of a rectangular array. In principle,
+anything you can accomplish with a masked array could also be accomplished by
+explicitly keeping a regular array and a boolean mask array and using numpy
+indexing to combine them for each operation, but combining them into a single
+structure is much more convenient when you need to perform complex operations
+on the masked view of an array, while still being able to manipulate the mask
+in the usual ways. Therefore, masks are preserved through indexing, and
+functions generally treat masked-out values as if they were not even part of
+the array in the first place. (Maybe this is a good heuristic: a length-4
+array in which the last value has been masked out behaves just like an
+ordinary length-3 array, so long as you don't change the mask.) Except, of
+course, that you are free to manipulate the mask in arbitrary ways whenever
+you like; it's just a standard numpy array.
+
+There are some simple situations where one could use either of these tools to
+get the job done -- or other tools entirely, like using designated surrogate
+values (age=0), separate mask arrays, etc. But missing values are designed to
+be particularly helpful in situations where the missingness is an intrinsic
+feature of the data -- where there's a specific value that **should** exist,
+and if it did exist it'd mean something specific, but it **doesn't**. Masked
+arrays are designed to be particularly helpful in situations where we just
+want to temporarily ignore some data that does exist, or generally when we
+need to work with data that has a non-rectangular shape (e.g., if you make
+some measurement at each point on a grid laid over a circular agar dish, then
+the points that fall outside the dish aren't missing measurements, they're
+just meaningless).
+
+Initialization
+^^^^^^^^^^^^^^
+
+First, missing values can be set using ``np.NA`` and are displayed as ``NA``::
+
+ >>> np.array([1.0, 2.0, np.NA, 7.0], dtype='NA[f8]')
+ array([1., 2., NA, 7.], dtype='NA[<f8]')
+
+As the initialization is not ambiguous, this can be written without the NA
+dtype::
+
+ >>> np.array([1.0, 2.0, np.NA, 7.0])
+ array([1., 2., NA, 7.], dtype='NA[<f8]')
+
+Masked values can be set using ``np.IGNORE`` and are displayed as ``IGNORE``::
+
+ >>> np.array([1.0, 2.0, np.IGNORE, 7.0], masked=True)
+ array([1., 2., IGNORE, 7.], masked=True)
+
+As the initialization is not ambiguous, this can be written without
+``masked=True``::
+
+ >>> np.array([1.0, 2.0, np.IGNORE, 7.0])
+ array([1., 2., IGNORE, 7.], masked=True)
+
+Ufuncs
+^^^^^^
+
+By default, NA values propagate::
+
+ >>> na_arr = np.array([1.0, 2.0, np.NA, 7.0])
+ >>> np.sum(na_arr)
+ NA('float64')
+
+unless the ``skipna`` flag is set::
+
+ >>> np.sum(na_arr, skipna=True)
+ 10.0
+
+By default, masking does not propagate::
+
+ >>> masked_arr = np.array([1.0, 2.0, np.IGNORE, 7.0])
+ >>> np.sum(masked_arr)
+ 10.0
+
+unless the ``propmask`` flag is set::
+
+ >>> np.sum(masked_arr, propmask=True)
+ IGNORE
+
+An array can be masked, and contain NA values::
+
+ >>> both_arr = np.array([1.0, 2.0, np.IGNORE, np.NA, 7.0])
+
+In the default case, the behavior is obvious::
+
+ >>> np.sum(both_arr)
+ NA('float64')
+
+It's also obvious what to do with ``skipna=True``::
+
+ >>> np.sum(both_arr, skipna=True)
+ 10.0
+ >>> np.sum(both_arr, skipna=True, propmask=True)
+ IGNORE
+
+To break the tie between NA and IGNORE, NAs propagate harder::
+
+ >>> np.sum(both_arr, propmask=True)
+ NA('float64')
+
+Assignment
+^^^^^^^^^^
+
+is obvious in the NA case::
+
+ >>> arr = np.array([1.0, 2.0, 7.0])
+ >>> arr[2] = np.NA
+ TypeError('dtype does not support NA')
+ >>> na_arr = np.array([1.0, 2.0, 7.0], dtype='NA[f8]')
+ >>> na_arr[2] = np.NA
+ >>> na_arr
+ array([1., 2., NA], dtype='NA[<f8]')
+
+Direct assignment in the masked case is magic and confusing, and so happens
+only via the mask::
+
+    >>> masked_arr = np.array([1.0, 2.0, 7.0], masked=True)
+ >>> masked_arr[2] = np.NA
+ TypeError('dtype does not support NA')
+ >>> masked_arr[2] = np.IGNORE
+ TypeError('float() argument must be a string or a number')
+ >>> masked_arr.visible[2] = False
+ >>> masked_arr
+ array([1., 2., IGNORE], masked=True)
+
+
+Copyright
+---------
+
+This document has been placed in the public domain.
diff --git a/doc/neps/nep-0025-missing-data-3.rst b/doc/neps/nep-0025-missing-data-3.rst
new file mode 100644
index 000000000..6343759e8
--- /dev/null
+++ b/doc/neps/nep-0025-missing-data-3.rst
@@ -0,0 +1,469 @@
+======================================
+NEP 25 — NA support via special dtypes
+======================================
+
+:Author: Nathaniel J. Smith <njs@pobox.com>
+:Status: Deferred
+:Type: Standards Track
+:Created: 2011-07-08
+
+Abstract
+========
+
+*Context: this NEP was written as an additional alternative to NEP 12 (NEP 24
+is another alternative), which at the time of writing had an implementation
+that was merged into the NumPy master branch.*
+
+To try to make more progress on the whole missing values/masked arrays/...
+debate, it seems useful to have a more technical discussion of the pieces
+which we *can* agree on. This is the second such discussion; it attempts to
+nail down the details of how NAs can be implemented using special dtypes.
+
+Rationale
+---------
+
+An ordinary value is something like an integer or a floating point number. A
+missing value is a placeholder for an ordinary value that is for some reason
+unavailable. For example, in working with statistical data, we often build
+tables in which each row represents one item, and each column represents
+properties of that item. For instance, we might take a group of people and
+for each one record height, age, education level, and income, and then stick
+these values into a table. But then we discover that our research assistant
+screwed up and forgot to record the age of one of our individuals. We could
+throw out the rest of their data as well, but this would be wasteful; even
+such an incomplete row is still perfectly usable for some analyses (e.g., we
+can compute the correlation of height and income). The traditional way to
+handle this would be to stick some particular meaningless value in for the
+missing data, e.g., recording this person's age as 0. But this is very error
+prone; we may later forget about these special values while running other
+analyses, and discover to our surprise that babies have higher incomes than
+teenagers. (In this case, the solution would be to just leave out all the
+items where we have no age recorded, but this isn't a general solution; many
+analyses require something more clever to handle missing values.) So instead
+of using an ordinary value like 0, we define a special "missing" value,
+written "NA" for "not available".
+
+There are several possible ways to represent such a value in memory. For
+instance, we could reserve a specific value (like 0, or a particular NaN, or
+the smallest negative integer) and then ensure that this value is treated
+specially by all arithmetic and other operations on our array. Another option
+would be to add an additional mask array next to our main array, use this to
+indicate which values should be treated as NA, and then extend our array
+operations to check this mask array whenever performing computations. Each
+implementation approach has various strengths and weaknesses, but here we focus
+on the former (value-based) approach exclusively and leave the possible
+addition of the latter to future discussion. The core advantages of this
+approach are (1) it adds no additional memory overhead, (2) it is
+straightforward to store and retrieve such arrays to disk using existing file
+storage formats, (3) it allows binary compatibility with R arrays including NA
+values, (4) it is compatible with the common practice of using NaN to indicate
+missingness when working with floating point numbers, (5) the dtype is already
+a place where "weird things can happen" -- there are a wide variety of dtypes
+that don't act like ordinary numbers (including structs, Python objects,
+fixed-length strings, ...), so code that accepts arbitrary numpy arrays already
+has to be prepared to handle these (even if only by checking for them and
+raising an error). Therefore adding yet more new dtypes has less impact on
+extension authors than if we change the ndarray object itself.
+
+The basic semantics of NA values are as follows. Like any other value, they
+must be supported by your array's dtype -- you can't store a floating point
+number in an array with dtype=int32, and you can't store an NA in it either.
+You need an array with dtype=NAint32 or something (exact syntax to be
+determined). Otherwise, NA values act exactly like any other values. In
+particular, you can apply arithmetic functions and so forth to them. By
+default, any function which takes an NA as an argument always returns an NA as
+well, regardless of the values of the other arguments. This ensures that if we
+try to compute the correlation of income with age, we will get "NA", meaning
+"given that some of the entries could be anything, the answer could be anything
+as well". This reminds us to spend a moment thinking about how we should
+rephrase our question to be more meaningful. And as a convenience for those
+times when you do decide that you just want the correlation between the known
+ages and income, then you can enable this behavior by adding a single argument
+to your function call.
+
+For floating point computations, NAs and NaNs have (almost?) identical
+behavior. But they represent different things -- NaN an invalid computation
+like 0/0, NA a value that is not available -- and distinguishing between these
+things is useful because in some situations they should be treated differently.
+(For example, an imputation procedure should replace NAs with imputed values,
+but probably should leave NaNs alone.) And anyway, we can't use NaNs for
+integers, or strings, or booleans, so we need NA anyway, and once we have NA
+support for all these types, we might as well support it for floating point too
+for consistency.
+
+General strategy
+================
+
+Numpy already has a general mechanism for defining new dtypes and slotting them
+in so that they're supported by ndarrays, by the casting machinery, by ufuncs,
+and so on. In principle, we could implement NA-dtypes just using these existing
+interfaces. But we don't want to do that, because defining all those new ufunc
+loops etc. from scratch would be a huge hassle, especially since the basic
+functionality needed is the same in all cases. So we need some generic
+functionality for NAs -- but it would be better not to bake this in as a single
+set of special "NA types", since users may well want to define new custom
+dtypes that have their own NA values, and have them integrate well with the rest of
+the NA machinery. Our strategy, therefore, is to avoid the `mid-layer mistake`_
+by exposing some code for generic NA handling in different situations, which
+dtypes can selectively use or not as they choose.
+
+.. _mid-layer mistake: https://lwn.net/Articles/336262/
+
+Some example use cases:
+ 1. We want to define a dtype that acts exactly like an int32, except that the
+    most negative value is treated as NA (see the sketch after this list).
+ 2. We want to define a parametrized dtype to represent `categorical data`_,
+ and the bit-pattern to be used for NA depends on the number of categories
+ defined, so our code needs to play an active role handling it rather than
+ simply deferring to the standard machinery.
+ 3. We want to define a dtype that acts like a length-10 string and supports
+ NAs. Since our string may hold arbitrary binary values, we want to actually
+ allocate 11 bytes for it, with the first byte a flag indicating whether this
+ string is NA and the rest containing the string content.
+ 4. We want to define a dtype that allows multiple different types of NA data,
+ which print differently and can be distinguished by the new ufunc that we
+ define called ``is_na_of_type(...)``, but otherwise takes advantage of the
+ generic NA machinery for most operations.
+
+.. _categorical data: http://mail.scipy.org/pipermail/numpy-discussion/2010-August/052401.html
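+
+To make use case 1 concrete, the bitpattern idea can be emulated by hand with
+plain NumPy today (an illustrative sketch only; the proposed dtype machinery
+would do this bookkeeping transparently)::
+
+    import numpy as np
+
+    NA_INT32 = np.int32(-2**31)   # reserve the most negative int32 as NA
+
+    a = np.array([1, 2, 3], dtype=np.int32)
+    a[1] = NA_INT32               # "assign NA"
+    is_na = a == NA_INT32         # the role of the ``isna`` function
+    total = a[~is_na].sum()       # a manual skip-NA reduction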
+
+dtype C-level API extensions
+============================
+
+The `PyArray_Descr`_ struct gains the following new fields::
+
+ void * NA_value;
+ PyArray_Descr * NA_extends;
+ int NA_extends_offset;
+
+.. _PyArray_Descr: http://docs.scipy.org/doc/numpy/reference/c-api.types-and-structures.html#PyArray_Descr
+
+The following new flag values are defined::
+
+ NPY_NA_AUTO_ARRFUNCS
+ NPY_NA_AUTO_CAST
+ NPY_NA_AUTO_UFUNC
+ NPY_NA_AUTO_UFUNC_CHECKED
+ NPY_NA_AUTO_ALL /* the above flags OR'ed together */
+
+The `PyArray_ArrFuncs`_ struct gains the following new fields::
+
+ void (*isna)(void * src, void * dst, npy_intp n, void * arr);
+ void (*clearna)(void * data, npy_intp n, void * arr);
+
+.. _PyArray_ArrFuncs: http://docs.scipy.org/doc/numpy/reference/c-api.types-and-structures.html#PyArray_ArrFuncs
+
+We add at least one new convenience macro::
+
+ #define NPY_NA_SUPPORTED(dtype) ((dtype)->f->isna != NULL)
+
+The general idea is that anywhere where we used to call a dtype-specific
+function pointer, the code will be modified to instead:
+
+ 1. Check for whether the relevant ``NPY_NA_AUTO_...`` bit is enabled, the
+ NA_extends field is non-NULL, and the function pointer we wanted to call
+ is NULL.
+ 2. If these conditions are met, then use ``isna`` to identify which entries
+ in the array are NA, and handle them appropriately. Then look up whatever
+ function we were *going* to call using this dtype on the ``NA_extends``
+ dtype instead, and use that to handle the non-NA elements.
+
+For more specifics, see following sections.
+
+Note that if ``NA_extends`` points to a parametrized dtype, then the dtype
+object it points to must be fully specified. For example, if it is a string
+dtype, it must have a non-zero ``elsize`` field.
+
+In order to handle the case where the NA information is stored in a field next
+to the "real" data, the ``NA_extends_offset`` field is set to a non-zero value;
+it must point to the location within each element of this dtype where some data
+of the ``NA_extends`` dtype is found. For example, if we are storing
+10-byte strings with an NA indicator byte at the beginning, then we have::
+
+ elsize == 11
+ NA_extends_offset == 1
+ NA_extends->elsize == 10
+
+When delegating to the ``NA_extends`` dtype, we offset our data pointer by
+``NA_extends_offset`` (while keeping our strides the same) so that it sees an
+array of data of the expected type (plus some superfluous padding). This is
+basically the same mechanism that record dtypes use, IIUC, so it should be
+pretty well-tested.
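+
+Just to visualize that layout, the same bytes can be described with a
+structured dtype (illustration only; the proposal does not actually use
+record dtypes)::
+
+    import numpy as np
+
+    # one NA-flag byte followed by 10 bytes of string payload
+    na_str10 = np.dtype([('na_flag', 'u1'), ('data', 'S10')])
+    assert na_str10.itemsize == 11            # elsize
+    assert na_str10.fields['data'][1] == 1    # NA_extends_offset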
+
+When delegating to a function that cannot handle "misbehaved" source data (see
+the ``PyArray_ArrFuncs`` documentation for details), then we need to check for
+alignment issues before delegating (especially with a non-zero
+``NA_extends_offset``). If there's a problem, then we need to "clean up" the
+source data first, using the usual mechanisms for handling misaligned data. (Of
+course, we should usually set up our dtypes so that there aren't any alignment
+issues, but if someone screws that up, or decides that reduced memory usage is
+more important to them than fast inner loops, then we should still handle that
+gracefully, as we do now.)
+
+The ``NA_value`` and ``clearna`` fields are used for various sorts of casting.
+``NA_value`` is a bit-pattern to be used when, for example, assigning from
+np.NA. ``clearna`` can be a no-op if ``elsize`` and ``NA_extends->elsize`` are
+the same, but if they aren't then it should clear whatever auxiliary NA storage
+this dtype uses, so that none of the specified array elements are NA.
+
+Core dtype functions
+--------------------
+
+The following functions are defined in ``PyArray_ArrFuncs``. The special
+behavior described here is enabled by the NPY_NA_AUTO_ARRFUNCS bit in the dtype
+flags, and only enabled if the given function field is *not* filled in.
+
+``getitem``: Calls ``isna``. If ``isna`` returns true, returns np.NA.
+Otherwise, delegates to the ``NA_extends`` dtype.
+
+``setitem``: If the input object is ``np.NA``, then runs
+``memcpy(self->NA_value, data, arr->dtype->elsize);``. Otherwise, calls
+``clearna``, and then delegates to the ``NA_extends`` dtype.
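+
+In Python-like pseudocode, the delegation for these two functions is roughly
+the following (an illustrative sketch of the C-level logic, not runnable
+code)::
+
+    def auto_getitem(dtype, item):
+        if dtype.isna(item):
+            return np.NA
+        # delegate to the underlying dtype, skipping the NA storage
+        return dtype.NA_extends.getitem(item + dtype.NA_extends_offset)
+
+    def auto_setitem(dtype, item, value):
+        if value is np.NA:
+            memcpy(item, dtype.NA_value, dtype.elsize)
+        else:
+            dtype.clearna(item)
+            dtype.NA_extends.setitem(item + dtype.NA_extends_offset, value)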
+
+``copyswapn``, ``copyswap``: FIXME: Not sure whether there's any special
+handling to use for these?
+
+``compare``: FIXME: how should this handle NAs? R's sort function *discards*
+NAs, which doesn't seem like a good option.
+
+``argmax``: FIXME: what is this used for? If it's the underlying implementation
+for np.max, then it really needs some way to get a skipna argument. If not,
+then the appropriate semantics depends on what it's supposed to accomplish...
+
+``dotfunc``: QUESTION: is it actually guaranteed that everything has the same
+dtype? FIXME: same issues as for ``argmax``.
+
+``scanfunc``: This one's ugly. We may have to explicitly override it in all of
+our special dtypes, because assuming that we want the option of, say, having
+the token "NA" represent an NA value in a text file, we need some way to check
+whether that's there before delegating. But ``ungetc`` is only guaranteed to
+let us put back 1 character, and we need 2 (or maybe 3 if we actually check for
+"NA "). The other option would be to read to the next delimiter, check whether
+we have an NA, and if not then delegate to ``fromstr`` instead of ``scanfunc``,
+but according to the current API, each dtype might in principle use a totally
+different rule for defining "the next delimiter". So... any ideas? (FIXME)
+
+``fromstr``: Easy -- check for "NA ", if present then assign ``NA_value``,
+otherwise call ``clearna`` and delegate.
+
+``nonzero``: FIXME: again, what is this used for? (It seems redundant with
+using the casting machinery to cast to bool.) Probably it needs to be modified
+so that it can return NA, though...
+
+``fill``: Use ``isna`` to check if either of the first two values is NA. If so,
+then fill the rest of the array with ``NA_value``. Otherwise, call ``clearna``
+and then delegate.
+
+``fillwithvalue``: Guess this can just delegate?
+
+``sort``, ``argsort``: These should probably arrange to sort NAs to a
+particular place in the array (either the front or the back -- any opinions?)
+
+``scalarkind``: FIXME: I have no idea what this does.
+
+``castdict``, ``cancastscalarkindto``, ``cancastto``: See section on casting
+below.
+
+Casting
+-------
+
+FIXME: this really needs attention from an expert on numpy's casting rules. But
+I can't seem to find the docs that explain how casting loops are looked up and
+decided between (e.g., if you're casting from dtype A to dtype B, which dtype's
+loops are used?), so I can't go into details. But those details are tricky and
+they matter...
+
+But the general idea is, if you have a dtype with ``NPY_NA_AUTO_CAST`` set,
+then the following conversions are automatically allowed:
+
+ * Casting from the underlying type to the NA-type: this is performed by the
+   usual ``clearna`` + potentially-strided copy dance. Also, ``isna`` is
+   called to check that none of the regular values have been accidentally
+   converted into NA; if so, then an error is raised.
+ * Casting from the NA-type to the underlying type: allowed in principle, but
+ if ``isna`` returns true for any of the values that are to be converted,
+ then again, an error is raised. (If you want to get around this, use
+ ``np.view(array_with_NAs, dtype=float)``.)
+ * Casting between the NA-type and other types that do not support NA: this is
+ allowed if the underlying type is allowed to cast to the other type, and is
+ performed by combining a cast to or from the underlying type (using the
+ above rules) with a cast to or from the other type (using the underlying
+ type's rules).
+ * Casting between the NA-type and other types that do support NA: if the
+ other type has NPY_NA_AUTO_CAST set, then we use the above rules plus the
+ usual dance with ``isna`` on one array being converted to ``NA_value``
+ elements in the other. If only one of the arrays has NPY_NA_AUTO_CAST set,
+ then it's assumed that that dtype knows what it's doing, and we don't do
+ any magic. (But this is one of the things that I'm not sure makes sense, as
+ per my caveat above.)
+
+Ufuncs
+------
+
+All ufuncs gain an additional optional keyword argument, ``skipNA=``, which
+defaults to False.
+
+If ``skipNA == True``, then the ufunc machinery *unconditionally* calls
+``isna`` for any dtype where NPY_NA_SUPPORTED(dtype) is true, and then acts as
+if any values for which isna returns True were masked out in the ``where=``
+argument (see miniNEP 1 for the behavior of ``where=``). If a ``where=``
+argument is also given, then it acts as if the ``isna`` values had been ANDed out
+of the ``where=`` mask, though it does not actually modify the mask. Unlike the
+other changes below, this is performed *unconditionally* for any dtype which
+has an ``isna`` function defined; the NPY_NA_AUTO_UFUNC flag is *not* checked.
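+
+For example, echoing the earlier examples (the NA dtype and ``skipNA=``
+keyword are proposed here and do not exist in released NumPy)::
+
+    >>> a = np.array([1.0, 2.0, np.NA], dtype='NA[f8]')
+    >>> np.sum(a)
+    NA('float64')
+    >>> np.sum(a, skipNA=True)
+    3.0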
+
+If NPY_NA_AUTO_UFUNC is set, then ufunc loop lookup is modified so that
+whenever it checks for the existence of a loop on the current dtype, and does
+not find one, then it also checks for a loop on the ``NA_extends`` dtype. If
+that loop is found, then it uses it in the normal way, with the exceptions that
+(1) it is only called for values which are not NA according to ``isna``, (2) if
+the output array has NPY_NA_AUTO_UFUNC set, then ``clearna`` is called on it
+before calling the ufunc loop, (3) pointer offsets are adjusted by
+``NA_extends_offset`` before calling the ufunc loop. In addition, if
+NPY_NA_AUTO_UFUNC_CHECKED is set, then after evaluating the ufunc loop we call
+``isna`` on the *output* array, and if there are any NAs in the output which
+were not in the input, then we raise an error. (The intention of this is to
+catch cases where, say, we represent NA using the most-negative integer, and
+then someone's arithmetic overflows to create such a value by accident.)
+
+FIXME: We should go into more detail here about how NPY_NA_AUTO_UFUNC works
+when there are multiple input arrays, of which potentially some have the flag
+set and some do not.
+
+Printing
+--------
+
+FIXME: There should be some sort of mechanism by which values which are NA are
+automatically repr'ed as NA, but I don't really understand how numpy printing
+works, so I'll let someone else fill in this section.
+
+Indexing
+--------
+
+Scalar indexing like ``a[12]`` goes via the ``getitem`` function, so according
+to the proposal as described above, if a dtype delegates ``getitem``, then
+scalar indexing on NAs will return the object ``np.NA``. (If it doesn't
+delegate ``getitem``, of course, then it can return whatever it wants.)
+
+This seems like the simplest approach, but an alternative would be to add a
+special case to scalar indexing, where if an ``NPY_NA_AUTO_INDEX`` flag were
+set, then it would call ``isna`` on the specified element. If this returned
+false, it would call ``getitem`` as usual; otherwise, it would return a 0-d
+array containing the specified element. The problem with this is that it breaks
+expressions like ``if a[i] is np.NA: ...``. (Of course, there is nothing nearly
+so convenient as that for NaN values now, but then, NaN values don't have their
+own global singleton.) So for now we stick to scalar indexing just returning
+``np.NA``, but this can be revisited if anyone objects.
+
+Python API for generic NA support
+=================================
+
+NumPy will gain a global singleton called numpy.NA, similar to None, but with
+semantics reflecting its status as a missing value. In particular, trying to
+treat it as a boolean will raise an exception, and comparisons with it will
+produce numpy.NA instead of True or False. These basics are adopted from the
+behavior of the NA value in the R project. To dig deeper into the ideas,
+http://en.wikipedia.org/wiki/Ternary_logic#Kleene_logic provides a starting
+point.
+
+Most operations on ``np.NA`` (e.g., ``__add__``, ``__mul__``) are overridden to
+unconditionally return ``np.NA``.
+
+The automagic dtype detection used for expressions like ``np.asarray([1, 2,
+3])``, ``np.asarray([1.0, 2.0, 3.0])`` will be extended to recognize the
+``np.NA`` value, and use it to automatically switch to a built-in NA-enabled
+dtype (which one being determined by the other elements in the array). A simple
+``np.asarray([np.NA])`` will use an NA-enabled float64 dtype (which is
+analogous to what you get from ``np.asarray([])``). Note that this means that
+expressions like ``np.log(np.NA)`` will work: first ``np.NA`` will be coerced
+to a 0-d NA-float array, and then ``np.log`` will be called on that.
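+
+Illustratively (proposed API; the exact reprs shown are guesses)::
+
+    >>> np.asarray([1.0, np.NA, 3.0])
+    array([1., NA, 3.], dtype='NA[<f8]')
+    >>> np.log(np.NA)
+    NA('float64')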
+
+Python-level dtype objects gain the following new fields::
+
+ NA_supported
+ NA_value
+
+``NA_supported`` is a boolean which simply exposes the value of the
+``NPY_NA_SUPPORTED`` flag; it should be true if this dtype allows for NAs,
+false otherwise. [FIXME: would it be better to just key this off the existence
+of the ``isna`` function? Even if a dtype decides to implement all other NA
+handling itself, it still has to define ``isna`` in order to make ``skipNA=``
+work correctly.]
+
+``NA_value`` is a 0-d array of the given dtype, and its sole element contains
+the same bit-pattern as the dtype's underlying ``NA_value`` field. This makes
+it possible to determine the default bit-pattern for NA values for this type
+(e.g., with ``np.view(mydtype.NA_value, dtype=int8)``).
+
+We *do not* expose the ``NA_extends`` and ``NA_extends_offset`` values at the
+Python level, at least for now; they're considered an implementation detail
+(and it's easier to expose them later if they're needed than to unexpose them if
+they aren't).
+
+Two new ufuncs are defined: ``np.isNA`` returns a logical array, with true
+values where-ever the dtype's ``isna`` function returned true. ``np.isnumber``
+is only defined for numeric dtypes, and returns True for all elements which are
+not NA, and for which ``np.isfinite`` would return True.
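+
+A sketch of the intended behavior (proposed API)::
+
+    >>> a = np.array([1.0, np.NA, np.nan], dtype='NA[f8]')
+    >>> np.isNA(a)
+    array([False,  True, False])
+    >>> np.isnumber(a)
+    array([ True, False, False])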
+
+Builtin NA dtypes
+=================
+
+The above describes the generic machinery for NA support in dtypes. It's
+flexible enough to handle all sorts of situations, but we also want to define a
+few generally useful NA-supporting dtypes that are available by default.
+
+For each built-in dtype, we define an associated NA-supporting dtype, as
+follows:
+
+* floats: the associated dtype uses a specific NaN bit-pattern to indicate NA
+ (chosen for R compatibility)
+* complex: we do whatever R does (FIXME: look this up -- two NA floats,
+ probably?)
+* signed integers: the most-negative signed value is used as NA (chosen for R
+ compatibility)
+* unsigned integers: the most-positive value is used as NA (no R compatibility
+ possible).
+* strings: the first byte (or, in the case of unicode strings, first 4 bytes)
+ is used as a flag to indicate NA, and the rest of the data gives the actual
+ string. (no R compatibility possible)
+* objects: Two options (FIXME): either we don't include an NA-ful version, or
+ we use np.NA as the NA bit pattern.
+* boolean: we do whatever R does (FIXME: look this up -- 0 == FALSE, 1 == TRUE,
+ 2 == NA?)
+
+Each of these dtypes is trivially defined using the above machinery, and they
+are what the automagic type inference machinery automatically uses (for
+``np.asarray([True, np.NA, False])``, etc.).
+
+They can also be accessed via a new function ``np.withNA``, which takes a
+regular dtype (or an object that can be coerced to a dtype, like 'float') and
+returns one of the above dtypes. Ideally ``withNA`` should also take some
+optional arguments that let you describe which values you want to count as NA,
+etc., but I'll leave that for a future draft (FIXME).
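+
+For example (proposed API; the exact repr is a guess)::
+
+    >>> np.withNA(np.float64)
+    dtype('NA[<f8]')
+    >>> np.zeros(3, dtype=np.withNA('float'))
+    array([0., 0., 0.], dtype='NA[<f8]')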
+
+FIXME: If ``d`` is one of the above dtypes, then what should ``d.type`` return?
+
+The NEP also contains a proposal for a somewhat elaborate
+domain-specific-language for describing NA dtypes. I'm not sure how great an
+idea that is. (I have a bias against using strings as data structures, and find
+the already existing strings confusing enough as it is -- also, apparently the
+NEP version of numpy uses strings like 'f8' when printing dtypes, while my
+numpy uses object names like 'float64', so I'm not sure what's going on there.
+``withNA(float64, arg1=value1)`` seems like a more pleasant way to print a
+dtype than "NA[f8,value1]", at least to me.) But if people want it, then cool.
+
+Type hierarchy
+--------------
+
+FIXME: how should we do subtype checks, etc., for NA dtypes? What does
+``issubdtype(withNA(float), float)`` return? How about
+``issubdtype(withNA(float), np.floating)``?
+
+Serialization
+-------------
+
+
+Copyright
+---------
+
+This document has been placed in the public domain.
diff --git a/doc/neps/nep-0026-missing-data-summary.rst b/doc/neps/nep-0026-missing-data-summary.rst
new file mode 100644
index 000000000..e99138cdd
--- /dev/null
+++ b/doc/neps/nep-0026-missing-data-summary.rst
@@ -0,0 +1,730 @@
+====================================================
+NEP 26 — Summary of Missing Data NEPs and discussion
+====================================================
+
+:Author: Mark Wiebe <mwwiebe@gmail.com>, Nathaniel J. Smith <njs@pobox.com>
+:Status: Deferred
+:Type: Standards Track
+:Created: 2012-04-22
+
+*Context*: this NEP was written as a summary of the large number of discussions
+and proposals (`NEP 12`_, `NEP 24`_, `NEP 25`_) regarding missing data
+functionality.
+
+The debate about how NumPy should handle missing data, a subject with
+many preexisting approaches, requirements, and conventions, has been long and
+contentious. There has been more than one proposal for how to implement
+support in NumPy, and there is a testable implementation which is
+merged into NumPy's current master. The vast number of emails and differing
+points of view has made it difficult for interested parties to understand
+the issues and be comfortable with the direction NumPy is going.
+
+Here is our (Mark and Nathaniel's) attempt to summarize the
+problem, proposals, and points of agreement/disagreement in a single
+place, to help the community move towards consensus.
+
+The NumPy developers' problem
+=============================
+
+For this discussion, "missing data" means array elements
+which can be indexed (e.g. A[3] in an array A with shape (5,)),
+but have, in some sense, no value.
+
+It does not refer to compressed or sparse storage techniques where
+the value for A[3] is not actually stored in memory, but still has a
+well-defined value like 0.
+
+This is still vague, and to create an actual implementation,
+it is necessary to answer such questions as:
+
+* What values are computed when doing element-wise ufuncs.
+* What values are computed when doing reductions.
+* Whether the storage for an element gets overwritten when marking
+ that value missing.
+* Whether computations resulting in NaN are automatically treated in the
+  same way as a missing value.
+* Whether one interacts with missing values using a placeholder object
+ (e.g. called "NA" or "masked"), or through a separate boolean array.
+* Whether there is such a thing as an array object that cannot hold
+ missing array elements.
+* How the (C and Python) API is expressed, in terms of dtypes,
+ masks, and other constructs.
+* If we decide to answer some of these questions in multiple ways,
+ then that creates the question of whether that requires multiple
+ systems, and if so how they should interact.
+
+There's clearly a very large space of missing-data APIs that *could*
+be implemented. There is likely at least one user, somewhere, who
+would find any possible implementation to be just the thing they
+need to solve some problem. On the other hand, much of NumPy's power
+and clarity comes from having a small number of orthogonal concepts,
+such as strided arrays, flexible indexing, broadcasting, and ufuncs,
+and we'd like to preserve that simplicity.
+
+There has been dissatisfaction among several major groups of NumPy users
+about the existing status quo of missing data support. In particular,
+neither the numpy.ma component nor use of floating-point NaNs as a
+missing data signal fully satisfy the performance requirements and
+ease of use for these users. The example of R, where missing data
+is treated via an NA placeholder and is deeply integrated into all
+computation, is where many of these users point to indicate what
+functionality they would like. Doing a deep integration of missing
+data like in R must be considered carefully; it must be clear that it
+is not being done in a way which sacrifices existing performance
+or functionality.
+
+Our problem is: how can we choose some incremental additions to
+NumPy that will make a large class of users happy, that are
+reasonably elegant, that complement the existing design, and that
+we're comfortable we won't regret being stuck with in the long term?
+
+Prior art
+=========
+
+So a major (maybe *the* major) problem is figuring out how ambitious
+the project to add missing data support to NumPy should be, and which
+kinds of problems are in scope. Let's start with the
+best understood situation where "missing data" comes into play:
+
+"Statistical missing data"
+--------------------------
+
+In statistics, social science, etc., "missing data" is a term of art
+referring to a specific (but extremely common and important)
+situation: we have tried to gather some measurements according to some
+scheme, but some of these measurements are missing. For example, if we
+have a table listing the height, age, and income of a number of
+individuals, but one person did not provide their income, then we need
+some way to represent this::
+
+ Person | Height | Age | Income
+ ------------------------------
+ 1 | 63 | 25 | 15000
+ 2 | 58 | 32 | <missing>
+ 3 | 71 | 45 | 30000
+
+The traditional way is to record that income as, say, "-99", and
+document this in the README along with the data set. Then, you have to
+remember to check for and handle such incomes specially; if you
+forget, you'll get superficially reasonable but completely incorrect
+results, like calculating the average income on this data set as
+14967. If you're in one of these fields, then such missing-ness is
+routine and inescapable, and if you use the "-99" approach then it's a
+pitfall you have to remember to check for explicitly on literally
+*every* calculation you ever do. This is, obviously, an unpleasant way
+to live.
+
+Let's call this situation the "statistical missing data" situation,
+just to have a convenient handle for it. (As mentioned, practitioners
+just call this "missing data", and what to do about it is literally an
+entire sub-field of statistics; if you google "missing data" then
+every reference is on how to handle it.) NumPy isn't going to do
+automatic imputation or anything like that, but it could help a great
+deal by providing some standard way to at least represent data which
+is missing in this sense.
+
+The main prior art for how this could be done comes from the S/S+/R
+family of languages. Their strategy is, for each type they support,
+to define a special value called "NA". (For ints this is INT_MAX,
+for floats it's a special NaN value that's distinguishable from
+other NaNs, ...) Then, they arrange that in computations, this
+value has a special semantics that we will call "NA semantics".
+
+NA Semantics
+------------
+
+The idea of NA semantics is that any computations involving NA
+values should be consistent with what would have happened if we
+had known the correct value.
+
+For example, let's say we want to compute the mean income, how might
+we do this? One way would be to just ignore the missing entry, and
+compute the mean of the remaining entries. This gives us (15000 +
+30000)/2, or 22500.
+
+Is this result consistent with discovering the income of person 2?
+Let's say we find out that person 2's income is 50000. This means
+the correct answer is (15000 + 50000 + 30000)/3, or 31666.67,
+indicating clearly that it is not consistent. Therefore, the mean
+income is NA, i.e. a specific number whose value we are unable
+to compute.
+
+This motivates the following rules, which are how R implements NA:
+
+Assignment:
+ NA values are understood to represent specific
+ unknown values, and thus should have value-like semantics with
+ respect to assignment and other basic data manipulation
+ operations. Code which does not actually look at the values involved
+ should work the same regardless of whether some of them are
+ missing. For example, one might write::
+
+ income[:] = income[np.argsort(height)]
+
+ to perform an in-place sort of the ``income`` array, and know that
+ the shortest person's income would end up being first. It turns out
+ that the shortest person's income is not known, so the array should
+ end up being ``[NA, 15000, 30000]``, but there's nothing
+ special about NAness here.
+
+Propagation:
+ In the example above, we concluded that an operation like ``mean``
+ should produce NA when one of its data values was NA.
+ If you ask me, "what is 3 plus x?", then my only possible answer is
+ "I don't know what x is, so I don't know what 3 + x is either". NA
+ means "I don't know", so 3 + NA is NA.
+
+ This is important for safety when analyzing data: missing data often
+ requires special handling for correctness -- the fact that you are
+ missing information might mean that something you wanted to compute
+ cannot actually be computed, and there are whole books written on
+ how to compensate in various situations. Plus, it's easy to not
+ realize that you have missing data, and write code that assumes you
+ have all the data. Such code should not silently produce the wrong
+ answer.
+
+ There is an important exception to characterizing this as propagation,
+ in the case of boolean values. Consider the calculation::
+
+ v = np.any([False, False, NA, True])
+
+ If we strictly propagate, ``v`` will become NA. However, no
+ matter whether we place True or False into the third array position,
+ ``v`` will then get the value True. The answer to the question
+ "Is the result True consistent with later discovering the value
+ that was missing?" is yes, so it is reasonable to not propagate here,
+ and instead return the value True. This is what R does::
+
+ > any(c(F, F, NA, T))
+ [1] TRUE
+ > any(c(F, F, NA, F))
+ [1] NA
+
+Other:
+ NaN and NA are conceptually distinct. 0.0/0.0 is not a mysterious,
+ unknown value -- it's defined to be NaN by IEEE floating point, Not
+ a Number. NAs are numbers (or strings, or whatever), just unknown
+ ones. Another small but important difference is that in Python, ``if
+ NaN: ...`` treats NaN as True (NaN is "truthy"); but ``if NA: ...``
+ would be an error.
+
+  In R, all reduction operations implement an alternative semantics,
+  activated by passing the special argument ``na.rm=TRUE``.
+ ``sum(a)`` means "give me the sum of all the
+ values" (which is NA if some of the values are NA);
+ ``sum(a, na.rm=True)`` means "give me the sum of all the non-NA
+ values".
+
+Other prior art
+---------------
+
+Once we move beyond the "statistical missing data" case, the correct
+behavior for missing data becomes less clearly defined. There are many
+cases where specific elements are singled out to be treated specially
+or excluded from computations, and these could often be conceptualized
+as involving 'missing data' in some sense.
+
+In image processing, it's common to use a single image together with
+one or more boolean masks to e.g. composite subsets of an image. As
+Joe Harrington pointed out on the list, in the context of processing
+astronomical images, it's also common to generalize to a
+floating-point valued mask, or alpha channel, to indicate degrees of
+"missingness". We think this is out of scope for the present design,
+but it is an important use case, and ideally NumPy should support
+natural ways of manipulating such data.
+
+After R, numpy.ma is probably the most mature source of
+experience on missing-data-related APIs. Its design is quite different
+from R; it uses different semantics -- reductions skip masked values
+by default and NaNs convert to masked -- and it uses a different
+storage strategy via a separate mask. While it seems to be generally
+considered sub-optimal for general use, it's hard to pin down whether
+this is because the API is immature but basically good, or the API
+is fundamentally broken, or the API is great but the code should be
+faster, or what. We looked at some of those users to try and get a
+better idea.
+
+Matplotlib is perhaps the best known package to rely on numpy.ma. It
+seems to use it in two ways. One is as a way for users to indicate
+what data is missing when passing it to be graphed. (Other ways are
+also supported, e.g., passing in NaN values gives the same result.) In
+this regard, matplotlib treats np.ma.masked and NaN values in the same way
+that R's plotting routines handle NA and NaN values. For these purposes,
+matplotlib doesn't really care what semantics or storage strategy is
+used for missing data.
+
+Internally, matplotlib uses numpy.ma arrays to store and pass around
+separately computed boolean masks containing 'validity' information
+for each input array in a cheap and non-destructive fashion. Mark's
+impression from some shallow code review is that mostly it works
+directly with the data and mask attributes of the masked arrays,
+not extensively using the particular computational semantics of
+numpy.ma. So, for this usage they do rely on the non-destructive
+mask-based storage, but this doesn't say much about what semantics
+are needed.
+
+Paul Hobson `posted some code`__ on the list that uses numpy.ma for
+storing arrays of contaminant concentration measurements. Here the
+mask indicates whether the corresponding number represents an actual
+measurement, or just the estimated detection limit for a concentration
+which was too small to detect. Nathaniel's impression from reading
+through this code is that it also mostly uses the .data and .mask
+attributes in preference to performing operations on the MaskedArray
+directly.
+
+__ https://mail.scipy.org/pipermail/numpy-discussion/2012-April/061743.html
+
+So, these examples make it clear that there is demand for a convenient
+way to keep a data array and a mask array (or even a floating point
+array) bundled up together and "aligned". But they don't tell us much
+about what semantics the resulting object should have with respect to
+ufuncs and friends.
+
+Semantics, storage, API, oh my!
+===============================
+
+We think it's useful to draw a clear line between use cases,
+semantics, and storage. Use cases are situations that users encounter,
+regardless of what NumPy does; they're the focus of the previous
+section. When we say *semantics*, we mean the result of different
+operations as viewed from the Python level without regard to the
+underlying implementation.
+
+*NA semantics* are the ones described above and used by R::
+
+ 1 + NA = NA
+ sum([1, 2, NA]) = NA
+ NA | False = NA
+ NA | True = True
+
+With ``na.rm=TRUE`` or ``skipNA=True``, this switches to::
+
+ 1 + NA = illegal # in R, only reductions take na.rm argument
+ sum([1, 2, NA], skipNA=True) = 3
+
+There's also been discussion of what we'll call *ignore
+semantics*. These are somewhat underdefined::
+
+ sum([1, 2, IGNORED]) = 3
+ # Several options here:
+ 1 + IGNORED = 1
+ # or
+ 1 + IGNORED = <leaves output array untouched>
+ # or
+ 1 + IGNORED = IGNORED
+
+The numpy.ma semantics are::
+
+ sum([1, 2, masked]) = 3
+ 1 + masked = masked
+
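+These match the behavior of the real ``numpy.ma`` today::
+
+    >>> import numpy as np
+    >>> a = np.ma.array([1, 2, 3], mask=[False, False, True])
+    >>> a.sum()
+    3
+    >>> 1 + np.ma.masked
+    masked
+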
+If either NA or ignore semantics are implemented with masks, then there
+is a choice of what should be done to the value in the storage
+for an array element which gets assigned a missing value. Three
+possibilities are:
+
+* Leave that memory untouched (the choice made in the NEP).
+* Do the calculation with the values independently of the mask
+ (perhaps the most useful option for Paul Hobson's use-case above).
+* Copy whatever value is stored behind the input missing value into
+ the output (this is what numpy.ma does. Even that is ambiguous in
+ the case of ``masked + masked`` -- in this case numpy.ma copies the
+ value stored behind the leftmost masked value).
+
+When we talk about *storage*, we mean the debate about whether missing
+values should be represented by designating a particular value of the
+underlying data-type (the *bitpattern dtype* option, as used in R), or
+by using a separate *mask* stored alongside the data itself.
+
+For mask-based storage, there is also an important question about what
+the API looks like for accessing the mask, modifying the mask, and
+"peeking behind" the mask.
+
+Designs that have been proposed
+===============================
+
+One option is to just copy R, by implementing a mechanism whereby
+dtypes can arrange for certain bitpatterns to be given NA semantics.
+
+One option is to copy numpy.ma closely, but with a more optimized
+implementation. (Or to simply optimize the existing implementation.)
+
+One option is that described in `NEP 12`_, for which an implementation
+of mask-based missing data exists. This system is roughly:
+
+* There is both bitpattern and mask-based missing data, and both
+ have identical interoperable NA semantics.
+* Masks are modified by assigning np.NA or values to array elements.
+ The way to peek behind the mask or to unmask values is to keep a
+ view of the array that shares the data pointer but not the mask pointer.
+* Mark would like to add a way to access and manipulate the mask more
+ directly, to be used in addition to this view-based API.
+* If an array has both a bitpattern dtype and a mask, then assigning
+ np.NA writes to the mask, rather than to the array itself. Writing
+ a bitpattern NA to an array which supports both requires accessing
+ the data by "peeking under the mask".
+
+Another option is that described in `NEP 24`_, which is to implement
+bitpattern dtypes with NA semantics for the "statistical missing data"
+use case, and to also implement a totally independent API for masked
+arrays with ignore semantics and all mask manipulation done explicitly
+through a .mask attribute.
+
+Another option would be to define a minimalist aligned array container
+that holds multiple arrays and that can be used to pass them around
+together. It would support indexing (to help with the common problem
+of wanting to subset several arrays together without their becoming
+unaligned), but all arithmetic etc. would be done by accessing the
+underlying arrays directly via attributes. The "prior art" discussion
+above suggests that something like this holding a .data and a .mask
+array might actually solve a number of people's problems without
+requiring any major architectural changes to NumPy. This is similar to
+a structured array, but with each field in a separately stored array
+instead of packed together.
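+
+A minimal sketch of such a container (the names here are invented for
+illustration)::
+
+    import numpy as np
+
+    class AlignedArrays:
+        """Keep a data array and a mask array aligned under indexing."""
+
+        def __init__(self, data, mask):
+            self.data = np.asarray(data)
+            self.mask = np.asarray(mask)
+
+        def __getitem__(self, index):
+            # Subsetting goes through both arrays at once, so they can
+            # never fall out of alignment; arithmetic is done by the
+            # caller directly on the .data and .mask attributes.
+            return AlignedArrays(self.data[index], self.mask[index])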
+
+Several people have suggested that there should be a single system
+that has multiple missing values that each have different semantics,
+e.g., a MISSING value that has NA semantics, and a separate IGNORED
+value that has ignored semantics.
+
+None of these options are necessarily exclusive.
+
+The debate
+==========
+
+We both are dubious of using ignored semantics as a default missing
+data behavior. **Nathaniel** likes NA semantics because he is most
+interested in the "statistical missing data" use case, and NA semantics
+are exactly right for that. **Mark** isn't as interested in that use
+case in particular, but he likes the NA computational abstraction
+because it is unambiguous and well-defined in all cases, and has a
+lot of existing experience to draw from.
+
+What **Nathaniel** thinks, overall:
+
+* The "statistical missing data" use case is clear and compelling; the
+ other use cases certainly deserve our attention, but it's hard to say what
+ they *are* exactly yet, or even if the best way to support them is
+ by extending the ndarray object.
+* The "statistical missing data" use case is best served by an R-style
+ system that uses bitpattern storage to implement NA semantics. The
+ main advantage of bitpattern storage for this use case is that it
+ avoids the extra memory and speed overhead of storing and checking a
+ mask (especially for the common case of floating point data, where
+ some tricks with NaNs allow us to effectively hardware-accelerate
+  most NA operations). These concerns alone appear to make a
+ mask-based implementation unacceptable to many NA users,
+ particularly in areas like neuroscience (where memory is tight) or
+ financial modeling (where milliseconds are critical). In addition,
+ the bit-pattern approach is less confusing conceptually (e.g.,
+ assignment really is just assignment, no magic going on behind the
+ curtain), and it's possible to have in-memory compatibility with R
+ for inter-language calls via rpy2. The main disadvantage of the
+ bitpattern approach is the need to give up a value to represent NA,
+ but this is not an issue for the most important data types (float,
+ bool, strings, enums, objects); really, only integers are
+ affected. And even for integers, giving up a value doesn't really
+ matter for statistical problems. (Occupy Wall Street
+ notwithstanding, no-one's income is 2**63 - 1. And if it were, we'd
+ be switching to floats anyway to avoid overflow.)
+* Adding new dtypes requires some cooperation with the ufunc and
+ casting machinery, but doesn't require any architectural changes or
+ violations of NumPy's current orthogonality.
+* His impression from the mailing list discussion, esp. the `"what can
+ we agree on?" thread`__, is that many numpy.ma users specifically
+ like the combination of masked storage, the mask being easily
+ accessible through the API, and ignored semantics. He could be
+ wrong, of course. But he cannot remember seeing anybody besides Mark
+ advocate for the specific combination of masked storage and NA
+ semantics, which makes him nervous.
+
+ __ http://thread.gmane.org/gmane.comp.python.numeric.general/46704
+* Also, he personally is not very happy with the idea of having two
+ storage implementations that are almost-but-not-quite identical at
+ the Python level. While there likely are people who would like to
+ temporarily pretend that certain data is "statistically missing
+ data" without making a copy of their array, it's not at all clear
+ that they outnumber the people who would like to use bitpatterns and
+ masks simultaneously for distinct purposes. And honestly he'd like
+ to be able to just ignore masks if he wants and stick to
+ bitpatterns, which isn't possible if they're coupled together
+ tightly in the API. So he would say the jury is still very much out
+ on whether this aspect of the NEP design is an advantage or a
+ disadvantage. (Certainly he's never heard of any R users complaining
+ that they really wish they had an option of making a different
+ trade-off here.)
+* R's NA support is a `headline feature`__ and its target audience
+ consider it a compelling advantage over other platforms like Matlab
+ or Python. Working with statistical missing data is very painful
+ without platform support.
+
+ __ http://www.sr.bham.ac.uk/~ajrs/R/why_R.html
+* By comparison, we clearly have much more uncertainty about the use
+ cases that require a mask-based implementation, and it doesn't seem
+ like people will suffer too badly if they are forced for now to
+ settle for using NumPy's excellent mask-based indexing, the new
+ where= support, and even numpy.ma.
+* Therefore, bitpatterns with NA semantics seem to meet the criteria
+ of making a large class of users happy, in an elegant way, that fits
+ into the original design, and where we can have reasonable certainty
+ that we understand the problem and use cases well enough that we'll
+ be happy with them in the long run. But no mask-based storage
+ proposal does, yet.
+
+What **Mark** thinks, overall:
+
+* The idea of using NA semantics by default for missing data, inspired
+ by the "statistical missing data" problem, is better than all the
+ other default behaviors which were considered. This applies equally
+ to the bitpattern and the masked approach.
+
+* For NA-style functionality to get proper support by all NumPy
+ features and eventually all third-party libraries, it needs to be
+  in the core. How to correctly and efficiently handle missing data
+  differs by algorithm, and if thinking about missing data is a required
+  part of fully supporting NumPy, NA support will be broader and of
+  higher quality.
+
+* At the same time, providing two different missing data interfaces,
+ one for masks and one for bitpatterns, requires NumPy developers
+ and third-party NumPy plugin developers to separately consider the
+  question of what to do in either case, and to write two additional
+  implementations of their code. This complicates their job,
+ and could lead to inconsistent support for missing data.
+
+* Providing the ability to work with both masks and bitpatterns through
+ the same C and Python programming interface makes missing data support
+ cleanly orthogonal with all other NumPy features.
+
+* There are many trade-offs of memory usage, performance, correctness, and
+ flexibility between masks and bitpatterns. Providing support for both
+ approaches allows users of NumPy to choose the approach which is
+ most compatible with their way of thinking, or has characteristics
+ which best match their use-case. Providing them through the same
+ interface further allows them to try both with minimal effort, and
+ choose the one which performs better or uses the least memory for
+ their programs.
+
+* Memory Usage
+
+ * With bitpatterns, less memory is used for storing a single array
+ containing some NAs.
+
+  * With masks, less memory is used for storing multiple arrays that
+    are identical except for the location of their NAs. (In this case a
+    single data array can be re-used with multiple mask arrays;
+    bitpattern NAs would need to copy the whole data array. See the
+    sketch after this list.)
+
+* Performance
+
+ * With bitpatterns, the floating point type can use native hardware
+ operations, with nearly correct behavior. For fully correct floating
+ point behavior and with other types, code must be written which
+ specially tests for equality with the missing-data bitpattern.
+
+ * With masks, there is always the overhead of accessing mask memory
+    and testing its truth value. The implementation that currently exists
+    has had no performance tuning, so it is only useful for judging a minimum
+    performance level. Optimal mask-based code is in general going to
+ be slower than optimal bitpattern-based code.
+
+* Correctness
+
+ * Bitpattern integer types must sacrifice a valid value to represent NA.
+ For larger integer types, there are arguments that this is ok, but for
+ 8-bit types there is no reasonable choice. In the floating point case,
+ if the performance of native floating point operations is chosen,
+ there is a small inconsistency that NaN+NA and NA+NaN are different.
+
+  * With masks, NA semantics work correctly in all cases.
+
+* Generality
+
+ * The bitpattern approach can work in a fully general way only when
+ there is a specific value which can be given up from the
+ data type. For IEEE floating point, a NaN is an obvious choice,
+ and for booleans represented as a byte, there are plenty of choices.
+ For integers, a valid value must be sacrificed to use this approach.
+ Third-party dtypes which plug into NumPy will also have to
+ make a bitpattern choice to support this system, something which
+ may not always be possible.
+
+ * The mask approach works universally with all data types.
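+
+The memory-usage trade-off above can be sketched with today's numpy.ma
+(purely an illustration of mask re-use; the proposed implementations
+would differ in detail)::
+
+    import numpy as np
+
+    data = np.arange(1e6)  # one shared data buffer
+    a = np.ma.masked_array(data, mask=(data % 2 == 0))
+    b = np.ma.masked_array(data, mask=(data > 5e5))
+    # a and b share the same data memory; only their masks differ. With
+    # bitpattern NAs, each variant would need its own copy of the data.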
+
+Recommendations for Moving Forward
+==================================
+
+**Nathaniel** thinks we should:
+
+* Go ahead and implement bitpattern NAs.
+* *Don't* implement masked arrays in the core -- or at least, not
+ yet. Instead, we should focus on figuring out how to implement them
+ out-of-core, so that people can try out different approaches without
+ us committing to any one approach. And so new prototypes can be
+ released more quickly than the NumPy release cycle. And anyway,
+ we're going to have to figure out how to experiment with such
+ changes out-of-core if NumPy is to continue to evolve without
+ forking -- might as well do it now. The existing code can live in
+ master, disabled, or it can live in a branch -- it'll still be there
+ once we know what we're doing.
+
+**Mark** thinks we should:
+
+* Keep the existing code as is, and add a global run-time experimental
+  flag which disables NA support by default.
+
+A more detailed rationale for this recommendation is:
+
+* A solid preliminary NA-mask implementation is currently in NumPy
+ master. This implementation has been extensively tested
+ against scipy and other third-party packages, and has been in master
+ in a stable state for a significant amount of time.
+* This implementation integrates deeply with the core, providing an
+ interface which is usable in the same way R's NA support is. It
+ provides a compelling, user-friendly answer to R's NA support.
+* The missing data NEP provides a plan for adding bitpattern-based
+ dtype support of NAs, which will operate through the same interface
+ but allow for the same performance/correctness tradeoffs that R has made.
+* Making it very easy for users to try out this implementation, which
+ has reasonable feature coverage and performance characteristics, is
+ the best way to get more concrete feedback about how NumPy's missing
+ data support should look.
+
+Because of its preliminary state, the existing implementation is marked
+as experimental in the NumPy documentation. It would be good for this
+to remain marked as experimental until it is more fleshed out, for
+example supporting struct and array dtypes and with a fuller set of
+NumPy operations.
+
+I think the code should stay as it is, except to add a run-time global
+NumPy flag, perhaps numpy.experimental.maskna, which defaults to
+False and can be toggled to True. In its default state, any NA feature
+usage would raise an "ExperimentalError" exception, a measure which
+would prevent it from being accidentally used and communicate its
+experimental status very clearly.
+
+The `ABI issues`__ seem very tricky to deal with effectively in the 1.x
+series of releases, but I believe that with proper implementation-hiding
+in a 2.0 release, evolving the software to support various other
+ABI ideas that have been discussed is feasible. This is the approach
+I like best.
+
+__ http://thread.gmane.org/gmane.comp.python.numeric.general/49485
+
+**Nathaniel** notes in response that he doesn't really have any
+objection to shipping experimental APIs in the main numpy distribution
+*if* we're careful to make sure that they don't "leak out" in a way
+that leaves us stuck with them. And in principle some sort of "this
+violates your warranty" global flag could be a way to do that. (In
+fact, this might also be a useful strategy for the kinds of changes
+that he favors, of adding minimal hooks to enable us to build
+prototypes more easily -- we could have some "rapid prototyping only"
+hooks that let prototype hacks get deeper access to NumPy's internals
+than we were otherwise ready to support.)
+
+But, he wants to point out two things. First, it seems like we still
+have fundamental questions to answer about the NEP design, like
+whether masks should have NA semantics or ignore semantics, and there
+are already plans to majorly change how NEP masks are exposed and
+accessed. So he isn't sure what we'll learn by asking for feedback on
+the NEP code in its current state.
+
+And second, given the concerns about their causing (minor) ABI issues,
+it's not clear that we could really prevent them from leaking out. (He
+looks forward to 2.0 too, but we're not there yet.) So maybe it would
+be better if they weren't present in the C API at all, and the hoops
+required for testers were instead something like, 'we have included a
+hacky pure-Python prototype accessible by typing "import
+numpy.experimental.donttrythisathome.NEP" and would welcome feedback'?
+
+If so, then he should mention that he did implement a horribly klugy,
+pure Python implementation of the NEP API that works with NumPy
+1.6.1. This was mostly as an experiment to see how possible such
+prototyping was and to test out a possible ufunc override mechanism,
+but if there's interest, the module is available here:
+https://github.com/njsmith/numpyNEP
+
+It passes the maskna test-suite, with some minor issues described
+in a big comment at the top.
+
+**Mark** responds:
+
+I agree that it's important to be careful when adding new
+features to NumPy, but I also believe it is essential that the project
+have forward development momentum. A project like NumPy requires
+developers to write code for advancement to occur, and obstacles
+that impede the writing of code discourage existing developers
+from contributing more, and potentially scare away developers
+who are thinking about joining in.
+
+All software projects, both open source and closed source, must
+balance between short-term practicality and long-term planning.
+In the case of the missing data development, there was a short-term
+resource commitment to tackle this problem, which is quite immense
+in scope. If there isn't a high likelihood of getting a contribution
+into NumPy that concretely advances towards a solution, I expect
+that individuals and companies interested in doing such work will
+have a much harder time justifying a commitment of their resources.
+For a project which is core to so many other libraries, only
+relying on the good will of selfless volunteers would mean that
+NumPy could more easily be overtaken by another project.
+
+In the case of the existing NA contribution at issue, how we resolve
+this disagreement represents a decision about how NumPy's
+developers, contributors, and users should interact. If we create
+a document describing a dispute resolution process, how do we
+design it so that it doesn't introduce a large burden and excessive
+uncertainty on developers that could prevent them from productively
+contributing code?
+
+If we go this route of writing up a decision process which includes
+such a dispute resolution mechanism, I think the meat of it should
+be a roadmap that potential contributors and developers can follow
+to gain influence over NumPy. NumPy development needs broad support
+beyond code contributions, and tying influence in the project to
+contributions seems to me like it would be a good way to encourage
+people to take on tasks like bug triaging/management, continuous
+integration/build server administration, and the myriad other
+tasks that help satisfy the project's needs. No specific meritocratic,
+democratic, consensus-striving system will satisfy everyone, but the
+vigour of the discussions around governance and process indicate that
+something at least a little bit more formal than the current status
+quo is necessary.
+
+In conclusion, I would like the NumPy project to prioritize movement
+towards a more flexible and modular ABI/API, balanced with strong
+backwards-compatibility constraints and feature additions that
+individuals, universities, and companies want to contribute.
+I do not believe keeping the NA code in 1.7 as it is, with the small
+additional measure of requiring it to be enabled by an experimental
+flag, poses a risk of long-term ABI troubles. The greater risk I see
+is a continuing lack of developers contributing to the project,
+and I believe backing out this code because of these worries would create a
+risk of reducing developer contribution.
+
+
+References and Footnotes
+------------------------
+
+`NEP 12`_ describes Mark's NA-semantics/mask implementation/view based mask
+handling API.
+
+`NEP 24`_ ("the alterNEP") was Nathaniel's initial attempt at separating MISSING
+and IGNORED handling into bit-patterns versus masks, though there's a bunch
+he would change about the proposal at this point.
+
+`NEP 25`_ ("miniNEP 2") was a later attempt by Nathaniel to sketch out an
+implementation strategy for NA dtypes.
+
+A further discussion overview page can be found at:
+https://github.com/njsmith/numpy/wiki/NA-discussion-status
+
+
+Copyright
+---------
+
+This document has been placed in the public domain.
+
+.. _NEP 12: http://www.numpy.org/neps/nep-0012-missing-data.html
+
+.. _NEP 24: http://www.numpy.org/neps/nep-0024-missing-data-2.html
+
+.. _NEP 25: http://www.numpy.org/neps/nep-0025-missing-data-3.html
diff --git a/doc/neps/nep-0027-zero-rank-arrarys.rst b/doc/neps/nep-0027-zero-rank-arrarys.rst
new file mode 100644
index 000000000..d932bb609
--- /dev/null
+++ b/doc/neps/nep-0027-zero-rank-arrarys.rst
@@ -0,0 +1,254 @@
+=========================
+NEP 27 — Zero Rank Arrays
+=========================
+
+:Author: Alexander Belopolsky (sasha), transcribed by Matt Picus <matti.picus@gmail.com>
+:Status: Final
+:Type: Informational
+:Created: 2006-06-10
+:Resolution: https://mail.python.org/pipermail/numpy-discussion/2018-October/078824.html
+
+.. note ::
+
+ NumPy has both zero rank arrays and scalars. This design document, adapted
+ from a `2006 wiki entry`_, describes what zero rank arrays are and why they
+ exist. It was transcribed 2018-10-13 into a NEP and links were updated.
+ The pull request sparked `a lively discussion`_ about the continued need
+ for zero rank arrays and scalars in NumPy.
+
+  Some of the information here is dated; for instance, indexing of 0-d arrays
+  is now implemented and does not raise an error.
+
+Zero-Rank Arrays
+----------------
+
+Zero-rank arrays are arrays with shape=(). For example:
+
+ >>> x = array(1)
+ >>> x.shape
+ ()
+
+
+Zero-Rank Arrays and Array Scalars
+----------------------------------
+
+Array scalars are similar to zero-rank arrays in many aspects::
+
+
+ >>> int_(1).shape
+ ()
+
+They even print the same::
+
+
+ >>> print int_(1)
+ 1
+ >>> print array(1)
+ 1
+
+
+However, there are some important differences:
+
+* Array scalars are immutable
+* Array scalars have a different Python type for each data type
+
+Motivation for Array Scalars
+----------------------------
+
+Numpy's design decision to provide 0-d arrays and array scalars in addition to
+native python types goes against one of the fundamental python design
+principles that there should be only one obvious way to do it. In this section
+we will try to explain why it is necessary to have three different ways to
+represent a number.
+
+There were several numpy-discussion threads:
+
+
+* `rank-0 arrays`_ in a 2002 mailing list thread.
+* Thoughts about zero dimensional arrays vs Python scalars in a `2005 mailing list thread`_
+
+It has been suggested several times that NumPy just use rank-0 arrays to
+represent scalar quantities in all cases. Pros and cons of converting rank-0
+arrays to scalars were summarized as follows:
+
+- Pros:
+
+  - In some cases when Python expects an integer (the most
+    dramatic is when slicing and indexing a sequence:
+    _PyEval_SliceIndex in ceval.c), it will not try to
+    convert the object to an integer before raising an error.
+    Therefore it is convenient to have 0-dim integer arrays
+    converted to integers for you by the array object.
+
+ - No risk of user confusion by having two types that
+ are nearly but not exactly the same and whose separate
+ existence can only be explained by the history of
+ Python and NumPy development.
+
+ - No problems with code that does explicit typechecks
+    (``isinstance(x, float)`` or ``type(x) == types.FloatType``). Although
+ explicit typechecks are considered bad practice in general, there are a
+ couple of valid reasons to use them.
+
+ - No creation of a dependency on Numeric in pickle
+ files (though this could also be done by a special case
+ in the pickling code for arrays)
+
+- Cons:
+
+  - It is difficult to write generic code because scalars
+    do not have the same methods and attributes as arrays
+    (such as ``.type`` or ``.shape``). Also, Python scalars
+    have different numeric behavior.
+
+  - This results in special-case checking that is not
+    pleasant. Fundamentally it lets the user believe that
+    somehow multidimensional homogeneous arrays
+    are something like Python lists (which, except for
+    Object arrays, they are not).
+
+Numpy implements a solution that is designed to have all the pros and none of the cons above.
+
+ Create Python scalar types for all of the 21 types and also
+ inherit from the three that already exist. Define equivalent
+ methods and attributes for these Python scalar types.
+
+The Need for Zero-Rank Arrays
+-----------------------------
+
+Once the idea to use zero-rank arrays to represent scalars was rejected, it was
+natural to consider whether zero-rank arrays can be eliminated altogether.
+However there are some important use cases where zero-rank arrays cannot be
+replaced by array scalars. See also `A case for rank-0 arrays`_ from February
+2006.
+
+* Output arguments::
+
+   >>> x = array(0)
+   >>> y = int_(5)
+ >>> add(5,5,x)
+ array(10)
+ >>> x
+ array(10)
+ >>> add(5,5,y)
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in ?
+ TypeError: return arrays must be of ArrayType
+
+* Shared data::
+
+ >>> x = array([1,2])
+ >>> y = x[1:2]
+ >>> y.shape = ()
+ >>> y
+ array(2)
+ >>> x[1] = 20
+ >>> y
+ array(20)
+
+Indexing of Zero-Rank Arrays
+----------------------------
+
+As of NumPy release 0.9.3, zero-rank arrays do not support any indexing::
+
+ >>> x[...]
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in ?
+ IndexError: 0-d arrays can't be indexed.
+
+On the other hand, there are several cases that make sense for rank-zero arrays.
+
+Ellipsis and empty tuple
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Alexander started a `Jan 2006 discussion`_ on scipy-dev
+with the following proposal:
+
+ ... it may be reasonable to allow ``a[...]``. This way
+    ellipsis can be interpreted as any number of ``:`` s including zero.
+ Another subscript operation that makes sense for scalars would be
+ ``a[...,newaxis]`` or even ``a[{newaxis, }* ..., {newaxis,}*]``, where
+ ``{newaxis,}*`` stands for any number of comma-separated newaxis tokens.
+ This will allow one to use ellipsis in generic code that would work on
+ any numpy type.
+
+Francesc Altet supported the idea of ``[...]`` on zero-rank arrays and
+`suggested`_ that ``[()]`` be supported as well.
+
+Francesc's proposal was::
+
+ In [65]: type(numpy.array(0)[...])
+ Out[65]: <type 'numpy.ndarray'>
+
+ In [66]: type(numpy.array(0)[()]) # Indexing a la numarray
+ Out[66]: <type 'int32_arrtype'>
+
+ In [67]: type(numpy.array(0).item()) # already works
+ Out[67]: <type 'int'>
+
+There is a consensus that for a zero-rank array ``x``, both ``x[...]`` and ``x[()]`` should be valid, but the question
+remains what the type of the result should be - a zero-rank ndarray or ``x.dtype``?
+
+(Alexander)
+ First, whatever choice is made for ``x[...]`` and ``x[()]`` they should be
+ the same because ``...`` is just syntactic sugar for "as many `:` as
+ necessary", which in the case of zero rank leads to ``... = (:,)*0 = ()``.
+ Second, rank zero arrays and numpy scalar types are interchangeable within
+  numpy, but numpy scalars can be used in some python constructs where ndarrays
+ can't. For example::
+
+ >>> (1,)[array(0)]
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in ?
+ TypeError: tuple indices must be integers
+ >>> (1,)[int32(0)]
+ 1
+
+Since most if not all numpy functions automatically convert zero-rank arrays to scalars on return, there is no reason for
+``[...]`` and ``[()]`` operations to be different.
+
+See SVN changeset 1864 (which became git commit `9024ff0`_) for
+implementation of ``x[...]`` and ``x[()]`` returning numpy scalars.
+
+See SVN changeset 1866 (which became git commit `743d922`_) for
+implementation of ``x[...] = v`` and ``x[()] = v``.
+
+Increasing rank with newaxis
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Everyone who commented liked this feature, so as of SVN changeset 1871 (which became git commit `b32744e`_) any number of ellipses and
+newaxis tokens can be placed as a subscript argument for a zero-rank array. For
+example::
+
+ >>> x = array(1)
+ >>> x[newaxis,...,newaxis,...]
+ array([[1]])
+
+It is not clear why more than one ellipsis should be allowed, but this is the
+behavior of higher rank arrays that we are trying to preserve.
+
+Refactoring
+~~~~~~~~~~~
+
+Currently all indexing on zero-rank arrays is implemented in a special ``if (nd
+== 0)`` branch of code that used to always raise an index error. This ensures
+that the changes do not affect any existing usage (except usage that
+relies on exceptions). On the other hand, part of the motivation for these
+changes was to make the behavior of ndarrays more uniform, and this should
+allow us to eliminate ``if (nd == 0)`` checks altogether.
+
+Copyright
+---------
+
+The original document appeared on the scipy.org wiki, with no Copyright notice, and its `history`_ attributes it to sasha.
+
+.. _`2006 wiki entry`: https://web.archive.org/web/20100503065506/http://projects.scipy.org:80/numpy/wiki/ZeroRankArray
+.. _`history`: https://web.archive.org/web/20100503065506/http://projects.scipy.org:80/numpy/wiki/ZeroRankArray?action=history
+.. _`2005 mailing list thread`: https://sourceforge.net/p/numpy/mailman/message/11299166
+.. _`suggested`: https://mail.python.org/pipermail/numpy-discussion/2006-January/005572.html
+.. _`Jan 2006 discussion`: https://mail.python.org/pipermail/numpy-discussion/2006-January/005579.html
+.. _`A case for rank-0 arrays`: https://mail.python.org/pipermail/numpy-discussion/2006-February/006384.html
+.. _`rank-0 arrays`: https://mail.python.org/pipermail/numpy-discussion/2002-September/001600.html
+.. _`9024ff0`: https://github.com/numpy/numpy/commit/9024ff0dc052888b5922dde0f3e615607a9e99d7
+.. _`743d922`: https://github.com/numpy/numpy/commit/743d922bf5893acf00ac92e823fe12f460726f90
+.. _`b32744e`: https://github.com/numpy/numpy/commit/b32744e3fc5b40bdfbd626dcc1f72907d77c01c4
+.. _`a lively discussion`: https://github.com/numpy/numpy/pull/12166
diff --git a/doc/neps/nep-template.rst b/doc/neps/nep-template.rst
index 26515127d..e869ebae3 100644
--- a/doc/neps/nep-template.rst
+++ b/doc/neps/nep-template.rst
@@ -64,7 +64,7 @@ References and Footnotes
.. [1] Each NEP must either be explicitly labeled as placed in the public domain (see
this NEP as an example) or licensed under the `Open Publication License`_.
-.. _Open Publication License: http://www.opencontent.org/openpub/
+.. _Open Publication License: https://www.opencontent.org/openpub/
Copyright
diff --git a/doc/neps/roadmap.rst b/doc/neps/roadmap.rst
new file mode 100644
index 000000000..a45423711
--- /dev/null
+++ b/doc/neps/roadmap.rst
@@ -0,0 +1,115 @@
+=============
+NumPy Roadmap
+=============
+
+This is a live snapshot of tasks and features we will be investing resources
+in. It may be used to encourage and inspire developers and to search for
+funding.
+
+Interoperability protocols & duck typing
+----------------------------------------
+
+- `__array_function__`
+
+  See `NEP 18`_ and a sample implementation_; a minimal sketch of the
+  protocol follows this list.
+
+- Array Duck-Typing
+
+  See `NEP 22`_ and the proposed `np.asduckarray()`
+
+- Mixins like `NDArrayOperatorsMixin`:
+
+ - for mutable arrays
+ - for reduction methods implemented as ufuncs
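+
+A minimal sketch of the `__array_function__` protocol described in `NEP 18`_
+(the class is illustrative only; in NumPy 1.16 the protocol must be enabled
+by setting ``NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=1``)::
+
+    import numpy as np
+
+    class DiagonalArray:
+        """A duck array: value `v` repeated `n` times on the diagonal."""
+
+        def __init__(self, n, v):
+            self._n, self._v = n, v
+
+        def __array_function__(self, func, types, args, kwargs):
+            # Handle the NumPy functions we know about; defer otherwise.
+            if func is np.trace:
+                return self._n * self._v
+            return NotImplemented
+
+    # With the protocol enabled, np.trace(DiagonalArray(5, 2.0)) == 10.0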
+
+Better dtypes
+-------------
+
+- Easier custom dtypes
+ - Simplify and/or wrap the current C-API
+ - More consistent support for dtype metadata
+ - Support for writing a dtype in Python
+- New string dtype(s):
+ - Encoded strings with fixed-width storage (utf8, latin1, ...) and/or
+ - Variable length strings (could share implementation with dtype=object, but are explicitly type-checked)
+ - One of these should probably be the default for text data. The current behavior on Python 3 is neither efficient nor user friendly.
+- `np.int` should not be platform dependent
+- better coercion for string + number
+
+Random number generation policy & rewrite
+-----------------------------------------
+
+`NEP 19`_ and a `reference implementation`_
+
+Indexing
+--------
+
+vindex/oindex `NEP 21`_
+
+Infrastructure
+--------------
+
+NumPy is much more than just the code base itself; we also maintain
+docs, CI, benchmarks, etc.
+
+- Rewrite numpy.org
+- Benchmarking: improve the extent of the existing suite, and run & render
+ the results as part of the docs or website.
+
+ - Hardware: find a machine that can reliably run serial benchmarks
+ - ASV produces graphs, could we set up a site? Currently at
+ https://pv.github.io/numpy-bench/, should that become a community resource?
+
+Functionality outside core
+--------------------------
+
+Some things inside NumPy do not actually match the `Scope of NumPy`.
+
+- A backend system for `numpy.fft` (so that e.g. `fft-mkl` doesn't need to monkeypatch numpy)
+
+- Rewrite masked arrays to not be a ndarray subclass -- maybe in a separate project?
+- MaskedArray as a duck-array type, and/or
+- dtypes that support missing values
+
+- Write a strategy on how to deal with overlap between numpy and scipy for `linalg` and `fft` (and implement it).
+
+- Deprecate `np.matrix`
+
+Continuous Integration
+----------------------
+
+We depend on CI to discover problems as we continue to develop NumPy before the
+code reaches downstream users.
+
+- CI for more exotic platforms (e.g. ARM is now available from
+ http://www.shippable.com/, but it is not free).
+- Multi-package testing
+- Add an official channel for numpy dev builds for CI usage by other projects so
+ they may confirm new builds do not break their package.
+
+Typing
+------
+
+Python type annotation syntax should support ndarrays and dtypes.
+
+- Type annotations for NumPy: github.com/numpy/numpy-stubs
+- Support for typing shape and dtype in multi-dimensional arrays in Python more generally
+
+NumPy scalars
+-------------
+
+Numpy has both scalars and zero-dimensional arrays.
+
+- The current implementation adds a large maintenance burden -- can we remove
+ scalars and/or simplify it internally?
+- Zero dimensional arrays get converted into scalars by most NumPy
+ functions (i.e., output of `np.sin(x)` depends on whether `x` is
+ zero-dimensional or not). This inconsistency should be addressed,
+ so that one could, e.g., write sane type annotations.
+
+.. _`NEP 19`: https://www.numpy.org/neps/nep-0019-rng-policy.html
+.. _`NEP 22`: http://www.numpy.org/neps/nep-0022-ndarray-duck-typing-overview.html
+.. _`NEP 18`: https://www.numpy.org/neps/nep-0018-array-function-protocol.html
+.. _implementation: https://gist.github.com/shoyer/1f0a308a06cd96df20879a1ddb8f0006
+.. _`reference implementation`: https://github.com/bashtage/randomgen
+.. _`NEP 21`: https://www.numpy.org/neps/nep-0021-advanced-indexing.html
diff --git a/doc/neps/scope.rst b/doc/neps/scope.rst
new file mode 100644
index 000000000..a675b8c96
--- /dev/null
+++ b/doc/neps/scope.rst
@@ -0,0 +1,46 @@
+==============
+Scope of NumPy
+==============
+
+Here, we describe aspects of N-d array computation that are within scope for NumPy development. This is *not* an aspirational definition of where NumPy should aim, but instead captures the status quo—areas which we have decided to continue supporting, at least for the time being.
+
+- **In-memory, N-dimensional, homogeneously typed (single pointer + strided) arrays on CPUs**
+
+ - Support for a wide range of data types
+ - Not specialized hardware such as GPUs
+ - But, do support wide range of CPUs (e.g. ARM, PowerX)
+
+- **Higher level APIs for N-dimensional arrays**
+
+ - NumPy is a *de facto* standard for array APIs in Python
+ - Indexing and fast iteration over elements (ufunc)
+ - Interoperability protocols with other data container implementations (like `__array_ufunc__`).
+
+- **Python API and a C API** to the ndarray's methods and attributes.
+
+- Other **specialized types or uses of N-dimensional arrays**:
+
+ - Masked arrays
+ - Structured arrays (informally known as record arrays)
+ - Memory mapped arrays
+
+- Historically, NumPy has included the following **basic functionality
+ in support of scientific computation**. We intend to keep supporting
+ (but not to expand) what is currently included:
+
+ - Linear algebra
+ - Fast Fourier transforms and windowing
+ - Pseudo-random number generators
+ - Polynomial fitting
+
+- NumPy provides some **infrastructure for other packages in the scientific Python ecosystem**:
+
+  - numpy.distutils (build support for C++, Fortran, BLAS/LAPACK, and other relevant libraries for scientific computing)
+ - f2py (generating bindings for Fortran code)
+ - testing utilities
+
+- **Speed**: we take performance concerns seriously and aim to execute
+ operations on large arrays with similar performance as native C
+ code. That said, where conflict arises, maintenance and portability take
+ precedence over performance. We aim to prevent regressions where
+ possible (e.g., through asv).
diff --git a/doc/neps/tools/build_index.py b/doc/neps/tools/build_index.py
index 65225c995..d9c4f690b 100644
--- a/doc/neps/tools/build_index.py
+++ b/doc/neps/tools/build_index.py
@@ -40,6 +40,10 @@ def nep_metadata():
tags['Title'] = lines[1].strip()
tags['Filename'] = source
+ if not tags['Title'].startswith(f'NEP {nr} — '):
+ raise RuntimeError(
+ f'Title for NEP {nr} does not start with "NEP {nr} — " '
+                '(note that — here is a special, elongated dash)')
if tags['Status'] in ('Accepted', 'Rejected', 'Withdrawn'):
if not 'Resolution' in tags:
diff --git a/doc/release/1.13.0-notes.rst b/doc/release/1.13.0-notes.rst
index 4554e53ea..3b719db09 100644
--- a/doc/release/1.13.0-notes.rst
+++ b/doc/release/1.13.0-notes.rst
@@ -183,11 +183,11 @@ override the behavior of NumPy's ufuncs. This works quite similarly to Python's
``__mul__`` and other binary operation routines. See the documentation for a
more detailed description of the implementation and behavior of this new
option. The API is provisional, we do not yet guarantee backward compatibility
-as modifications may be made pending feedback. See the NEP_ and
+as modifications may be made pending feedback. See `NEP 13`_ and
documentation_ for more details.
-.. _NEP: https://github.com/numpy/numpy/blob/master/doc/neps/ufunc-overrides.rst
-.. _documentation: https://github.com/charris/numpy/blob/master/doc/source/reference/arrays.classes.rst
+.. _`NEP 13`: http://www.numpy.org/neps/nep-0013-ufunc-overrides.html
+.. _documentation: https://github.com/numpy/numpy/blob/master/doc/source/reference/arrays.classes.rst
New ``positive`` ufunc
----------------------
diff --git a/doc/release/1.13.1-notes.rst b/doc/release/1.13.1-notes.rst
index 807296a85..88a4bc3dd 100644
--- a/doc/release/1.13.1-notes.rst
+++ b/doc/release/1.13.1-notes.rst
@@ -13,7 +13,7 @@ used with 3.6.0 due to Python bug 29943_. NumPy 1.13.2 will be released shortly
after Python 3.6.2 is out to fix that problem. If you are using 3.6.0 the
workaround is to upgrade to 3.6.1 or use an earlier Python version.
-.. _#29943: https://bugs.python.org/issue29943
+.. _29943: https://bugs.python.org/issue29943
Pull requests merged
@@ -21,7 +21,7 @@ Pull requests merged
A total of 19 pull requests were merged for this release.
* #9240 DOC: BLD: fix lots of Sphinx warnings/errors.
-* #9255 Revert "DEP: Raise TypeError for subtract(bool_, bool_)."
+* #9255 Revert "DEP: Raise TypeError for subtract(bool, bool)."
* #9261 BUG: don't elide into readonly and updateifcopy temporaries for...
* #9262 BUG: fix missing keyword rename for common block in numpy.f2py
* #9263 BUG: handle resize of 0d array
diff --git a/doc/release/1.14.0-notes.rst b/doc/release/1.14.0-notes.rst
index 0f14f7703..462631de6 100644
--- a/doc/release/1.14.0-notes.rst
+++ b/doc/release/1.14.0-notes.rst
@@ -14,11 +14,11 @@ dropping Python 2.7 support in the runup to 2020. The decision has been made to
support 2.7 for all releases made in 2018, with the last release being
designated a long term release with support for bug fixes extending through
2019. In 2019 support for 2.7 will be dropped in all new releases. More details
-can be found in the relevant NEP_.
+can be found in `NEP 14`_.
This release supports Python 2.7 and 3.4 - 3.6.
-.. _NEP: https://github.com/numpy/numpy/blob/master/doc/neps/dropping-python2.7-proposal.rst
+.. _`NEP 14`: http://www.numpy.org/neps/nep-0014-dropping-python2.7-proposal.html
Highlights
@@ -134,8 +134,8 @@ are marked readonly. In the past, it was possible to get away with::
var_arr = np.asarray(val)
val_arr += 1 # now errors, previously changed np.ma.masked.data
-``np.ma`` functions producing ``fill_value``s have changed
-----------------------------------------------------------
+``np.ma`` functions producing ``fill_value`` s have changed
+-----------------------------------------------------------
Previously, ``np.ma.default_fill_value`` would return a 0d array, but
``np.ma.minimum_fill_value`` and ``np.ma.maximum_fill_value`` would return a
tuple of the fields. Instead, all three methods return a structured ``np.void``
diff --git a/doc/release/1.14.1-notes.rst b/doc/release/1.14.1-notes.rst
index 2ed4c3e14..7b95c2e28 100644
--- a/doc/release/1.14.1-notes.rst
+++ b/doc/release/1.14.1-notes.rst
@@ -67,7 +67,7 @@ A total of 36 pull requests were merged for this release.
* `#10431 <https://github.com/numpy/numpy/pull/10431>`__: REL: Add 1.14.1 release notes template
* `#10435 <https://github.com/numpy/numpy/pull/10435>`__: MAINT: Use ValueError for duplicate field names in lookup (backport)
* `#10534 <https://github.com/numpy/numpy/pull/10534>`__: BUG: Provide a better error message for out-of-order fields
-* `#10536 <https://github.com/numpy/numpy/pull/10536>`__: BUG: Resize bytes_ columns in genfromtxt (backport of #10401)
+* `#10536 <https://github.com/numpy/numpy/pull/10536>`__: BUG: Resize bytes columns in genfromtxt (backport of #10401)
* `#10537 <https://github.com/numpy/numpy/pull/10537>`__: BUG: multifield-indexing adds padding bytes: revert for 1.14.1
* `#10539 <https://github.com/numpy/numpy/pull/10539>`__: BUG: fix np.save issue with python 2.7.5
* `#10540 <https://github.com/numpy/numpy/pull/10540>`__: BUG: Add missing DECREF in Py2 int() cast
diff --git a/doc/release/1.15.0-notes.rst b/doc/release/1.15.0-notes.rst
index 0e3d2a525..7235ca915 100644
--- a/doc/release/1.15.0-notes.rst
+++ b/doc/release/1.15.0-notes.rst
@@ -99,7 +99,7 @@ Deprecations
* Users of ``nditer`` should use the nditer object as a context manager
anytime one of the iterator operands is writeable, so that numpy can
manage writeback semantics, or should call ``it.close()``. A
- `RuntimeWarning` may be emitted otherwise in these cases.
+ `RuntimeWarning` may be emitted otherwise in these cases.
* The ``normed`` argument of ``np.histogram``, deprecated long ago in 1.6.0,
now emits a ``DeprecationWarning``.
@@ -227,13 +227,6 @@ Changes to ``PyArray_GetDTypeTransferFunction``
significant performance hit, consider implementing ``copyswapn`` to reflect the
implementation of ``PyArray_GetStridedCopyFn``. See `#10898
<https://github.com/numpy/numpy/pull/10898>`__.
-* Functions ``npy_get_floatstatus_barrier`` and ``npy_clear_floatstatus_barrier``
- have been added and should be used in place of the ``npy_get_floatstatus``and
- ``npy_clear_status`` functions. Optimizing compilers like GCC 8.1 and Clang
- were rearranging the order of operations when the previous functions were
- used in the ufunc SIMD functions, resulting in the floatstatus flags being '
- checked before the operation whose status we wanted to check was run.
- See `#10339 <https://github.com/numpy/numpy/issues/10370>`__.
New Features
diff --git a/doc/release/1.16.0-notes.rst b/doc/release/1.16.0-notes.rst
new file mode 100644
index 000000000..de636933f
--- /dev/null
+++ b/doc/release/1.16.0-notes.rst
@@ -0,0 +1,534 @@
+==========================
+NumPy 1.16.0 Release Notes
+==========================
+
+This NumPy release is the last one to support Python 2.7 and will be maintained
+as a long term release with bug fixes until 2020. Support for Python 3.4 has been
+dropped, the supported Python versions are 2.7 and 3.5-3.7. The wheels on PyPI
+are linked with OpenBLAS v0.3.4+, which should fix the known threading issues
+found in previous OpenBLAS versions.
+
+Downstream developers building this release should use Cython >= 0.29 and, if
+using OpenBLAS, OpenBLAS > v0.3.4.
+
+This release has seen a lot of refactoring and features many bug fixes, improved
+code organization, and better cross platform compatibility. Not all of these
+improvements will be visible to users, but they should help make maintenance
+easier going forward.
+
+
+Highlights
+==========
+
+* Experimental support for overriding numpy functions,
+ see ``__array_function__`` below.
+
+* The ``matmul`` function is now a ufunc. This provides better
+ performance and allows overriding with ``__array_ufunc__``.
+
+* Improved support for the ARM and POWER architectures.
+
+* Improved support for AIX and PyPy.
+
+* Improved interop with ctypes.
+
+* Improved support for PEP 3118.
+
+
+
+New functions
+=============
+
+* New functions added to the `numpy.lib.recfunctions` module to ease the
+ structured assignment changes:
+
+ * ``assign_fields_by_name``
+ * ``structured_to_unstructured``
+ * ``unstructured_to_structured``
+ * ``apply_along_fields``
+ * ``require_fields``
+
+  See the user guide at https://docs.scipy.org/doc/numpy/user/basics.rec.html
+ for more info.
+
+
+New deprecations
+================
+
+* The type dictionaries `numpy.core.typeNA` and `numpy.core.sctypeNA` are
+ deprecated. They were buggy and not documented and will be removed in the
+  1.18 release. Use `numpy.sctypeDict` instead.
+
+* The `numpy.asscalar` function is deprecated. It is an alias for the more
+  powerful `numpy.ndarray.item`, is not tested, and fails for scalars.
+
+* The `numpy.set_array_ops` and `numpy.get_array_ops` functions are deprecated.
+ As part of `NEP 15`, they have been deprecated along with the C-API functions
+ :c:func:`PyArray_SetNumericOps` and :c:func:`PyArray_GetNumericOps`. Users
+ who wish to override the inner loop functions in built-in ufuncs should use
+ :c:func:`PyUFunc_ReplaceLoopBySignature`.
+
+* The `numpy.unravel_index` keyword argument ``dims`` is deprecated, use
+ ``shape`` instead.
+
+* The `numpy.histogram` ``normed`` argument is deprecated. It was deprecated
+ previously, but no warning was issued.
+
+* The ``positive`` operator (``+``) applied to non-numerical arrays is
+ deprecated. See below for details.
+
+* Passing an iterator to the stack functions is deprecated.
+
+
+Expired deprecations
+====================
+
+* NaT comparisons now return ``False`` without a warning, finishing a
+ deprecation cycle begun in NumPy 1.11.
+
+* ``np.lib.function_base.unique`` was removed, finishing a deprecation cycle
+ begun in NumPy 1.4. Use `numpy.unique` instead.
+
+* multi-field indexing now returns views instead of copies, finishing a
+ deprecation cycle begun in NumPy 1.7. The change was previously attempted in
+ NumPy 1.14 but reverted until now.
+
+* ``np.PackageLoader`` and ``np.pkgload`` have been removed. These were
+ deprecated in 1.10, had no tests, and seem to no longer work in 1.15.
+
+
+Future changes
+==============
+
+* NumPy 1.17 will drop support for Python 2.7.
+
+
+Compatibility notes
+===================
+
+f2py script on Windows
+----------------------
+On Windows, the installed script for running f2py is now an ``.exe`` file
+rather than a ``*.py`` file and should be run from the command line as ``f2py``
+whenever the ``Scripts`` directory is in the path. Running ``f2py`` as a module
+``python -m numpy.f2py [...]`` will work without path modification in any
+version of NumPy.
+
+NaT comparisons
+---------------
+Consistent with the behavior of NaN, all comparisons other than inequality
+checks with datetime64 or timedelta64 NaT ("not-a-time") values now always
+return ``False``, and inequality checks with NaT now always return ``True``.
+This includes comparisons between NaT values. For compatibility with the
+old behavior, use ``np.isnat`` to explicitly check for NaT or convert
+datetime64/timedelta64 arrays with ``.astype(np.int64)`` before making
+comparisons.
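+
+For example::
+
+    >>> nat = np.datetime64('NaT')
+    >>> nat == nat     # equality (and ordering) with NaT is always False
+    False
+    >>> nat != nat     # inequality with NaT is always True
+    True
+    >>> np.isnat(nat)  # explicit check for NaT
+    True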
+
+complex64/128 alignment has changed
+-----------------------------------
+The memory alignment of complex types is now the same as a C-struct composed of
+two floating point values, while before it was equal to the size of the type.
+For many users (for instance on x64/unix/gcc) this means that complex64 is now
+4-byte aligned instead of 8-byte aligned. An important consequence is that
+aligned structured dtypes may now have a different size. For instance,
+``np.dtype('c8,u1', align=True)`` used to have an itemsize of 16 (on x64/gcc)
+but now it is 12.
+
+In more detail, the complex64 type now has the same alignment as a C-struct
+``struct {float r, i;}``, according to the compiler used to compile numpy, and
+similarly for the complex128 and complex256 types.
+
+nd_grid __len__ removal
+-----------------------
+``len(np.mgrid)`` and ``len(np.ogrid)`` are now considered nonsensical
+and raise a ``TypeError``.
+
+``np.unravel_index`` now accepts ``shape`` keyword argument
+-----------------------------------------------------------
+Previously, only the ``dims`` keyword argument was accepted
+for specification of the shape of the array to be used
+for unraveling. ``dims`` remains supported, but is now deprecated.
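+
+For example::
+
+    >>> np.unravel_index(22, shape=(5, 5))
+    (4, 2)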
+
+multi-field views return a view instead of a copy
+-------------------------------------------------
+Indexing a structured array with multiple fields, e.g., ``arr[['f1', 'f3']]``,
+returns a view into the original array instead of a copy. The returned view
+will often have extra padding bytes corresponding to intervening fields in the
+original array, unlike before, which will affect code such as
+``arr[['f1', 'f3']].view('float64')``. This change has been planned since numpy
+1.7. Operations hitting this path have emitted ``FutureWarnings`` since then.
+Additional ``FutureWarnings`` about this change were added in 1.12.
+
+To help users update their code to account for these changes, a number of
+functions have been added to the ``numpy.lib.recfunctions`` module which
+safely allow such operations. For instance, the code above can be replaced
+with ``structured_to_unstructured(arr[['f1', 'f3']], dtype='float64')``.
+See the "accessing multiple fields" section of the
+`user guide <https://docs.scipy.org/doc/numpy/user/basics.rec.html#accessing-multiple-fields>`__.
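+
+For example::
+
+    >>> from numpy.lib.recfunctions import structured_to_unstructured
+    >>> arr = np.zeros(3, dtype=[('f1', 'f8'), ('f2', 'i4'), ('f3', 'f8')])
+    >>> v = arr[['f1', 'f3']]   # now a view, not a copy
+    >>> np.shares_memory(v, arr)
+    True
+    >>> structured_to_unstructured(v, dtype='float64')
+    array([[0., 0.],
+           [0., 0.],
+           [0., 0.]])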
+
+
+C API changes
+=============
+
+The :c:data:`NPY_API_VERSION` was incremented to 0x0000D, due to the addition
+of:
+
+* :c:member:`PyUFuncObject.core_dim_flags`
+* :c:member:`PyUFuncObject.core_dim_sizes`
+* :c:member:`PyUFuncObject.identity_value`
+* :c:function:`PyUFunc_FromFuncAndDataAndSignatureAndIdentity`
+
+
+New Features
+============
+
+Integrated squared error (ISE) estimator added to ``histogram``
+---------------------------------------------------------------
+This method (``bins='stone'``) for optimizing the bin number is a
+generalization of Scott's rule. Scott's rule assumes the distribution
+is approximately normal, while the ISE_ is a non-parametric method based on
+cross-validation.
+
+.. _ISE: https://en.wikipedia.org/wiki/Histogram#Minimizing_cross-validation_estimated_squared_error
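+
+For example (the resulting bin count depends on the sampled data)::
+
+    >>> data = np.random.normal(size=1000)
+    >>> counts, edges = np.histogram(data, bins='stone')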
+
+``max_rows`` keyword added for ``np.loadtxt``
+---------------------------------------------
+New keyword ``max_rows`` in `numpy.loadtxt` sets the maximum rows of the
+content to be read after ``skiprows``, as in `numpy.genfromtxt`.
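+
+For example::
+
+    >>> from io import StringIO
+    >>> np.loadtxt(StringIO("0 1\n2 3\n4 5\n"), max_rows=2)
+    array([[0., 1.],
+           [2., 3.]])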
+
+modulus operator support added for ``np.timedelta64`` operands
+--------------------------------------------------------------
+The modulus (remainder) operator is now supported for two operands
+of type ``np.timedelta64``. The operands may have different units
+and the return value will match the type of the operands.
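+
+For example::
+
+    >>> np.timedelta64(7, 's') % np.timedelta64(4, 's')
+    numpy.timedelta64(3,'s')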
+
+
+Improvements
+============
+
+no-copy pickling of numpy arrays
+--------------------------------
+Up to protocol 4, numpy array pickling created 2 spurious copies of the data
+being serialized. With pickle protocol 5, and the ``PickleBuffer`` API, a
+large variety of numpy arrays can now be serialized without any copy using
+out-of-band buffers, and with one less copy using in-band buffers. This
+results, for large arrays, in an up to 66% drop in peak memory usage.
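+
+A sketch of out-of-band pickling (protocol 5 requires Python 3.8+ or the
+external ``pickle5`` backport; names here are illustrative)::
+
+    import pickle
+    import numpy as np
+
+    arr = np.zeros(2 ** 20)
+    buffers = []
+    # With buffer_callback, the array data travels out-of-band in
+    # `buffers` instead of being copied into the pickle stream.
+    payload = pickle.dumps(arr, protocol=5, buffer_callback=buffers.append)
+    roundtripped = pickle.loads(payload, buffers=buffers)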
+
+build shell independence
+------------------------
+NumPy builds should no longer interact with the host machine
+shell directly. ``exec_command`` has been replaced with
+``subprocess.check_output`` where appropriate.
+
+`np.polynomial.Polynomial` classes render in LaTeX in Jupyter notebooks
+-----------------------------------------------------------------------
+When used in a front-end that supports it, `Polynomial` instances are now
+rendered through LaTeX. The current format is experimental, and is subject to
+change.
+
+``randint`` and ``choice`` now work on empty distributions
+----------------------------------------------------------
+Previously, ``np.random.randint`` and ``np.random.choice`` raised an error
+when the arguments described an empty distribution, even when no elements
+needed to be drawn. This has been fixed so that e.g.
+``np.random.choice([], 0) == np.array([], dtype=float64)``.
+
+``linalg.lstsq``, ``linalg.qr``, and ``linalg.svd`` now work with empty arrays
+------------------------------------------------------------------------------
+Previously, a ``LinAlgError`` would be raised when empty matrices
+(with zero rows and/or columns) were passed in. Now outputs of
+appropriate shapes are returned.
+
+Chain exceptions to give better error messages for invalid PEP3118 format strings
+---------------------------------------------------------------------------------
+This should help track down problems.
+
+Einsum optimization path updates and efficiency improvements
+------------------------------------------------------------
+Einsum was synchronized with the current upstream work.
+
+`numpy.angle` and `numpy.expand_dims` now work on ``ndarray`` subclasses
+------------------------------------------------------------------------
+In particular, they now work for masked arrays.
+
+``NPY_NO_DEPRECATED_API`` compiler warning suppression
+------------------------------------------------------
+Setting ``NPY_NO_DEPRECATED_API`` to a value of 0 will suppress the current compiler
+warnings when the deprecated numpy API is used.
+
+``np.diff`` added kwargs ``prepend`` and ``append``
+-----------------------------------------------------
+New kwargs ``prepend`` and ``append`` allow values to be inserted on
+either end of the differences, similar to the options of `ediff1d`. The
+inverse of `cumsum` can now be obtained easily via ``prepend=0``.
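+
+For example::
+
+    >>> a = np.array([1, 2, 3, 4])
+    >>> np.diff(np.cumsum(a), prepend=0)
+    array([1, 2, 3, 4])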
+
+ARM support updated
+-------------------
+Support for ARM CPUs has been updated to accommodate 32 and 64 bit targets,
+and also big and little endian byte ordering. AARCH32 memory alignment issues
+have been addressed. CI testing has been expanded to include AARCH64 targets
+via the services of shippable.com.
+
+Appending to build flags
+------------------------
+`numpy.distutils` has always overridden rather than appended to `LDFLAGS` and
+other such environment variables for compiling Fortran extensions.
+Now, if the `NPY_DISTUTILS_APPEND_FLAGS` environment variable is set to 1, the
+behavior will be appending. This applies to: `LDFLAGS`, `F77FLAGS`,
+`F90FLAGS`, `FREEFLAGS`, `FOPT`, `FDEBUG`, and `FFLAGS`. See gh-11525 for more
+details.
+
+Generalized ufunc signatures now allow fixed-size dimensions
+------------------------------------------------------------
+By using a numerical value in the signature of a generalized ufunc, one can
+indicate that the given function requires input or output to have dimensions
+with the given size. E.g., the signature of a function that converts a polar
+angle to a two-dimensional cartesian unit vector would be ``()->(2)``; that
+for one that converts two spherical angles to a three-dimensional unit vector
+would be ``(),()->(3)``; and that for the cross product of two
+three-dimensional vectors would be ``(3),(3)->(3)``.
+
+Note that to the elementary function these dimensions are not treated any
+differently from variable ones indicated with a name starting with a letter;
+the loop is still passed the corresponding size, but it can now count on that
+size being equal to the fixed one given in the signature.
+
+Generalized ufunc signatures now allow flexible dimensions
+----------------------------------------------------------
+Some functions, in particular numpy's implementation of ``@`` as ``matmul``,
+are very similar to generalized ufuncs in that they operate over core
+dimensions, but one could not present them as such because they were able to
+deal with inputs in which a dimension is missing. To support this, it is now
+allowed to postfix a dimension name with a question mark to indicate that the
+dimension does not necessarily have to be present.
+
+With this addition, the signature for ``matmul`` can be expressed as
+``(m?,n),(n,p?)->(m?,p?)``. This indicates that if, e.g., the second operand
+has only one dimension, for the purposes of the elementary function it will be
+treated as if that input has core shape ``(n, 1)``, and the output has the
+corresponding core shape of ``(m, 1)``. The actual output array, however, has
+the flexible dimension removed, i.e., it will have shape ``(..., m)``.
+Similarly, if both arguments have only a single dimension, the inputs will be
+presented as having shapes ``(1, n)`` and ``(n, 1)`` to the elementary
+function, and the output as ``(1, 1)``, while the actual output array returned
+will have shape ``()``. In this way, the signature allows one to use a
+single elementary function for four related but different signatures,
+``(m,n),(n,p)->(m,p)``, ``(n),(n,p)->(p)``, ``(m,n),(n)->(m)`` and
+``(n),(n)->()``.
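+
+The flexible dimensions are visible in the result shapes::
+
+    >>> np.matmul(np.ones((2, 3)), np.ones((3, 4))).shape
+    (2, 4)
+    >>> np.matmul(np.ones((2, 3)), np.ones(3)).shape   # p? missing
+    (2,)
+    >>> np.matmul(np.ones(3), np.ones(3)).shape        # m? and p? missing
+    ()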
+
+``np.clip`` and the ``clip`` method check for memory overlap
+------------------------------------------------------------
+The ``out`` argument to these functions is now always tested for memory overlap
+to avoid corrupted results when memory overlap occurs.
+
+New value ``unscaled`` for option ``cov`` in ``np.polyfit``
+-----------------------------------------------------------
+A further possible value has been added to the ``cov`` parameter of the
+``np.polyfit`` function. With ``cov='unscaled'`` the scaling of the covariance
+matrix is disabled completely (similar to setting ``absolute_sigma=True`` in
+``scipy.optimize.curve_fit``). This is useful in situations where the
+weights are given by 1/sigma, with sigma being the (known) standard errors of
+(Gaussian distributed) data points, in which case the unscaled matrix is
+already a correct estimate of the covariance matrix.
+
+Detailed docstrings for scalar numeric types
+--------------------------------------------
+The ``help`` function, when applied to numeric types such as `numpy.intc`,
+`numpy.int_`, and `numpy.longlong`, now lists all of the aliased names for that
+type, distinguishing between platform-dependent and platform-independent aliases.
+
+``__module__`` attribute now points to public modules
+-----------------------------------------------------
+The ``__module__`` attribute on most NumPy functions has been updated to refer
+to the preferred public module from which to access a function, rather than
+the module in which the function happens to be defined. This produces more
+informative displays for functions in tools such as IPython, e.g., instead of
+``<function 'numpy.core.fromnumeric.sum'>`` you now see
+``<function 'numpy.sum'>``.
+
+Large allocations marked as suitable for transparent hugepages
+--------------------------------------------------------------
+On systems that support transparent hugepages over the madvise system call,
+numpy now marks large memory allocations as eligible to be backed by hugepages,
+which reduces page fault overhead and can in some fault-heavy cases improve
+performance significantly. On Linux the setting for huge pages to be used,
+`/sys/kernel/mm/transparent_hugepage/enabled`, must be at least `madvise`.
+Systems which already have it set to `always` will not see much difference as
+the kernel will automatically use huge pages where appropriate.
+
+Users of very old Linux kernels (~3.x and older) should make sure that
+`/sys/kernel/mm/transparent_hugepage/defrag` is not set to `always` to avoid
+performance problems due to concurrency issues in the memory defragmentation.
+
+Alpine Linux (and other musl c library distros) support
+-------------------------------------------------------
+We now default to using `fenv.h` for floating point status error reporting.
+Previously we had a broken default that sometimes would not report underflow,
+overflow, and invalid floating point operations. Now we can support non-glibc
+distributions like Alpine Linux as long as they ship `fenv.h`.
+
+Speedup ``np.block`` for large arrays
+-------------------------------------
+Large arrays (greater than ``512 * 512``) now use a blocking algorithm based on
+copying the data directly into the appropriate slice of the resulting array.
+This results in significant speedups for these large arrays, particularly for
+arrays being blocked along more than 2 dimensions.
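+
+For reference, a typical call that benefits once the blocks are large::
+
+    >>> import numpy as np
+    >>> A, B = np.ones((600, 600)), np.zeros((600, 600))
+    >>> np.block([[A, B], [B, A]]).shape
+    (1200, 1200)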
+
+``arr.ctypes.data_as(...)`` holds a reference to arr
+----------------------------------------------------
+Previously the caller was responsible for keeping the array alive for the
+lifetime of the pointer.
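+
+For example, the pointer below now keeps its source array alive::
+
+    >>> import ctypes
+    >>> import numpy as np
+    >>> ptr = np.arange(3.).ctypes.data_as(ctypes.POINTER(ctypes.c_double))
+    >>> ptr[0]   # safe even though no other reference to the array exists
+    0.0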
+
+Speedup ``np.take`` for read-only arrays
+----------------------------------------
+The implementation of ``np.take`` no longer makes an unnecessary copy of the
+source array when its ``writeable`` flag is set to ``False``.
+
+Support path-like objects for more functions
+--------------------------------------------
+The ``np.core.records.fromfile`` function now supports ``pathlib.Path``
+and other path-like objects in addition to a file object. Furthermore, the
+``np.load`` function now also supports path-like objects when using memory
+mapping (``mmap_mode`` keyword argument).
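+
+A short sketch, assuming a file ``data.npy`` exists on disk::
+
+    >>> import numpy as np
+    >>> from pathlib import Path
+    >>> a = np.load(Path('data.npy'), mmap_mode='r')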
+
+Better behaviour of ufunc identities during reductions
+------------------------------------------------------
+Universal functions have an ``.identity`` which is used when ``.reduce`` is
+called on an empty axis.
+
+As of this release, the logical binary ufuncs, `logical_and`, `logical_or`,
+and `logical_xor`, now have identities of type `bool`, where previously they
+were of type `int`. This restores the 1.14 behavior of getting ``bool`` values
+when reducing empty object arrays with these ufuncs, while also keeping the
+1.15 behavior of getting ``int`` values when reducing empty object arrays with
+arithmetic ufuncs like ``add`` and ``multiply``.
+
+Additionally, `logaddexp` now has an identity of ``-inf``, allowing it to be
+called on empty sequences, where previously it could not be.
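+
+For example::
+
+    >>> import numpy as np
+    >>> np.logical_and.identity
+    True
+    >>> np.logaddexp.reduce([])
+    -inf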
+
+This is possible thanks to the new
+:c:function:`PyUFunc_FromFuncAndDataAndSignatureAndIdentity`, which allows
+arbitrary values to be used as identities.
+
+Improved conversion from ctypes objects
+---------------------------------------
+Numpy has always supported taking a value or type from ``ctypes`` and
+converting it into an array or dtype, but only behaved correctly for simpler
+types. As of this release, this caveat is lifted. Now:
+
+* The ``_pack_`` attribute of ``ctypes.Structure``, used to emulate C's
+ ``__attribute__((packed))``, is respected.
+* Endianness of all ctypes objects is preserved.
+* ``ctypes.Union`` is supported.
+* Non-representable constructs raise exceptions, rather than producing
+ dangerously incorrect results:
+
+ * Bitfields are no longer interpreted as sub-arrays
+ * Pointers are no longer replaced with the type that they point to
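+
+A small illustration of the ``_pack_`` support::
+
+    >>> import ctypes
+    >>> import numpy as np
+    >>> class Packed(ctypes.Structure):
+    ...     _pack_ = 1
+    ...     _fields_ = [('a', ctypes.c_uint8), ('b', ctypes.c_uint32)]
+    >>> np.dtype(Packed).itemsize   # no padding between the fields
+    5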
+
+A new ``ndpointer.contents`` member
+-----------------------------------
+This matches the ``.contents`` member of normal ctypes arrays, and can be used
+to construct an ``np.array`` around the pointer's contents. This replaces
+``np.array(some_nd_pointer)``, which stopped working in 1.15. As a side effect
+of this change, ``ndpointer`` now supports dtypes with overlapping fields and
+padding.
+
+``matmul`` is now a ``ufunc``
+-----------------------------
+`numpy.matmul` is now a ufunc which means that both the function and the
+``__matmul__`` operator can now be overridden by ``__array_ufunc__``. Its
+implementation has also changed. It uses the same BLAS routines as
+`numpy.dot`, ensuring its performance is similar for large matrices.
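+
+For example::
+
+    >>> import numpy as np
+    >>> isinstance(np.matmul, np.ufunc)
+    True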
+
+Start and stop arrays for ``linspace``, ``logspace`` and ``geomspace``
+----------------------------------------------------------------------
+These functions used to be limited to scalar start and stop values, but can
+now take arrays, which will be properly broadcast and result in an output
+with one axis prepended. This can be used, e.g., to obtain linearly
+interpolated points between sets of points.
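+
+For example::
+
+    >>> import numpy as np
+    >>> np.linspace([0, 10], [1, 20], num=3)
+    array([[ 0. , 10. ],
+           [ 0.5, 15. ],
+           [ 1. , 20. ]])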
+
+CI extended with additional services
+------------------------------------
+We now use additional free CI services, thanks to the companies that provide
+them:
+
+* Code coverage testing via codecov.io
+* Arm testing via shippable.com
+* Additional test runs on Azure Pipelines
+
+These are in addition to our continued use of Travis, AppVeyor (for wheels)
+and LGTM.
+
+
+Changes
+=======
+
+Comparison ufuncs will now error rather than return NotImplemented
+------------------------------------------------------------------
+Previously, comparison ufuncs such as ``np.equal`` would return
+`NotImplemented` if their arguments had structured dtypes, to help comparison
+operators such as ``__eq__`` deal with those. This is no longer needed, as the
+relevant logic has moved to the comparison operators proper (which thus
+continue to return `NotImplemented` as needed). Hence, like all other ufuncs,
+the comparison ufuncs will now error on structured dtypes.
+
+Positive will now raise a deprecation warning for non-numerical arrays
+----------------------------------------------------------------------
+Previously, ``+array`` unconditionally returned a copy. Now, it will
+raise a ``DeprecationWarning`` if the array is not numerical (i.e., if
+``np.positive(array)`` raises a ``TypeError``). For ``ndarray``
+subclasses that override the default ``__array_ufunc__`` implementation,
+the ``TypeError`` is passed on.
+
+``NDArrayOperatorsMixin`` now implements matrix multiplication
+--------------------------------------------------------------
+Previously, ``np.lib.mixins.NDArrayOperatorsMixin`` did not implement the
+special methods for Python's matrix multiplication operator (``@``). This has
+changed now that ``matmul`` is a ufunc and can be overridden using
+``__array_ufunc__``.
+
+The scaling of the covariance matrix in ``np.polyfit`` is different
+-------------------------------------------------------------------
+So far, ``np.polyfit`` used a non-standard factor in the scaling of the
+covariance matrix. Namely, rather than using the standard ``chisq/(M-N)``, it
+scaled it with ``chisq/(M-N-2)``, where M is the number of data points and N
+is the number of parameters. This scaling is inconsistent with other fitting
+programs such as ``scipy.optimize.curve_fit`` and was changed to
+``chisq/(M-N)``.
+
+``maximum`` and ``minimum`` no longer emit warnings
+---------------------------------------------------
+As part of code introduced in 1.10, ``float32`` and ``float64`` set the
+invalid float status when a NaN is encountered in `numpy.maximum` and
+`numpy.minimum` when using SSE2 semantics. This caused a `RuntimeWarning` to
+sometimes be emitted. In 1.15 we fixed inconsistencies which caused the
+warnings to become more conspicuous. Now no warnings will be emitted.
+
+Umath and multiarray c-extension modules merged into a single module
+--------------------------------------------------------------------
+The two modules were merged, according to `NEP 15`_. Previously `np.core.umath`
+and `np.core.multiarray` were separate c-extension modules. They are now
+python wrappers to the single `np.core._multiarray_umath` c-extension module.
+
+.. _`NEP 15` : http://www.numpy.org/neps/nep-0015-merge-multiarray-umath.html
+
+``getfield`` validity checks extended
+-------------------------------------
+`numpy.ndarray.getfield` now checks the dtype and offset arguments to prevent
+accessing invalid memory locations.
+
+NumPy functions now support overrides with ``__array_function__``
+-----------------------------------------------------------------
+It is now possible to override the implementation of almost all NumPy functions
+on non-NumPy arrays by defining a ``__array_function__`` method, as described
+in `NEP 18`_. The sole exception is functions for explicitly casting to NumPy
+arrays, such as ``np.array``. As noted in the NEP, this feature remains
+experimental and the details of how to implement such overrides may change in
+the future.
+
+.. _`NEP 18` : http://www.numpy.org/neps/nep-0018-array-function-protocol.html
+
+Arrays based off readonly buffers cannot be set ``writeable``
+-------------------------------------------------------------
+We now disallow setting the ``writeable`` flag to ``True`` on arrays created
+from ``fromstring(readonly-buffer)``.
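+
+A sketch of the new behaviour, using ``np.frombuffer`` on a readonly
+``bytes`` object::
+
+    >>> import numpy as np
+    >>> a = np.frombuffer(b'\x00' * 8, dtype=np.int64)
+    >>> a.flags.writeable = True   # now raises ValueError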
diff --git a/doc/release/1.3.0-notes.rst b/doc/release/1.3.0-notes.rst
index 3ec93e0b0..239714246 100644
--- a/doc/release/1.3.0-notes.rst
+++ b/doc/release/1.3.0-notes.rst
@@ -14,7 +14,7 @@ Python 2.6 support
Python 2.6 is now supported on all previously supported platforms, including
windows.
-http://www.python.org/dev/peps/pep-0361/
+https://www.python.org/dev/peps/pep-0361/
Generalized ufuncs
------------------
@@ -235,7 +235,7 @@ This should make the porting to new platforms easier, and more robust. In
particular, the configuration stage does not need to execute any code on the
target platform, which is a first step toward cross-compilation.
-http://numpy.github.io/neps/math_config_clean.html
+https://www.numpy.org/neps/nep-0003-math_config_clean.html
umath refactor
--------------
@@ -247,7 +247,7 @@ Improvements to build warnings
Numpy can now build with -W -Wall without warnings
-http://numpy.github.io/neps/warnfix.html
+https://www.numpy.org/neps/nep-0002-warnfix.html
Separate core math library
--------------------------
diff --git a/doc/release/1.7.0-notes.rst b/doc/release/1.7.0-notes.rst
index 72aab4d4f..f111f80dc 100644
--- a/doc/release/1.7.0-notes.rst
+++ b/doc/release/1.7.0-notes.rst
@@ -101,7 +101,7 @@ to NumPy 1.6:
The notes in `doc/source/reference/arrays.datetime.rst <https://github.com/numpy/numpy/blob/maintenance/1.7.x/doc/source/reference/arrays.datetime.rst>`_
(also available in the online docs at `arrays.datetime.html
-<http://docs.scipy.org/doc/numpy/reference/arrays.datetime.html>`_) should be
+<https://docs.scipy.org/doc/numpy/reference/arrays.datetime.html>`_) should be
consulted for more details.
Custom formatter for printing arrays
@@ -280,9 +280,9 @@ The macros in old_defines.h are deprecated and will be removed in the next
major release (>= 2.0). The sed script tools/replace_old_macros.sed can be
used to replace these macros with the newer versions.
-You can test your code against the deprecated C API by #defining
-NPY_NO_DEPRECATED_API to the target version number, for example
-NPY_1_7_API_VERSION, before including any NumPy headers.
+You can test your code against the deprecated C API by adding a line
+composed of ``#define NPY_NO_DEPRECATED_API`` and the target version number,
+such as ``NPY_1_7_API_VERSION``, before including any NumPy headers.
The ``NPY_CHAR`` member of the ``NPY_TYPES`` enum is deprecated and will be
removed in NumPy 1.8. See the discussion at
diff --git a/doc/release/template.rst b/doc/release/template.rst
new file mode 100644
index 000000000..db9458ac1
--- /dev/null
+++ b/doc/release/template.rst
@@ -0,0 +1,43 @@
+==========================
+NumPy 1.xx.x Release Notes
+==========================
+
+
+Highlights
+==========
+
+
+New functions
+=============
+
+
+New deprecations
+================
+
+
+Expired deprecations
+====================
+
+
+Future changes
+==============
+
+
+Compatibility notes
+===================
+
+
+C API changes
+=============
+
+
+New Features
+============
+
+
+Improvements
+============
+
+
+Changes
+=======
diff --git a/doc/release/time_based_proposal.rst b/doc/release/time_based_proposal.rst
index 555be6863..2eb13562d 100644
--- a/doc/release/time_based_proposal.rst
+++ b/doc/release/time_based_proposal.rst
@@ -123,7 +123,7 @@ References
* Proposed schedule for Gnome from Havoc Pennington (one of the core
GTK and Gnome manager):
- http://mail.gnome.org/archives/gnome-hackers/2002-June/msg00041.html
+ https://mail.gnome.org/archives/gnome-hackers/2002-June/msg00041.html
The proposed schedule is heavily based on this email
- * http://live.gnome.org/ReleasePlanning/Freezes
+ * https://wiki.gnome.org/ReleasePlanning/Freezes
diff --git a/doc/source/_templates/autosummary/attribute.rst b/doc/source/_templates/autosummary/attribute.rst
new file mode 100644
index 000000000..a6ed600ef
--- /dev/null
+++ b/doc/source/_templates/autosummary/attribute.rst
@@ -0,0 +1,10 @@
+:orphan:
+
+{{ fullname | escape | underline}}
+
+.. currentmodule:: {{ module }}
+
+attribute
+
+.. auto{{ objtype }}:: {{ objname }}
+
diff --git a/doc/source/_templates/autosummary/member.rst b/doc/source/_templates/autosummary/member.rst
new file mode 100644
index 000000000..f1f30e123
--- /dev/null
+++ b/doc/source/_templates/autosummary/member.rst
@@ -0,0 +1,11 @@
+:orphan:
+
+{{ fullname | escape | underline}}
+
+.. currentmodule:: {{ module }}
+
+member
+
+.. auto{{ objtype }}:: {{ objname }}
+
+
diff --git a/doc/source/_templates/autosummary/method.rst b/doc/source/_templates/autosummary/method.rst
new file mode 100644
index 000000000..8abda8677
--- /dev/null
+++ b/doc/source/_templates/autosummary/method.rst
@@ -0,0 +1,10 @@
+:orphan:
+
+{{ fullname | escape | underline}}
+
+.. currentmodule:: {{ module }}
+
+method
+
+.. auto{{ objtype }}:: {{ objname }}
+
diff --git a/doc/source/_templates/indexcontent.html b/doc/source/_templates/indexcontent.html
index fbd8930ae..008eaaa7c 100644
--- a/doc/source/_templates/indexcontent.html
+++ b/doc/source/_templates/indexcontent.html
@@ -36,7 +36,7 @@
<td width="50%">
<p class="biglink"><a class="biglink" href="{{ pathto("bugs") }}">Reporting bugs</a></p>
<p class="biglink"><a class="biglink" href="{{ pathto("about") }}">About NumPy</a></p>
- <p class="biglink"><a class="biglink" href="http://www.numpy.org/neps/index.html">
+ <p class="biglink"><a class="biglink" href="https://www.numpy.org/neps/index.html">
NumPy Enhancement Proposals</a><br/>
</td><td width="50%">
<p class="biglink"><a class="biglink" href="{{ pathto("release") }}">Release Notes</a></p>
diff --git a/doc/source/_templates/indexsidebar.html b/doc/source/_templates/indexsidebar.html
index 9edb003af..51e7c4308 100644
--- a/doc/source/_templates/indexsidebar.html
+++ b/doc/source/_templates/indexsidebar.html
@@ -1,4 +1,4 @@
<h3>Resources</h3>
<ul>
- <li><a href="http://scipy.org/">Scipy.org website</a></li>
+ <li><a href="https://scipy.org/">Scipy.org website</a></li>
</ul>
diff --git a/doc/source/about.rst b/doc/source/about.rst
index 776488ea4..5ac4facbb 100644
--- a/doc/source/about.rst
+++ b/doc/source/about.rst
@@ -18,9 +18,7 @@ data types can be defined. This allows *NumPy* to seamlessly and
speedily integrate with a wide variety of databases.
NumPy is a successor for two earlier scientific Python libraries:
-NumPy derives from the old *Numeric* code base and can be used
-as a replacement for *Numeric*. It also adds the features introduced
-by *Numarray* and can also be used to replace *Numarray*.
+Numeric and Numarray.
NumPy community
---------------
@@ -32,13 +30,13 @@ even better, contact us and participate in fixing the problem.
Our main means of communication are:
-- `scipy.org website <http://scipy.org/>`__
+- `scipy.org website <https://scipy.org/>`__
-- `Mailing lists <http://scipy.org/Mailing_Lists>`__
+- `Mailing lists <https://scipy.org/scipylib/mailing-lists.html>`__
- `NumPy Issues <https://github.com/numpy/numpy/issues>`__ (bug reports go here)
-- `Old NumPy Trac <http://projects.scipy.org/numpy>`__ (no longer used)
+- `Old NumPy Trac <http://projects.scipy.org/numpy>`__ (dead link)
More information about the development of NumPy can be found at our `Developer Zone <https://scipy.scipy.org/scipylib/dev-zone.html>`__.
diff --git a/doc/source/bugs.rst b/doc/source/bugs.rst
index 950934b14..304a4136a 100644
--- a/doc/source/bugs.rst
+++ b/doc/source/bugs.rst
@@ -5,7 +5,7 @@ Reporting bugs
File bug reports or feature requests, and make contributions
(e.g. code patches), by opening a "new issue" on GitHub:
-- NumPy Issues: http://github.com/numpy/numpy/issues
+- NumPy Issues: https://github.com/numpy/numpy/issues
Please give as much information as you can in the ticket. It is extremely
useful if you can supply a small self-contained code snippet that reproduces
@@ -15,5 +15,5 @@ the milestone.
Report bugs to the appropriate GitHub project (there is one for NumPy
and a different one for SciPy).
-More information can be found on the http://scipy.org/Developer_Zone
-website.
+More information can be found on the
+https://www.scipy.org/scipylib/dev-zone.html website.
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 5ba0cfe63..072a3b44e 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -39,7 +39,7 @@ source_suffix = '.rst'
# General substitutions.
project = 'NumPy'
-copyright = '2008-2018, The SciPy community'
+copyright = '2008-2019, The SciPy community'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
@@ -104,8 +104,8 @@ if 'scipyorg' in tags:
"edit_link": True,
"sidebar": "right",
"scipy_org_logo": True,
- "rootlinks": [("http://scipy.org/", "Scipy.org"),
- ("http://docs.scipy.org/", "Docs")]
+ "rootlinks": [("https://scipy.org/", "Scipy.org"),
+ ("https://docs.scipy.org/", "Docs")]
}
else:
# Default build
@@ -350,8 +350,8 @@ def linkcode_resolve(domain, info):
fn = relpath(fn, start=dirname(numpy.__file__))
if 'dev' in numpy.__version__:
- return "http://github.com/numpy/numpy/blob/master/numpy/%s%s" % (
+ return "https://github.com/numpy/numpy/blob/master/numpy/%s%s" % (
fn, linespec)
else:
- return "http://github.com/numpy/numpy/blob/v%s/numpy/%s%s" % (
+ return "https://github.com/numpy/numpy/blob/v%s/numpy/%s%s" % (
numpy.__version__, fn, linespec)
diff --git a/doc/source/contents.rst b/doc/source/contents.rst
index fad9be76e..019dcc71d 100644
--- a/doc/source/contents.rst
+++ b/doc/source/contents.rst
@@ -8,6 +8,7 @@ NumPy manual contents
reference/index
f2py/index
dev/index
+ docs/index
release
about
bugs
diff --git a/doc/source/dev/conduct/code_of_conduct.rst b/doc/source/dev/conduct/code_of_conduct.rst
new file mode 100644
index 000000000..aca39d8a7
--- /dev/null
+++ b/doc/source/dev/conduct/code_of_conduct.rst
@@ -0,0 +1,163 @@
+NumPy Code of Conduct
+=====================
+
+
+Introduction
+------------
+
+This code of conduct applies to all spaces managed by the NumPy project,
+including all public and private mailing lists, issue trackers, wikis, blogs,
+Twitter, and any other communication channel used by our community. The NumPy
+project does not organise in-person events, however events related to our
+community should have a code of conduct similar in spirit to this one.
+
+This code of conduct should be honored by everyone who participates in
+the NumPy community formally or informally, or claims any affiliation with the
+project, in any project-related activities and especially when representing the
+project, in any role.
+
+This code is not exhaustive or complete. It serves to distill our common
+understanding of a collaborative, shared environment and goals. Please try to
+follow this code in spirit as much as in letter, to create a friendly and
+productive environment that enriches the surrounding community.
+
+
+Specific Guidelines
+-------------------
+
+We strive to:
+
+1. Be open. We invite anyone to participate in our community. We prefer to use
+ public methods of communication for project-related messages, unless
+ discussing something sensitive. This applies to messages for help or
+ project-related support, too; not only is a public support request much more
+ likely to result in an answer to a question, it also ensures that any
+ inadvertent mistakes in answering are more easily detected and corrected.
+
+2. Be empathetic, welcoming, friendly, and patient. We work together to resolve
+ conflict, and assume good intentions. We may all experience some frustration
+ from time to time, but we do not allow frustration to turn into a personal
+ attack. A community where people feel uncomfortable or threatened is not a
+ productive one.
+
+3. Be collaborative. Our work will be used by other people, and in turn we will
+ depend on the work of others. When we make something for the benefit of the
+ project, we are willing to explain to others how it works, so that they can
+ build on the work to make it even better. Any decision we make will affect
+ users and colleagues, and we take those consequences seriously when making
+ decisions.
+
+4. Be inquisitive. Nobody knows everything! Asking questions early avoids many
+ problems later, so we encourage questions, although we may direct them to
+ the appropriate forum. We will try hard to be responsive and helpful.
+
+5. Be careful in the words that we choose. We are careful and respectful in
+ our communication and we take responsibility for our own speech. Be kind to
+ others. Do not insult or put down other participants. We will not accept
+ harassment or other exclusionary behaviour, such as:
+
+ - Violent threats or language directed against another person.
+ - Sexist, racist, or otherwise discriminatory jokes and language.
+ - Posting sexually explicit or violent material.
+ - Posting (or threatening to post) other people's personally identifying information ("doxing").
+ - Sharing private content, such as emails sent privately or non-publicly,
+ or unlogged forums such as IRC channel history, without the sender's consent.
+ - Personal insults, especially those using racist or sexist terms.
+ - Unwelcome sexual attention.
+ - Excessive profanity. Please avoid swearwords; people differ greatly in their sensitivity to swearing.
+ - Repeated harassment of others. In general, if someone asks you to stop, then stop.
+ - Advocating for, or encouraging, any of the above behaviour.
+
+
+Diversity Statement
+-------------------
+
+The NumPy project welcomes and encourages participation by everyone. We are
+committed to being a community that everyone enjoys being part of. Although
+we may not always be able to accommodate each individual's preferences, we try
+our best to treat everyone kindly.
+
+No matter how you identify yourself or how others perceive you: we welcome you.
+Though no list can hope to be comprehensive, we explicitly honour diversity in:
+age, culture, ethnicity, genotype, gender identity or expression, language,
+national origin, neurotype, phenotype, political beliefs, profession, race,
+religion, sexual orientation, socioeconomic status, subculture and technical
+ability, to the extent that these do not conflict with this code of conduct.
+
+
+Though we welcome people fluent in all languages, NumPy development is
+conducted in English.
+
+Standards for behaviour in the NumPy community are detailed in the Code of
+Conduct above. Participants in our community should uphold these standards
+in all their interactions and help others to do so as well (see next section).
+
+
+Reporting Guidelines
+--------------------
+
+We know that it is painfully common for internet communication to start at or
+devolve into obvious and flagrant abuse. We also recognize that sometimes
+people may have a bad day, or be unaware of some of the guidelines in this Code
+of Conduct. Please keep this in mind when deciding on how to respond to a
+breach of this Code.
+
+For clearly intentional breaches, report those to the Code of Conduct committee
+(see below). For possibly unintentional breaches, you may reply to the person
+and point out this code of conduct (either in public or in private, whatever is
+most appropriate). If you would prefer not to do that, please feel free to
+report to the Code of Conduct Committee directly, or ask the Committee for
+advice, in confidence.
+
+You can report issues to the NumPy Code of Conduct committee, at
+numpy-conduct@googlegroups.com. Currently, the committee consists of:
+
+- Stefan van der Walt
+- Nathaniel J. Smith
+- Ralf Gommers
+
+If your report involves any members of the committee, or if they feel they have
+a conflict of interest in handling it, then they will recuse themselves from
+considering your report. Alternatively, if for any reason you feel
+uncomfortable making a report to the committee, then you can also contact:
+
+- Senior `NumFOCUS staff <https://numfocus.org/code-of-conduct#persons-responsible>`__: conduct@numfocus.org
+
+
+Incident reporting resolution & Code of Conduct enforcement
+-----------------------------------------------------------
+
+*This section summarizes the most important points, more details can be found
+in* :ref:`CoC_reporting_manual`.
+
+We will investigate and respond to all complaints. The NumPy Code of Conduct
+Committee and the NumPy Steering Committee (if involved) will protect the
+identity of the reporter, and treat the content of complaints as confidential
+(unless the reporter agrees otherwise).
+
+In case of severe and obvious breaches, e.g. personal threat or violent, sexist
+or racist language, we will immediately disconnect the originator from NumPy
+communication channels; please see the manual for details.
+
+In cases not involving clear severe and obvious breaches of this code of
+conduct, the process for acting on any received code of conduct violation
+report will be:
+
+1. acknowledge report is received
+2. reasonable discussion/feedback
+3. mediation (if feedback didn't help, and only if both reporter and reportee agree to this)
+4. enforcement via transparent decision (see :ref:`CoC_resolutions`) by the
+ Code of Conduct Committee
+
+The committee will respond to any report as soon as possible, and at most
+within 72 hours.
+
+
+Endnotes
+--------
+
+We are thankful to the groups behind the following documents, from which we
+drew content and inspiration:
+
+- `The SciPy Code of Conduct <https://docs.scipy.org/doc/scipy/reference/dev/conduct/code_of_conduct.html>`_
+
diff --git a/doc/source/dev/conduct/report_handling_manual.rst b/doc/source/dev/conduct/report_handling_manual.rst
new file mode 100644
index 000000000..d39b615bb
--- /dev/null
+++ b/doc/source/dev/conduct/report_handling_manual.rst
@@ -0,0 +1,220 @@
+:orphan:
+
+.. _CoC_reporting_manual:
+
+NumPy Code of Conduct - How to follow up on a report
+----------------------------------------------------
+
+This is the manual followed by NumPy's Code of Conduct Committee. It's used
+when we respond to an issue to make sure we're consistent and fair.
+
+Enforcing the Code of Conduct impacts our community today and for the future.
+It's an action that we do not take lightly. When reviewing enforcement
+measures, the Code of Conduct Committee will keep the following values and
+guidelines in mind:
+
+* Act in a personal manner rather than impersonal. The Committee can engage
+ the parties to understand the situation, while respecting the privacy and any
+ necessary confidentiality of reporters. However, sometimes it is necessary
+ to communicate with one or more individuals directly: the Committee's goal is
+ to improve the health of our community rather than only produce a formal
+ decision.
+
+* Emphasize empathy for individuals rather than judging behavior, avoiding
+ binary labels of "good" and "bad/evil". Overt, clear-cut aggression and
+ harassment exist, and we will address those firmly. But many scenarios
+ that can prove challenging to resolve are those where normal disagreements
+ devolve into unhelpful or harmful behavior from multiple parties.
+ Understanding the full context and finding a path that re-engages all is
+ hard, but ultimately the most productive for our community.
+
+* We understand that email is a difficult medium and can be isolating.
+ Receiving criticism over email, without personal contact, can be
+ particularly painful. This makes it especially important to keep an
+ atmosphere of open-minded respect of the views of others. It also means
+ that we must be transparent in our actions, and that we will do everything
+ in our power to make sure that all our members are treated fairly and with
+ sympathy.
+
+* Discrimination can be subtle and it can be unconscious. It can show itself
+ as unfairness and hostility in otherwise ordinary interactions. We know
+ that this does occur, and we will take care to look out for it. We would
+ very much like to hear from you if you feel you have been treated unfairly,
+ and we will use these procedures to make sure that your complaint is heard
+ and addressed.
+
+* Help increase engagement in good discussion practice: try to identify where
+ discussion may have broken down and provide actionable information, pointers
+ and resources that can lead to positive change on these points.
+
+* Be mindful of the needs of new members: provide them with explicit support
+ and consideration, with the aim of increasing participation from
+ underrepresented groups in particular.
+
+* Individuals come from different cultural backgrounds and native languages.
+ Try to identify any honest misunderstandings caused by a non-native speaker
+ and help them understand the issue and what they can change to avoid causing
+ offence. Complex discussion in a foreign language can be very intimidating,
+ and we want to grow our diversity also across nationalities and cultures.
+
+*Mediation*: voluntary, informal mediation is a tool at our disposal. In
+contexts such as when two or more parties have all escalated to the point of
+inappropriate behavior (something sadly common in human conflict), it may be
+useful to facilitate a mediation process. This is only an example: the
+Committee can consider mediation in any case, mindful that the process is meant
+to be strictly voluntary and no party can be pressured to participate. If the
+Committee suggests mediation, it should:
+
+* Find a candidate who can serve as a mediator.
+* Obtain the agreement of the reporter(s). The reporter(s) have complete
+ freedom to decline the mediation idea, or to propose an alternate mediator.
+* Obtain the agreement of the reported person(s).
+* Settle on the mediator: while parties can propose a different mediator than
+ the suggested candidate, only if common agreement is reached on all terms can
+ the process move forward.
+* Establish a timeline for mediation to complete, ideally within two weeks.
+
+The mediator will engage with all the parties and seek a resolution that is
+satisfactory to all. Upon completion, the mediator will provide a report
+(vetted by all parties to the process) to the Committee, with recommendations
+on further steps. The Committee will then evaluate these results (whether
+satisfactory resolution was achieved or not) and decide on any additional
+action deemed necessary.
+
+
+How the committee will respond to reports
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When the committee (or a committee member) receives a report, they will first
+determine whether the report is about a clear and severe breach (as defined
+below). If so, immediate action needs to be taken in addition to the regular
+report handling process.
+
+Clear and severe breach actions
++++++++++++++++++++++++++++++++
+
+We know that it is painfully common for internet communication to start at or
+devolve into obvious and flagrant abuse. We will deal quickly with clear and
+severe breaches like personal threats, violent, sexist or racist language.
+
+When a member of the Code of Conduct committee becomes aware of a clear and
+severe breach, they will do the following:
+
+* Immediately disconnect the originator from all NumPy communication channels.
+* Reply to the reporter that their report has been received and that the
+ originator has been disconnected.
+* In every case, the moderator should make a reasonable effort to contact the
+ originator, and tell them specifically how their language or actions
+ qualify as a "clear and severe breach". The moderator should also say
+ that, if the originator believes this is unfair or they want to be
+ reconnected to NumPy, they have the right to ask for a review, as below, by
+ the Code of Conduct Committee.
+ The moderator should copy this explanation to the Code of Conduct Committee.
+* The Code of Conduct Committee will formally review and sign off on all cases
+ where this mechanism has been applied to make sure it is not being used to
+ control ordinary heated disagreement.
+
+Report handling
++++++++++++++++
+
+When a report is sent to the committee they will immediately reply to the
+reporter to confirm receipt. This reply must be sent within 72 hours, and the
+group should strive to respond much quicker than that.
+
+If a report doesn't contain enough information, the committee will obtain all
+relevant data before acting. The committee is empowered to act on the Steering
+Council’s behalf in contacting any individuals involved to get a more complete
+account of events.
+
+The committee will then review the incident and determine, to the best of their
+ability:
+
+* What happened.
+* Whether this event constitutes a Code of Conduct violation.
+* Who are the responsible party(ies).
+* Whether this is an ongoing situation, and there is a threat to anyone's
+ physical safety.
+
+This information will be collected in writing, and whenever possible the
+group's deliberations will be recorded and retained (i.e. chat transcripts,
+email discussions, recorded conference calls, summaries of voice conversations,
+etc).
+
+It is important to retain an archive of all activities of this committee to
+ensure consistency in behavior and provide institutional memory for the
+project. To assist in this, the default channel of discussion for this
+committee will be a private mailing list accessible to current and future
+members of the committee as well as members of the Steering Council upon
+justified request. If the Committee finds the need to use off-list
+communications (e.g. phone calls for early/rapid response), it should in all
+cases summarize these back to the list so there's a good record of the process.
+
+The Code of Conduct Committee should aim to have a resolution agreed upon within
+two weeks. In the event that a resolution can't be determined in that time, the
+committee will respond to the reporter(s) with an update and projected timeline
+for resolution.
+
+
+.. _CoC_resolutions:
+
+Resolutions
+~~~~~~~~~~~
+
+The committee must agree on a resolution by consensus. If the group cannot reach
+consensus and deadlocks for over a week, the group will turn the matter over to
+the Steering Council for resolution.
+
+
+Possible responses may include:
+
+* Taking no further action
+
+ - if we determine no violations have occurred.
+ - if the matter has been resolved publicly while the committee was considering responses.
+
+* Coordinating voluntary mediation: if all involved parties agree, the
+ Committee may facilitate a mediation process as detailed above.
+* Remind publicly, and point out that some behavior/actions/language have been
+ judged inappropriate, and why, in the current context, or can be hurtful to
+ some people, requesting the community to self-adjust.
+* A private reprimand from the committee to the individual(s) involved. In this
+ case, the group chair will deliver that reprimand to the individual(s) over
+ email, cc'ing the group.
+* A public reprimand. In this case, the committee chair will deliver that
+ reprimand in the same venue that the violation occurred, within the limits of
+ practicality. E.g., the original mailing list for an email violation, but
+ for a chat room discussion where the person/context may be gone, they can be
+ reached by other means. The group may choose to publish this message
+ elsewhere for documentation purposes.
+* A request for a public or private apology, assuming the reporter agrees to
+ this idea: they may at their discretion refuse further contact with the
+ violator. The chair will deliver this request. The committee may, if it
+ chooses, attach "strings" to this request: for example, the group may ask a
+ violator to apologize in order to retain one’s membership on a mailing list.
+* A "mutually agreed upon hiatus" where the committee asks the individual to
+ temporarily refrain from community participation. If the individual chooses
+ not to take a temporary break voluntarily, the committee may issue a
+ "mandatory cooling off period".
+* A permanent or temporary ban from some or all NumPy spaces (mailing lists,
+ gitter.im, etc.). The group will maintain records of all such bans so that
+ they may be reviewed in the future or otherwise maintained.
+
+Once a resolution is agreed upon, but before it is enacted, the committee will
+contact the original reporter and any other affected parties and explain the
+proposed resolution. The committee will ask if this resolution is acceptable,
+and must note feedback for the record.
+
+Finally, the committee will make a report to the NumPy Steering Council (as
+well as the NumPy core team in the event of an ongoing resolution, such as a
+ban).
+
+The committee will never publicly discuss the issue; all public statements will
+be made by the chair of the Code of Conduct Committee or the NumPy Steering
+Council.
+
+
+Conflicts of Interest
+~~~~~~~~~~~~~~~~~~~~~
+
+In the event of any conflict of interest, a committee member must immediately
+notify the other members, and recuse themselves if necessary.
diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst
index 0c9f307f8..aa4326f63 100644
--- a/doc/source/dev/development_environment.rst
+++ b/doc/source/dev/development_environment.rst
@@ -59,7 +59,7 @@ For development, you can set up an in-place build so that changes made to
This allows you to import the in-place built NumPy *from the repo base
directory only*. If you want the in-place build to be visible outside that
base dir, you need to point your ``PYTHONPATH`` environment variable to this
-directory. Some IDEs (Spyder for example) have utilities to manage
+directory. Some IDEs (`Spyder`_ for example) have utilities to manage
``PYTHONPATH``. On Linux and OSX, you can run the command::
$ export PYTHONPATH=$PWD
@@ -78,6 +78,8 @@ installs a ``.egg-link`` file into your site-packages as well as adjusts the
``easy-install.pth`` there, so its a more permanent (and magical) operation.
+.. _Spyder: https://www.spyder-ide.org/
+
Other build options
-------------------
@@ -214,13 +216,13 @@ Understanding the code & getting started
----------------------------------------
The best strategy to better understand the code base is to pick something you
-want to change and start reading the code to figure out how it works. When in
+want to change and start reading the code to figure out how it works. When in
doubt, you can ask questions on the mailing list. It is perfectly okay if your
-pull requests aren't perfect, the community is always happy to help. As a
-volunteer project, things do sometimes get dropped and it's totally fine to
+pull requests aren't perfect, the community is always happy to help. As a
+volunteer project, things do sometimes get dropped and it's totally fine to
ping us if something has sat without a response for about two to four weeks.
-So go ahead and pick something that annoys or confuses you about numpy,
-experiment with the code, hang around for discussions or go through the
-reference documents to try to fix it. Things will fall in place and soon
+So go ahead and pick something that annoys or confuses you about numpy,
+experiment with the code, hang around for discussions or go through the
+reference documents to try to fix it. Things will fall in place and soon
you'll have a pretty good understanding of the project as a whole. Good Luck!
diff --git a/doc/source/dev/gitwash/development_workflow.rst b/doc/source/dev/gitwash/development_workflow.rst
index c6884a7cf..9561e25f7 100644
--- a/doc/source/dev/gitwash/development_workflow.rst
+++ b/doc/source/dev/gitwash/development_workflow.rst
@@ -374,7 +374,7 @@ Deleting a branch on github_
git push origin :my-unwanted-branch
(Note the colon ``:`` before ``test-branch``. See also:
-http://github.com/guides/remove-a-remote-branch
+https://github.com/guides/remove-a-remote-branch
Several people sharing a single repository
@@ -387,7 +387,7 @@ share it via github_.
First fork NumPy into your account, as from :ref:`forking`.
Then, go to your forked repository github page, say
-``http://github.com/your-user-name/numpy``
+``https://github.com/your-user-name/numpy``
Click on the 'Admin' button, and add anyone else to the repo as a
collaborator:
diff --git a/doc/source/dev/gitwash/git_links.inc b/doc/source/dev/gitwash/git_links.inc
index 30532da99..cebbb3a67 100644
--- a/doc/source/dev/gitwash/git_links.inc
+++ b/doc/source/dev/gitwash/git_links.inc
@@ -9,57 +9,57 @@
nipy, NIPY, Nipy, etc...
.. git stuff
-.. _git: http://git-scm.com/
-.. _github: http://github.com
-.. _github help: http://help.github.com
-.. _msysgit: http://code.google.com/p/msysgit/downloads/list
-.. _git-osx-installer: http://code.google.com/p/git-osx-installer/downloads/list
+.. _git: https://git-scm.com/
+.. _github: https://github.com
+.. _github help: https://help.github.com
+.. _msysgit: https://code.google.com/p/msysgit/downloads/list
+.. _git-osx-installer: https://code.google.com/p/git-osx-installer/downloads/list
.. _subversion: http://subversion.tigris.org/
-.. _git cheat sheet: http://github.com/guides/git-cheat-sheet
-.. _pro git book: http://progit.org/
-.. _git svn crash course: http://git-scm.com/course/svn.html
-.. _learn.github: http://learn.github.com/
-.. _network graph visualizer: http://github.com/blog/39-say-hello-to-the-network-graph-visualizer
-.. _git user manual: http://www.kernel.org/pub/software/scm/git/docs/user-manual.html
-.. _git tutorial: http://www.kernel.org/pub/software/scm/git/docs/gittutorial.html
-.. _git community book: http://book.git-scm.com/
+.. _git cheat sheet: http://cheat.errtheblog.com/s/git
+.. _pro git book: https://git-scm.com/book/
+.. _git svn crash course: https://git-scm.com/course/svn.html
+.. _learn.github: https://learn.github.com/
+.. _network graph visualizer: https://github.com/blog/39-say-hello-to-the-network-graph-visualizer
+.. _git user manual: https://www.kernel.org/pub/software/scm/git/docs/user-manual.html
+.. _git tutorial: https://www.kernel.org/pub/software/scm/git/docs/gittutorial.html
+.. _git community book: https://book.git-scm.com/
.. _git ready: http://www.gitready.com/
.. _git casts: http://www.gitcasts.com/
.. _Fernando's git page: http://www.fperez.org/py4science/git.html
.. _git magic: http://www-cs-students.stanford.edu/~blynn/gitmagic/index.html
.. _git concepts: http://www.eecs.harvard.edu/~cduan/technical/git/
-.. _git clone: http://www.kernel.org/pub/software/scm/git/docs/git-clone.html
-.. _git checkout: http://www.kernel.org/pub/software/scm/git/docs/git-checkout.html
-.. _git commit: http://www.kernel.org/pub/software/scm/git/docs/git-commit.html
-.. _git push: http://www.kernel.org/pub/software/scm/git/docs/git-push.html
-.. _git pull: http://www.kernel.org/pub/software/scm/git/docs/git-pull.html
-.. _git add: http://www.kernel.org/pub/software/scm/git/docs/git-add.html
-.. _git status: http://www.kernel.org/pub/software/scm/git/docs/git-status.html
-.. _git diff: http://www.kernel.org/pub/software/scm/git/docs/git-diff.html
-.. _git log: http://www.kernel.org/pub/software/scm/git/docs/git-log.html
-.. _git branch: http://www.kernel.org/pub/software/scm/git/docs/git-branch.html
-.. _git remote: http://www.kernel.org/pub/software/scm/git/docs/git-remote.html
-.. _git config: http://www.kernel.org/pub/software/scm/git/docs/git-config.html
+.. _git clone: https://www.kernel.org/pub/software/scm/git/docs/git-clone.html
+.. _git checkout: https://www.kernel.org/pub/software/scm/git/docs/git-checkout.html
+.. _git commit: https://www.kernel.org/pub/software/scm/git/docs/git-commit.html
+.. _git push: https://www.kernel.org/pub/software/scm/git/docs/git-push.html
+.. _git pull: https://www.kernel.org/pub/software/scm/git/docs/git-pull.html
+.. _git add: https://www.kernel.org/pub/software/scm/git/docs/git-add.html
+.. _git status: https://www.kernel.org/pub/software/scm/git/docs/git-status.html
+.. _git diff: https://www.kernel.org/pub/software/scm/git/docs/git-diff.html
+.. _git log: https://www.kernel.org/pub/software/scm/git/docs/git-log.html
+.. _git branch: https://www.kernel.org/pub/software/scm/git/docs/git-branch.html
+.. _git remote: https://www.kernel.org/pub/software/scm/git/docs/git-remote.html
+.. _git config: https://www.kernel.org/pub/software/scm/git/docs/git-config.html
.. _why the -a flag?: http://www.gitready.com/beginner/2009/01/18/the-staging-area.html
.. _git staging area: http://www.gitready.com/beginner/2009/01/18/the-staging-area.html
-.. _tangled working copy problem: http://tomayko.com/writings/the-thing-about-git
+.. _tangled working copy problem: https://tomayko.com/writings/the-thing-about-git
.. _git management: http://kerneltrap.org/Linux/Git_Management
-.. _linux git workflow: http://www.mail-archive.com/dri-devel@lists.sourceforge.net/msg39091.html
+.. _linux git workflow: https://www.mail-archive.com/dri-devel@lists.sourceforge.net/msg39091.html
.. _ipython git workflow: http://mail.python.org/pipermail/ipython-dev/2010-October/006746.html
.. _git parable: http://tom.preston-werner.com/2009/05/19/the-git-parable.html
.. _git foundation: http://matthew-brett.github.com/pydagogue/foundation.html
.. _numpy/master: https://github.com/numpy/numpy
.. _git cherry-pick: https://www.kernel.org/pub/software/scm/git/docs/git-cherry-pick.html
.. _git blame: https://www.kernel.org/pub/software/scm/git/docs/git-blame.html
-.. _this blog post: http://github.com/blog/612-introducing-github-compare-view
-.. _this article on merging conflicts: http://git-scm.com/book/en/Git-Branching-Basic-Branching-and-Merging#Basic-Merge-Conflicts
+.. _this blog post: https://github.com/blog/612-introducing-github-compare-view
+.. _this article on merging conflicts: https://git-scm.com/book/en/Git-Branching-Basic-Branching-and-Merging#Basic-Merge-Conflicts
.. _learn git: https://www.atlassian.com/git/tutorials/
.. _filing pull requests: https://help.github.com/articles/using-pull-requests/#initiating-the-pull-request
.. _pull request review: https://help.github.com/articles/using-pull-requests/#reviewing-the-pull-request
.. other stuff
-.. _python: http://www.python.org
-.. _NumPy: http://www.numpy.org
-.. _`NumPy github`: http://github.com/numpy/numpy
-.. _`NumPy mailing list`: http://scipy.org/Mailing_Lists
+.. _python: https://www.python.org
+.. _NumPy: https://www.numpy.org
+.. _`NumPy github`: https://github.com/numpy/numpy
+.. _`NumPy mailing list`: https://scipy.org/scipylib/mailing-lists.html
diff --git a/doc/source/dev/gitwash_links.txt b/doc/source/dev/gitwash_links.txt
index f9536828c..36ca0b65f 100644
--- a/doc/source/dev/gitwash_links.txt
+++ b/doc/source/dev/gitwash_links.txt
@@ -1,3 +1,3 @@
-.. _NumPy: http://www.numpy.org
-.. _`NumPy github`: http://github.com/numpy/numpy
-.. _`NumPy mailing list`: http://scipy.org/Mailing_Lists
+.. _NumPy: https://www.numpy.org
+.. _`NumPy github`: https://github.com/numpy/numpy
+.. _`NumPy mailing list`: https://scipy.org/scipylib/mailing-lists.html
diff --git a/doc/source/dev/governance/people.rst b/doc/source/dev/governance/people.rst
index b22852a5a..7b8d3cab0 100644
--- a/doc/source/dev/governance/people.rst
+++ b/doc/source/dev/governance/people.rst
@@ -28,6 +28,8 @@ Steering council
* Allan Haldane
+* Stefan van der Walt
+
Emeritus members
----------------
@@ -54,7 +56,7 @@ NumFOCUS Subcommittee
Institutional Partners
----------------------
-* UC Berkeley (Nathaniel Smith)
+* UC Berkeley (Stefan van der Walt)
Document history
diff --git a/doc/source/dev/index.rst b/doc/source/dev/index.rst
index 543194119..825b93b53 100644
--- a/doc/source/dev/index.rst
+++ b/doc/source/dev/index.rst
@@ -5,8 +5,11 @@ Contributing to NumPy
.. toctree::
:maxdepth: 3
+ conduct/code_of_conduct
gitwash/index
development_environment
+ style_guide
+ releasing
governance/index
For core developers: see :ref:`development-workflow`.
diff --git a/doc/source/dev/releasing.rst b/doc/source/dev/releasing.rst
new file mode 100644
index 000000000..61fa19514
--- /dev/null
+++ b/doc/source/dev/releasing.rst
@@ -0,0 +1,16 @@
+===================
+Releasing a Version
+===================
+
+------------------------
+How to Prepare a Release
+------------------------
+
+.. include:: ../../HOWTO_RELEASE.rst.txt
+
+-----------------------
+Step-by-Step Directions
+-----------------------
+
+.. include:: ../../RELEASE_WALKTHROUGH.rst.txt
+
diff --git a/doc/source/dev/style_guide.rst b/doc/source/dev/style_guide.rst
new file mode 100644
index 000000000..bede3424d
--- /dev/null
+++ b/doc/source/dev/style_guide.rst
@@ -0,0 +1,8 @@
+.. _style_guide:
+
+===================
+NumPy C Style Guide
+===================
+
+.. include:: ../../C_STYLE_GUIDE.rst.txt
+ :start-line: 4
diff --git a/doc/source/docs/howto_build_docs.rst b/doc/source/docs/howto_build_docs.rst
index 383bed96d..cdf490c37 100644
--- a/doc/source/docs/howto_build_docs.rst
+++ b/doc/source/docs/howto_build_docs.rst
@@ -10,11 +10,11 @@ documentation for NumPy. You will need Sphinx 1.0.1 or newer.
If you only want to get the documentation, note that pre-built
versions can be found at
- http://docs.scipy.org/
+ https://docs.scipy.org/
in several different formats.
-.. _Sphinx: http://sphinx.pocoo.org
+.. _Sphinx: http://www.sphinx-doc.org/
Instructions
@@ -66,11 +66,11 @@ which will rebuild NumPy, install it to a temporary location, and
build the documentation in all formats. This will most likely again
only work on Unix platforms.
-The documentation for NumPy distributed at http://docs.scipy.org in html and
+The documentation for NumPy distributed at https://docs.scipy.org in html and
pdf format is also built with ``make dist``. See `HOWTO RELEASE`_ for details on
-how to update http://docs.scipy.org.
+how to update https://docs.scipy.org.
-.. _Matplotlib: http://matplotlib.org/
+.. _Matplotlib: https://matplotlib.org/
.. _HOWTO RELEASE: https://github.com/numpy/numpy/blob/master/doc/HOWTO_RELEASE.rst.txt
Sphinx extensions
@@ -83,5 +83,5 @@ above), and are automatically enabled when building NumPy's documentation.
If you want to make use of these extensions in third-party
projects, they are available on PyPi_ as the numpydoc_ package.
-.. _PyPi: http://python.org/pypi
-.. _numpydoc: http://python.org/pypi/numpydoc
+.. _PyPi: https://pypi.org/
+.. _numpydoc: https://python.org/pypi/numpydoc
diff --git a/doc/source/docs/howto_document.rst b/doc/source/docs/howto_document.rst
index de7d06cf8..2a97a100d 100644
--- a/doc/source/docs/howto_document.rst
+++ b/doc/source/docs/howto_document.rst
@@ -4,7 +4,7 @@
A Guide to NumPy/SciPy Documentation
====================================
-When using `Sphinx <http://sphinx.pocoo.org/>`__ in combination with the
+When using `Sphinx <http://www.sphinx-doc.org/>`__ in combination with the
numpy conventions, you should use the ``numpydoc`` extension so that your
docstrings will be handled correctly. For example, Sphinx will extract the
``Parameters`` section from your docstring and convert it into a field
@@ -19,7 +19,7 @@ Some features described in this document require a recent version of
It is available from:
-* `numpydoc on PyPI <http://pypi.python.org/pypi/numpydoc>`_
+* `numpydoc on PyPI <https://pypi.python.org/pypi/numpydoc>`_
* `numpydoc on GitHub <https://github.com/numpy/numpydoc/>`_
Note that for documentation within numpy, it is not necessary to do
diff --git a/doc/source/f2py/compile_session.dat b/doc/source/f2py/compile_session.dat
index 0d8408198..5c42742be 100644
--- a/doc/source/f2py/compile_session.dat
+++ b/doc/source/f2py/compile_session.dat
@@ -1,10 +1,10 @@
->>> import f2py2e
+>>> import numpy.f2py
>>> fsource = '''
... subroutine foo
... print*, "Hello world!"
... end
... '''
->>> f2py2e.compile(fsource,modulename='hello',verbose=0)
+>>> numpy.f2py.compile(fsource, modulename='hello', verbose=0)
0
>>> import hello
>>> hello.foo()
diff --git a/doc/source/f2py/getting-started.rst b/doc/source/f2py/getting-started.rst
index fffd61c45..3d8ea24e4 100644
--- a/doc/source/f2py/getting-started.rst
+++ b/doc/source/f2py/getting-started.rst
@@ -45,9 +45,9 @@ to run
::
- f2py -c fib1.f -m fib1
+ python -m numpy.f2py -c fib1.f -m fib1
-This command builds (see ``-c`` flag, execute ``f2py`` without
+This command builds (see ``-c`` flag, execute ``python -m numpy.f2py`` without
arguments to see the explanation of command line options) an extension
module ``fib1.so`` (see ``-m`` flag) to the current directory. Now, in
Python the Fortran subroutine ``FIB`` is accessible via ``fib1.fib``::
@@ -162,7 +162,7 @@ one.
::
- f2py fib1.f -m fib2 -h fib1.pyf
+ python -m numpy.f2py fib1.f -m fib2 -h fib1.pyf
The signature file is saved to ``fib1.pyf`` (see ``-h`` flag) and
its contents is shown below.
@@ -188,7 +188,7 @@ one.
::
- f2py -c fib2.pyf fib1.f
+ python -m numpy.f2py -c fib2.pyf fib1.f
In Python::
@@ -243,7 +243,7 @@ __ fib3.f
Building the extension module can be now carried out in one command::
- f2py -c -m fib3 fib3.f
+ python -m numpy.f2py -c -m fib3 fib3.f
Notice that the resulting wrapper to ``FIB`` is as "smart" as in
previous case::
diff --git a/doc/source/f2py/index.rst b/doc/source/f2py/index.rst
index 8b7d1453a..d6773a76f 100644
--- a/doc/source/f2py/index.rst
+++ b/doc/source/f2py/index.rst
@@ -26,6 +26,5 @@ from Python.
distutils
advanced
-.. _Python: http://www.python.org/
-.. _NumPy: http://www.numpy.org/
-.. _SciPy: http://www.numpy.org/
+.. _Python: https://www.python.org/
+.. _NumPy: https://www.numpy.org/
diff --git a/doc/source/f2py/run_main_session.dat b/doc/source/f2py/run_main_session.dat
index 29ecc3dfe..b9a7e1b0d 100644
--- a/doc/source/f2py/run_main_session.dat
+++ b/doc/source/f2py/run_main_session.dat
@@ -1,14 +1,14 @@
->>> import f2py2e
->>> r=f2py2e.run_main(['-m','scalar','docs/usersguide/scalar.f'])
+>>> import numpy.f2py
+>>> r = numpy.f2py.run_main(['-m','scalar','doc/source/f2py/scalar.f'])
Reading fortran codes...
- Reading file 'docs/usersguide/scalar.f'
+ Reading file 'doc/source/f2py/scalar.f' (format:fix,strict)
Post-processing...
Block: scalar
Block: FOO
Building modules...
Building module "scalar"...
Wrote C/API module "scalar" to file "./scalarmodule.c"
->>> print r
-{'scalar': {'h': ['/home/users/pearu/src_cvs/f2py2e/src/fortranobject.h'],
+>>> printr(r)
+{'scalar': {'h': ['/home/users/pearu/src_cvs/f2py/src/fortranobject.h'],
'csrc': ['./scalarmodule.c',
- '/home/users/pearu/src_cvs/f2py2e/src/fortranobject.c']}}
+ '/home/users/pearu/src_cvs/f2py/src/fortranobject.c']}}
diff --git a/doc/source/f2py/signature-file.rst b/doc/source/f2py/signature-file.rst
index bd926f33c..8e5a9710c 100644
--- a/doc/source/f2py/signature-file.rst
+++ b/doc/source/f2py/signature-file.rst
@@ -303,7 +303,7 @@ Other statements:
``pymethoddef`` statement can be used only inside
``python module`` block.
- __ http://www.python.org/doc/current/ext/ext.html
+ __ https://docs.python.org/extending/index.html
Attributes
------------
diff --git a/doc/source/f2py/usage.rst b/doc/source/f2py/usage.rst
index a6f093154..0f5068e0e 100644
--- a/doc/source/f2py/usage.rst
+++ b/doc/source/f2py/usage.rst
@@ -3,7 +3,19 @@ Using F2PY
===========
F2PY can be used either as a command line tool ``f2py`` or as a Python
-module ``f2py2e``.
+module ``numpy.f2py``. While we try to install the command line tool as part
+of the numpy setup, some platforms like Windows make it difficult to
+reliably put the executable on the ``PATH``. We will refer to ``f2py``
+in this document but you may have to run it as a module::
+
+    python -m numpy.f2py
+
+If you run ``f2py`` with no arguments, and the line ``numpy Version`` at the
+end matches the NumPy version printed from ``python -m numpy.f2py``, then you
+can use the shorter version. If not, or if you cannot run ``f2py``, you should
+replace all calls to ``f2py`` here with the longer version.
Command ``f2py``
=================
@@ -194,15 +206,15 @@ Other options:
Execute ``f2py`` without any options to get an up-to-date list of
available options.
-Python module ``f2py2e``
-=========================
+Python module ``numpy.f2py``
+============================
.. warning::
- The current Python interface to ``f2py2e`` module is not mature and
- may change in future depending on users needs.
+ The current Python interface to the ``f2py`` module is not mature and
+ may change in the future.
-The following functions are provided by the ``f2py2e`` module:
+The following functions are provided by the ``numpy.f2py`` module:
``run_main(<list>)``
Equivalent to running::
diff --git a/doc/source/reference/alignment.rst b/doc/source/reference/alignment.rst
new file mode 100644
index 000000000..ebc8f353c
--- /dev/null
+++ b/doc/source/reference/alignment.rst
@@ -0,0 +1,104 @@
+.. _alignment:
+
+Memory Alignment
+================
+
+Numpy Alignment Goals
+---------------------
+
+There are three use-cases related to memory alignment in numpy (as of 1.14):
+
+ 1. Creating structured datatypes with fields aligned like in a C-struct.
+ 2. Speeding up copy operations by using uint assignment instead of memcpy.
+ 3. Guaranteeing safe aligned access for ufuncs/setitem/casting code.
+
+Numpy uses two different forms of alignment to achieve these goals:
+"True alignment" and "Uint alignment".
+
+"True" alignment refers to the architecture-dependent alignment of an
+equivalent C-type in C. For example, in x64 systems ``numpy.float64`` is
+equivalent to ``double`` in C. On most systems this has either an alignment of
+4 or 8 bytes (and this can be controlled in gcc by the option
+``malign-double``). A variable is aligned in memory if its memory offset is a
+multiple of its alignment. On some systems (e.g. sparc) memory alignment is
+required; on others it merely gives a speedup.
+
+"Uint" alignment depends on the size of a datatype. It is defined to be the
+"True alignment" of the uint used by numpy's copy-code to copy the datatype, or
+undefined/unaligned if there is no equivalent uint. Currently numpy uses uint8,
+uint16, uint32, uint64 and uint64 to copy data of size 1, 2, 4, 8 and 16 bytes
+respectively (16-byte data are copied as a pair of uint64), and all other sized
+datatypes cannot be uint-aligned.
+
+For example, on a (typical linux x64 gcc) system, the numpy ``complex64``
+datatype is implemented as ``struct { float real, imag; }``. This has "true"
+alignment of 4 and "uint" alignment of 8 (equal to the true alignment of
+``uint64``).
+
+Some cases where uint and true alignment are different (default gcc linux)::
+
+   arch     type        true-aln    uint-aln
+   ----     ----        --------    --------
+   x86_64   complex64    4           8
+   x86_64   float128    16           8
+   x86      float96      4           -
+
+
+Variables in Numpy which control and describe alignment
+-------------------------------------------------------
+
+There are 4 relevant uses of the word ``align`` used in numpy:
+
+ * The ``dtype.alignment`` attribute (``descr->alignment`` in C). This is meant
+ to reflect the "true alignment" of the type. It has arch-dependent default
+ values for all datatypes, with the exception of structured types created
+ with ``align=True`` as described below.
+ * The ``ALIGNED`` flag of an ndarray, computed in ``IsAligned`` and checked
+ by ``PyArray_ISALIGNED``. This is computed from ``dtype.alignment``.
+ It is set to ``True`` if every item in the array is at a memory location
+ consistent with ``dtype.alignment``, which is the case if the data ptr and
+ all strides of the array are multiples of that alignment.
+ * The ``align`` keyword of the dtype constructor, which only affects structured
+ arrays. If the structure's field offsets are not manually provided numpy
+ determines offsets automatically. In that case, ``align=True`` pads the
+ structure so that each field is "true" aligned in memory and sets
+ ``dtype.alignment`` to be the largest of the field "true" alignments. This
+ is like what C-structs usually do. Otherwise if offsets or itemsize were
+ manually provided ``align=True`` simply checks that all the fields are
+ "true" aligned and that the total itemsize is a multiple of the largest
+ field alignment. In either case ``dtype.isalignedstruct`` is also set to
+ True.
+ * ``IsUintAligned`` is used to determine if an ndarray is "uint aligned" in
   an analogous way to how ``IsAligned`` checks for true-alignment.
+
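+As a short illustration of the Python-visible attributes above (a minimal
+sketch; the exact values are representative of a typical x86_64 linux build
+and may differ on other platforms)::
+
+    >>> import numpy as np
+    >>> dt = np.dtype('u1,f8', align=True)   # pad fields like a C-struct
+    >>> dt.alignment                         # largest field "true" alignment
+    8
+    >>> dt.itemsize                          # padded to a multiple of 8
+    16
+    >>> dt.isalignedstruct
+    True
+    >>> np.zeros(3, dtype=dt).flags.aligned  # the ALIGNED flag
+    True
+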
+Consequences of alignment
+-------------------------
+
+Here is how the variables above are used:
+
+ 1. Creating aligned structs: In order to know how to offset a field when
+ ``align=True``, numpy looks up ``field.dtype.alignment``. This includes
+ fields which are nested structured arrays.
+ 2. Ufuncs: If the ``ALIGNED`` flag of an array is False, ufuncs will
+ buffer/cast the array before evaluation. This is needed since ufunc inner
+ loops access raw elements directly, which might fail on some archs if the
+ elements are not true-aligned.
+ 3. Getitem/setitem/copyswap function: Similar to ufuncs, these functions
+ generally have two code paths. If ``ALIGNED`` is False they will
+ use a code path that buffers the arguments so they are true-aligned.
+ 4. Strided copy code: Here, "uint alignment" is used instead. If the itemsize
+ of an array is equal to 1, 2, 4, 8 or 16 bytes and the array is uint
    aligned, then numpy will do ``*((uintN*)dst) = *((uintN*)src)`` for
+ appropriate N. Otherwise numpy copies by doing ``memcpy(dst, src, N)``.
+ 5. Nditer code: Since this often calls the strided copy code, it must
+ check for "uint alignment".
+ 6. Cast code: This checks for "true" alignment, as it does
+ ``*dst = CASTFUNC(*src)`` if aligned. Otherwise, it does
+ ``memmove(srcval, src); dstval = CASTFUNC(srcval); memmove(dst, dstval)``
+ where dstval/srcval are aligned.
+
+Note that the strided-copy and strided-cast code are deeply intertwined and so
+any arrays being processed by them must be both uint and true aligned, even
+though the copy-code only needs uint alignment and the cast code only true
+alignment. If there is ever a big rewrite of this code it would be good to
+allow them to use different alignments.
+
+
diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst
index e64d0c17e..387515f59 100644
--- a/doc/source/reference/arrays.datetime.rst
+++ b/doc/source/reference/arrays.datetime.rst
@@ -119,6 +119,9 @@ simple datetime calculations.
>>> np.timedelta64(1,'W') / np.timedelta64(1,'D')
7.0
+ >>> np.timedelta64(1,'W') % np.timedelta64(10,'D')
+ numpy.timedelta64(7,'D')
+
There are two Timedelta units ('Y', years and 'M', months) which are treated
specially, because how much time they represent changes depending
on when they are used. While a timedelta day unit is equivalent to
diff --git a/doc/source/reference/arrays.dtypes.rst b/doc/source/reference/arrays.dtypes.rst
index dcf04b453..f2072263f 100644
--- a/doc/source/reference/arrays.dtypes.rst
+++ b/doc/source/reference/arrays.dtypes.rst
@@ -391,8 +391,8 @@ Type strings
When the optional keys *offsets* and *titles* are provided,
their values must each be lists of the same length as the *names*
and *formats* lists. The *offsets* value is a list of byte offsets
- (integers) for each field, while the *titles* value is a list of
- titles for each field (:const:`None` can be used if no title is
+ (limited to `ctypes.c_int`) for each field, while the *titles* value is a
+ list of titles for each field (:const:`None` can be used if no title is
desired for that field). The *titles* can be any :class:`string`
or :class:`unicode` object and will add another entry to the
fields dictionary keyed by the title and referencing the same
@@ -402,7 +402,8 @@ Type strings
The *itemsize* key allows the total size of the dtype to be
set, and must be an integer large enough so all the fields
are within the dtype. If the dtype being constructed is aligned,
- the *itemsize* must also be divisible by the struct alignment.
+ the *itemsize* must also be divisible by the struct alignment. Total dtype
+ *itemsize* is limited to `ctypes.c_int`.
.. admonition:: Example
@@ -457,6 +458,7 @@ Type strings
Both arguments must be convertible to data-type objects with the same total
size.
+
.. admonition:: Example
32-bit integer, whose first two bytes are interpreted as an integer
diff --git a/doc/source/reference/arrays.indexing.rst b/doc/source/reference/arrays.indexing.rst
index ba1bfd312..027a57f26 100644
--- a/doc/source/reference/arrays.indexing.rst
+++ b/doc/source/reference/arrays.indexing.rst
@@ -173,6 +173,7 @@ concepts to remember include:
of arbitrary dimension.
.. data:: newaxis
+ :noindex:
The :const:`newaxis` object can be used in all slicing operations to
create an axis of length one. :const:`newaxis` is an alias for
@@ -287,7 +288,7 @@ understood with an example.
Combining advanced and basic indexing
"""""""""""""""""""""""""""""""""""""
-When there is at least one slice (``:``), ellipsis (``...``) or ``np.newaxis``
+When there is at least one slice (``:``), ellipsis (``...``) or :const:`newaxis`
in the index (or the array has more dimensions than there are advanced indexes),
then the behaviour can be more complicated. It is like concatenating the
indexing result for each advanced index element
@@ -310,7 +311,7 @@ the subspace defined by the basic indexing (excluding integers) and the
subspace from the advanced indexing part. Two cases of index combination
need to be distinguished:
-* The advanced indexes are separated by a slice, ellipsis or newaxis.
+* The advanced indexes are separated by a slice, :const:`Ellipsis` or :const:`newaxis`.
For example ``x[arr1, :, arr2]``.
* The advanced indexes are all next to each other.
For example ``x[..., arr1, arr2, :]`` but *not* ``x[arr1, :, 1]``
@@ -512,14 +513,10 @@ only the part of the data in the specified field. Also
:ref:`record array <arrays.classes.rec>` scalars can be "indexed" this way.
Indexing into a structured array can also be done with a list of field names,
-*e.g.* ``x[['field-name1','field-name2']]``. Currently this returns a new
-array containing a copy of the values in the fields specified in the list.
-As of NumPy 1.7, returning a copy is being deprecated in favor of returning
-a view. A copy will continue to be returned for now, but a FutureWarning
-will be issued when writing to the copy. If you depend on the current
-behavior, then we suggest copying the returned array explicitly, i.e. use
-x[['field-name1','field-name2']].copy(). This will work with both past and
-future versions of NumPy.
+*e.g.* ``x[['field-name1','field-name2']]``. As of NumPy 1.16 this returns a
+view containing only those fields. In older versions of numpy it returned a
+copy. See the user guide section on :ref:`structured_arrays` for more
+information on multifield indexing.
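+
+A minimal sketch of the view semantics (assuming NumPy >= 1.16)::
+
+    >>> import numpy as np
+    >>> x = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'i4')])
+    >>> y = x[['a', 'c']]        # a view, not a copy
+    >>> y['a'] = 1               # writes through to x
+    >>> x['a']
+    array([1, 1, 1], dtype=int32)
+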
If the accessed field is a sub-array, the dimensions of the sub-array
are appended to the shape of the result.
diff --git a/doc/source/reference/arrays.interface.rst b/doc/source/reference/arrays.interface.rst
index 4a5fe62bf..f361ccb06 100644
--- a/doc/source/reference/arrays.interface.rst
+++ b/doc/source/reference/arrays.interface.rst
@@ -22,7 +22,7 @@ The Array Interface
described here.
__ http://cython.org/
-__ http://wiki.cython.org/tutorials/numpy
+__ https://github.com/cython/cython/wiki/tutorials-numpy
:version: 3
diff --git a/doc/source/reference/arrays.ndarray.rst b/doc/source/reference/arrays.ndarray.rst
index 4c8bbf66d..306d22f43 100644
--- a/doc/source/reference/arrays.ndarray.rst
+++ b/doc/source/reference/arrays.ndarray.rst
@@ -517,7 +517,7 @@ Arithmetic:
``__r{op}__`` special methods are not directly defined.
- The functions called to implement many arithmetic special methods
- for arrays can be modified using :func:`set_numeric_ops`.
+ for arrays can be modified using :class:`__array_ufunc__ <numpy.class.__array_ufunc__>`.
Arithmetic, in-place:
diff --git a/doc/source/reference/c-api.array.rst b/doc/source/reference/c-api.array.rst
index 5ea7bfcfc..76aa680ae 100644
--- a/doc/source/reference/c-api.array.rst
+++ b/doc/source/reference/c-api.array.rst
@@ -122,9 +122,11 @@ sub-types).
.. c:function:: PyObject *PyArray_GETITEM(PyArrayObject* arr, void* itemptr)
- Get a Python object from the ndarray, *arr*, at the location
- pointed to by itemptr. Return ``NULL`` on failure.
-
+ Get a Python object of a builtin type from the ndarray, *arr*,
+ at the location pointed to by itemptr. Return ``NULL`` on failure.
+
+ `numpy.ndarray.item` is identical to :c:func:`PyArray_GETITEM`.
+
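+ From Python, the same conversion is visible through `numpy.ndarray.item`;
+ for example::
+
+     >>> import numpy as np
+     >>> a = np.arange(3, dtype=np.int32)
+     >>> a.item(1)        # a builtin Python int, not a numpy scalar
+     1
+     >>> type(a.item(1))
+     <class 'int'>
+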
.. c:function:: int PyArray_SETITEM( \
PyArrayObject* arr, void* itemptr, PyObject* obj)
@@ -200,7 +202,8 @@ From scratch
PyTypeObject* subtype, PyArray_Descr* descr, int nd, npy_intp* dims, \
npy_intp* strides, void* data, int flags, PyObject* obj)
- This function steals a reference to *descr*.
+ This function steals a reference to *descr*. The easiest way to get one
+ is using :c:func:`PyArray_DescrFromType`.
This is the main array creation function. Most new arrays are
created with this flexible function.
@@ -214,9 +217,11 @@ From scratch
:c:data:`&PyArray_Type<PyArray_Type>`, then *obj* is the object to pass to
the :obj:`~numpy.class.__array_finalize__` method of the subclass.
- If *data* is ``NULL``, then new memory will be allocated and *flags*
- can be non-zero to indicate a Fortran-style contiguous array. If
- *data* is not ``NULL``, then it is assumed to point to the memory
+ If *data* is ``NULL``, then new uninitialized memory will be allocated and
+ *flags* can be non-zero to indicate a Fortran-style contiguous array. Use
+ :c:func:`PyArray_FILLWBYTE` to initialize the memory.
+
+ If *data* is not ``NULL``, then it is assumed to point to the memory
to be used for the array and the *flags* argument is used as the
new flags for the array (except the state of :c:data:`NPY_OWNDATA`,
:c:data:`NPY_ARRAY_WRITEBACKIFCOPY` and :c:data:`NPY_ARRAY_UPDATEIFCOPY`
@@ -230,6 +235,12 @@ From scratch
provided *dims* and *strides* are copied into newly allocated
dimension and strides arrays for the new array object.
+ :c:func:`PyArray_CheckStrides` can help verify non-``NULL`` stride
+ information.
+
+ If ``data`` is provided, it must stay alive for the life of the array. One
+ way to manage this is through :c:func:`PyArray_SetBaseObject`.
+
.. c:function:: PyObject* PyArray_NewLikeArray( \
PyArrayObject* prototype, NPY_ORDER order, PyArray_Descr* descr, \
int subok)
@@ -405,10 +416,6 @@ From other objects
the array is constructed that way. Almost always this
parameter is ``NULL``.
- In versions 1.6 and earlier of NumPy, the following flags
- did not have the ``_ARRAY_`` macro namespace in them. That form
- of the constant names is deprecated in 1.7.
-
.. c:var:: NPY_ARRAY_C_CONTIGUOUS
Make sure the returned array is C-style contiguous
@@ -2847,7 +2854,10 @@ Data-type descriptors
Returns a data-type object corresponding to *typenum*. The
*typenum* can be one of the enumerated types, a character code for
- one of the enumerated types, or a user-defined type.
+ one of the enumerated types, or a user-defined type. If you want to use a
+ flexible size array, then you need to use a flexible *typenum* and set the
+ result's ``elsize`` parameter to the desired size. The *typenum* is one of
+ the :c:data:`NPY_TYPES`.
.. c:function:: int PyArray_DescrConverter(PyObject* obj, PyArray_Descr** dtype)
@@ -3207,12 +3217,16 @@ Internal Flexibility
setting a Python Error) if one of the objects being assigned is not
callable.
+ .. deprecated:: 1.16
+
.. c:function:: PyObject* PyArray_GetNumericOps(void)
Return a Python dictionary containing the callable Python objects
stored in the internal arithmetic operation table. The keys of
this dictionary are given in the explanation for :c:func:`PyArray_SetNumericOps`.
+ .. deprecated:: 1.16
+
.. c:function:: void PyArray_SetStringFunction(PyObject* op, int repr)
This function allows you to alter the tp_str and tp_repr methods
diff --git a/doc/source/reference/c-api.coremath.rst b/doc/source/reference/c-api.coremath.rst
index ad92235da..691f73287 100644
--- a/doc/source/reference/c-api.coremath.rst
+++ b/doc/source/reference/c-api.coremath.rst
@@ -222,7 +222,7 @@ Those can be useful for precise floating point comparison.
Returns the previous status mask.
.. versionadded:: 1.15.0
-n
+
Complex functions
~~~~~~~~~~~~~~~~~
@@ -297,10 +297,10 @@ External Links:
* `OpenGL Half Float Pixel Support`__
* `The OpenEXR image format`__.
-__ http://ieeexplore.ieee.org/servlet/opac?punumber=4610933
-__ http://en.wikipedia.org/wiki/Half_precision_floating-point_format
-__ http://www.opengl.org/registry/specs/ARB/half_float_pixel.txt
-__ http://www.openexr.com/about.html
+__ https://ieeexplore.ieee.org/document/4610935/
+__ https://en.wikipedia.org/wiki/Half-precision_floating-point_format
+__ https://www.khronos.org/registry/OpenGL/extensions/ARB/ARB_half_float_pixel.txt
+__ https://www.openexr.com/about.html
.. c:var:: NPY_HALF_ZERO
diff --git a/doc/source/reference/c-api.dtype.rst b/doc/source/reference/c-api.dtype.rst
index 8af3a9080..9ac46b284 100644
--- a/doc/source/reference/c-api.dtype.rst
+++ b/doc/source/reference/c-api.dtype.rst
@@ -25,6 +25,8 @@ select the precision desired.
Enumerated Types
----------------
+.. c:var:: NPY_TYPES
+
There is a list of enumerated types defined providing the basic 24
data types plus some useful generic names. Whenever the code requires
a type number, one of these enumerated types is requested. The types
diff --git a/doc/source/reference/c-api.generalized-ufuncs.rst b/doc/source/reference/c-api.generalized-ufuncs.rst
index dd8cf6558..b59f077ad 100644
--- a/doc/source/reference/c-api.generalized-ufuncs.rst
+++ b/doc/source/reference/c-api.generalized-ufuncs.rst
@@ -127,34 +127,56 @@ The formal syntax of signatures is as follows::
<Output arguments> ::= <Argument list>
<Argument list> ::= nil | <Argument> | <Argument> "," <Argument list>
<Argument> ::= "(" <Core dimension list> ")"
- <Core dimension list> ::= nil | <Core dimension name> |
- <Core dimension name> "," <Core dimension list>
- <Core dimension name> ::= valid Python variable name
-
+ <Core dimension list> ::= nil | <Core dimension> |
+ <Core dimension> "," <Core dimension list>
+ <Core dimension> ::= <Dimension name> <Dimension modifier>
+ <Dimension name> ::= valid Python variable name | valid integer
+ <Dimension modifier> ::= nil | "?"
Notes:
#. All quotes are for clarity.
-#. Core dimensions that share the same name must have the exact same size.
+#. Unmodified core dimensions that share the same name must have the same size.
Each dimension name typically corresponds to one level of looping in the
elementary function's implementation.
#. White spaces are ignored.
+#. An integer as a dimension name freezes that dimension to the given value.
+#. If the name is suffixed with the "?" modifier, the dimension is a core
+ dimension only if it exists on all inputs and outputs that share it;
+ otherwise it is ignored (and replaced by a dimension of size 1 for the
+ elementary function).
Here are some examples of signatures:
-+-------------+------------------------+-----------------------------------+
-| add | ``(),()->()`` | |
-+-------------+------------------------+-----------------------------------+
-| inner1d | ``(i),(i)->()`` | |
-+-------------+------------------------+-----------------------------------+
-| sum1d | ``(i)->()`` | |
-+-------------+------------------------+-----------------------------------+
-| dot2d | ``(m,n),(n,p)->(m,p)`` | matrix multiplication |
-+-------------+------------------------+-----------------------------------+
-| outer_inner | ``(i,t),(j,t)->(i,j)`` | inner over the last dimension, |
-| | | outer over the second to last, |
-| | | and loop/broadcast over the rest. |
-+-------------+------------------------+-----------------------------------+
++-------------+----------------------------+-----------------------------------+
+| name | signature | common usage |
++=============+============================+===================================+
+| add | ``(),()->()`` | binary ufunc |
++-------------+----------------------------+-----------------------------------+
+| sum1d | ``(i)->()`` | reduction |
++-------------+----------------------------+-----------------------------------+
+| inner1d | ``(i),(i)->()`` | vector-vector multiplication |
++-------------+----------------------------+-----------------------------------+
+| matmat | ``(m,n),(n,p)->(m,p)`` | matrix multiplication |
++-------------+----------------------------+-----------------------------------+
+| vecmat | ``(n),(n,p)->(p)`` | vector-matrix multiplication |
++-------------+----------------------------+-----------------------------------+
+| matvec | ``(m,n),(n)->(m)`` | matrix-vector multiplication |
++-------------+----------------------------+-----------------------------------+
+| matmul | ``(m?,n),(n,p?)->(m?,p?)`` | combination of the four above |
++-------------+----------------------------+-----------------------------------+
+| outer_inner | ``(i,t),(j,t)->(i,j)`` | inner over the last dimension, |
+| | | outer over the second to last, |
+| | | and loop/broadcast over the rest. |
++-------------+----------------------------+-----------------------------------+
+| cross1d | ``(3),(3)->(3)`` | cross product where the last |
+| | | dimension is frozen and must be 3 |
++-------------+----------------------------+-----------------------------------+
+
+.. _frozen:
+
+The last is an instance of freezing a core dimension and can be used to
+improve ufunc performance.
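+
+The ``?`` modifier can be observed from Python in ``matmul`` (assuming
+NumPy >= 1.16, where it is implemented as a gufunc with the signature shown
+above): a 1-d argument is missing its optional core dimension, so that
+dimension is also dropped from the output::
+
+    >>> import numpy as np
+    >>> A = np.ones((5, 2, 3))
+    >>> v = np.ones(3)
+    >>> np.matmul(A, v).shape
+    (5, 2)
+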
C-API for implementing Elementary Functions
-------------------------------------------
diff --git a/doc/source/reference/c-api.types-and-structures.rst b/doc/source/reference/c-api.types-and-structures.rst
index dcebd1ede..f04d65ee1 100644
--- a/doc/source/reference/c-api.types-and-structures.rst
+++ b/doc/source/reference/c-api.types-and-structures.rst
@@ -133,9 +133,9 @@ PyArray_Type
is related to this array. There are two use cases: 1) If this array
does not own its own memory, then base points to the Python object
that owns it (perhaps another array object), 2) If this array has
- the (deprecated) :c:data:`NPY_ARRAY_UPDATEIFCOPY` or
+ the (deprecated) :c:data:`NPY_ARRAY_UPDATEIFCOPY` or
:c:data:NPY_ARRAY_WRITEBACKIFCOPY`: flag set, then this array is
- a working copy of a "misbehaved" array. When
+ a working copy of a "misbehaved" array. When
``PyArray_ResolveWritebackIfCopy`` is called, the array pointed to by base
will be updated with the contents of this array.
@@ -182,8 +182,18 @@ PyArrayDescr_Type
.. c:type:: PyArray_Descr
- The format of the :c:type:`PyArray_Descr` structure that lies at the
- heart of the :c:data:`PyArrayDescr_Type` is
+ The :c:type:`PyArray_Descr` structure lies at the heart of the
+ :c:data:`PyArrayDescr_Type`. While it is described here for
+ completeness, it should be considered internal to NumPy and manipulated via
+ ``PyArrayDescr_*`` or ``PyDataType*`` functions and macros. The size of this
+ structure is subject to change across versions of NumPy. To ensure
+ compatibility:
+
+ - Never declare a non-pointer instance of the struct
+  - Never perform pointer arithmetic
+  - Never use ``sizeof(PyArray_Descr)``
+
+ It has the following structure:
.. code-block:: c
@@ -683,7 +693,16 @@ PyUFunc_Type
The core of the ufunc is the :c:type:`PyUFuncObject` which contains all
the information needed to call the underlying C-code loops that
- perform the actual work. It has the following structure:
+ perform the actual work. While it is described here for completeness, it
+ should be considered internal to NumPy and manipulated via ``PyUFunc_*``
+ functions. The size of this structure is subject to change across versions
+ of NumPy. To ensure compatibility:
+
+ - Never declare a non-pointer instance of the struct
+ - Never perform pointer arithmetic
+ - Never use ``sizeof(PyUFuncObject)``
+
+ It has the following structure:
.. code-block:: c
@@ -703,8 +722,21 @@ PyUFunc_Type
void *ptr;
PyObject *obj;
PyObject *userloops;
+ int core_enabled;
+ int core_num_dim_ix;
+ int *core_num_dims;
+ int *core_dim_ixs;
+ int *core_offsets;
+ char *core_signature;
+ PyUFunc_TypeResolutionFunc *type_resolver;
+ PyUFunc_LegacyInnerLoopSelectionFunc *legacy_inner_loop_selector;
+ PyUFunc_MaskedInnerLoopSelectionFunc *masked_inner_loop_selector;
npy_uint32 *op_flags;
npy_uint32 *iter_flags;
+ /* new in API version 0x0000000D */
+ npy_intp *core_dim_sizes;
+      npy_uint32 *core_dim_flags;
+
} PyUFuncObject;
.. c:macro: PyUFuncObject.PyObject_HEAD
@@ -764,6 +796,10 @@ PyUFunc_Type
specifies how many different 1-d loops (of the builtin data
types) are available.
+ .. c:member:: int PyUFuncObject.reserved1
+
+ Unused.
+
.. c:member:: char *PyUFuncObject.name
A string name for the ufunc. This is used dynamically to build
@@ -804,6 +840,51 @@ PyUFunc_Type
User defined type numbers are always larger than
:c:data:`NPY_USERDEF`.
+ .. c:member:: int PyUFuncObject.core_enabled
+
+ 0 for scalar ufuncs; 1 for generalized ufuncs
+
+ .. c:member:: int PyUFuncObject.core_num_dim_ix
+
+ Number of distinct core dimension names in the signature
+
+ .. c:member:: int *PyUFuncObject.core_num_dims
+
+ Number of core dimensions of each argument
+
+ .. c:member:: int *PyUFuncObject.core_dim_ixs
+
+ Dimension indices in a flattened form; indices of argument ``k`` are
+ stored in ``core_dim_ixs[core_offsets[k] : core_offsets[k] +
+      core_num_dims[k]]``
+
+ .. c:member:: int *PyUFuncObject.core_offsets
+
+ Position of 1st core dimension of each argument in ``core_dim_ixs``,
+ equivalent to cumsum(``core_num_dims``)
+
+ .. c:member:: char *PyUFuncObject.core_signature
+
+ Core signature string
+
+ .. c:member:: PyUFunc_TypeResolutionFunc *PyUFuncObject.type_resolver
+
+ A function which resolves the types and fills an array with the dtypes
+ for the inputs and outputs
+
+ .. c:member:: PyUFunc_LegacyInnerLoopSelectionFunc *PyUFuncObject.legacy_inner_loop_selector
+
+ A function which returns an inner loop. The ``legacy`` in the name arises
+ because for NumPy 1.6 a better variant had been planned. This variant
+ has not yet come about.
+
+ .. c:member:: void *PyUFuncObject.reserved2
+
+ For a possible future loop selector with a different signature.
+
+ .. c:member:: PyUFunc_MaskedInnerLoopSelectionFunc *PyUFuncObject.masked_inner_loop_selector
+
+      Function which returns a masked inner loop for the ufunc.
.. c:member:: npy_uint32 PyUFuncObject.op_flags
@@ -813,6 +894,21 @@ PyUFunc_Type
Override the default nditer flags for the ufunc.
+ Added in API version 0x0000000D
+
+ .. c:member:: npy_intp *PyUFuncObject.core_dim_sizes
+
+ For each distinct core dimension, the possible
+ :ref:`frozen <frozen>` size if :c:data:`UFUNC_CORE_DIM_SIZE_INFERRED` is 0
+
+ .. c:member:: npy_uint32 *PyUFuncObject.core_dim_flags
+
+ For each distinct core dimension, a set of ``UFUNC_CORE_DIM*`` flags
+
+ - :c:data:`UFUNC_CORE_DIM_CAN_IGNORE` if the dim name ends in ``?``
+ - :c:data:`UFUNC_CORE_DIM_SIZE_INFERRED` if the dim size will be
+ determined from the operands and not from a :ref:`frozen <frozen>` signature
+
PyArrayIter_Type
----------------
diff --git a/doc/source/reference/c-api.ufunc.rst b/doc/source/reference/c-api.ufunc.rst
index 02a35cf56..0499ccf5b 100644
--- a/doc/source/reference/c-api.ufunc.rst
+++ b/doc/source/reference/c-api.ufunc.rst
@@ -85,28 +85,77 @@ Functions
Must to an array of length *ntypes* containing
:c:type:`PyUFuncGenericFunction` items. These items are pointers to
functions that actually implement the underlying
- (element-by-element) function :math:`N` times.
+ (element-by-element) function :math:`N` times with the following
+ signature:
+
+ .. c:function:: void loopfunc(
+ char** args, npy_intp* dimensions, npy_intp* steps, void* data)
+
+ *args*
+
+ An array of pointers to the actual data for the input and output
+ arrays. The input arguments are given first followed by the output
+ arguments.
+
+ *dimensions*
+
+ A pointer to the size of the dimension over which this function is
+ looping.
+
+ *steps*
+
+ A pointer to the number of bytes to jump to get to the
+ next element in this dimension for each of the input and
+ output arguments.
+
+ *data*
+
+ Arbitrary data (extra arguments, function names, *etc.* )
+ that can be stored with the ufunc and will be passed in
+ when it is called.
+
+ This is an example of a func specialized for addition of doubles
+ returning doubles.
+
+ .. code-block:: c
+
+ static void
+ double_add(char **args, npy_intp *dimensions, npy_intp *steps,
+ void *extra)
+ {
+ npy_intp i;
+ npy_intp is1 = steps[0], is2 = steps[1];
+ npy_intp os = steps[2], n = dimensions[0];
+ char *i1 = args[0], *i2 = args[1], *op = args[2];
+ for (i = 0; i < n; i++) {
+ *((double *)op) = *((double *)i1) +
+ *((double *)i2);
+ i1 += is1;
+ i2 += is2;
+ op += os;
+ }
+ }
:param data:
Should be ``NULL`` or a pointer to an array of size *ntypes*
. This array may contain arbitrary extra-data to be passed to
- the corresponding 1-d loop function in the func array.
+ the corresponding loop function in the func array.
:param types:
Length ``(nin + nout) * ntypes`` array of ``char`` encoding the
- :ref:`PyArray_Descr.type_num` (built-in only) that the corresponding
+ `numpy.dtype.num` (built-in only) that the corresponding
function in the ``func`` array accepts. For instance, for a comparison
ufunc with three ``ntypes``, two ``nin`` and one ``nout``, where the
- first function accepts :ref:`npy_int32` and the the second
- :ref:`npy_int64`, with both returning :ref:`npy_bool`, ``types`` would
+      first function accepts `numpy.int32` and the second
+ `numpy.int64`, with both returning `numpy.bool_`, ``types`` would
be ``(char[]) {5, 5, 0, 7, 7, 0}`` since ``NPY_INT32`` is 5,
- ``NPY_INT64`` is 7, and ``NPY_BOOL`` is 0 (on the python side, these
- are exposed via :ref:`dtype.num`, i.e., for the example here,
- ``dtype(np.int32).num``, ``dtype(np.int64).num``, and
- ``dtype(np.bool_).num``, resp.).
+ ``NPY_INT64`` is 7, and ``NPY_BOOL`` is 0.
+
+ The bit-width names can also be used (e.g. :c:data:`NPY_INT32`,
+ :c:data:`NPY_COMPLEX128` ) if desired.
- :ref:`casting-rules` will be used at runtime to find the first
- ``func`` callable by the input/output provided.
+ :ref:`ufuncs.casting` will be used at runtime to find the first
+ ``func`` callable by the input/output provided.
:param ntypes:
How many different data-type-specific functions the ufunc has implemented.
@@ -117,13 +166,23 @@ Functions
:param nout:
The number of outputs
+ :param identity:
+
+ Either :c:data:`PyUFunc_One`, :c:data:`PyUFunc_Zero`,
+ :c:data:`PyUFunc_MinusOne`, or :c:data:`PyUFunc_None`.
+ This specifies what should be returned when
+ an empty array is passed to the reduce method of the ufunc.
+ The special value :c:data:`PyUFunc_IdentityValue` may only be used with
+ the :c:func:`PyUFunc_FromFuncAndDataAndSignatureAndIdentity` method, to
+ allow an arbitrary python object to be used as the identity.
+
:param name:
- The name for the ufunc. Specifying a name of 'add' or
- 'multiply' enables a special behavior for integer-typed
- reductions when no dtype is given. If the input type is an
- integer (or boolean) data type smaller than the size of the int_
- data type, it will be internally upcast to the int_ (or uint)
- data type.
+ The name for the ufunc as a ``NULL`` terminated string. Specifying
+ a name of 'add' or 'multiply' enables a special behavior for
+ integer-typed reductions when no dtype is given. If the input type is an
+ integer (or boolean) data type smaller than the size of the `numpy.int_`
+ data type, it will be internally upcast to the `numpy.int_` (or
+ `numpy.uint`) data type.
:param doc:
Allows passing in a documentation string to be stored with the
@@ -151,6 +210,21 @@ Functions
to calling PyUFunc_FromFuncAndData. A copy of the string is made,
so the passed in buffer can be freed.
+.. c:function:: PyObject* PyUFunc_FromFuncAndDataAndSignatureAndIdentity(
+ PyUFuncGenericFunction *func, void **data, char *types, int ntypes, \
+ int nin, int nout, int identity, char *name, char *doc, int unused, char *signature,
+ PyObject *identity_value)
+
+ This function is very similar to `PyUFunc_FromFuncAndDataAndSignature` above,
+ but has an extra *identity_value* argument, to define an arbitrary identity
+ for the ufunc when ``identity`` is passed as ``PyUFunc_IdentityValue``.
+
+ :param identity_value:
+ The identity for the new gufunc. Must be passed as ``NULL`` unless the
+      ``identity`` argument is ``PyUFunc_IdentityValue``. Setting it to ``NULL``
+      is equivalent to calling :c:func:`PyUFunc_FromFuncAndDataAndSignature`.
+
+
.. c:function:: int PyUFunc_RegisterLoopForType( \
PyUFuncObject* ufunc, int usertype, PyUFuncGenericFunction function, \
int* arg_types, void* data)
diff --git a/doc/source/reference/distutils.rst b/doc/source/reference/distutils.rst
index 289822909..88e533832 100644
--- a/doc/source/reference/distutils.rst
+++ b/doc/source/reference/distutils.rst
@@ -13,8 +13,7 @@ distutils, use the :func:`setup <core.setup>` command from
:mod:`numpy.distutils.misc_util` that can make it easier to construct
keyword arguments to pass to the setup function (by passing the
dictionary obtained from the todict() method of the class). More
-information is available in the NumPy Distutils Users Guide in
-``<site-packages>/numpy/doc/DISTUTILS.txt``.
+information is available in the :ref:`distutils-user-guide`.
.. index::
diff --git a/doc/source/reference/distutils_guide.rst b/doc/source/reference/distutils_guide.rst
new file mode 100644
index 000000000..081719d16
--- /dev/null
+++ b/doc/source/reference/distutils_guide.rst
@@ -0,0 +1,7 @@
+.. _distutils-user-guide:
+
+NumPy Distutils - Users Guide
+=============================
+
+.. include:: ../../DISTUTILS.rst.txt
+ :start-line: 6
diff --git a/doc/source/reference/index.rst b/doc/source/reference/index.rst
index 2140c57f7..6accb8535 100644
--- a/doc/source/reference/index.rst
+++ b/doc/source/reference/index.rst
@@ -23,6 +23,7 @@ For learning how to use NumPy, see also :ref:`user`.
ufuncs
routines
distutils
+ distutils_guide
c-api
internals
swig
@@ -35,4 +36,4 @@ Large parts of this manual originate from Travis E. Oliphant's book
`Guide to NumPy <https://archive.org/details/NumPyBook>`__ (which generously
entered Public Domain in August 2008). The reference documentation for many of
the functions are written by numerous contributors and developers of
-NumPy. \ No newline at end of file
+NumPy.
diff --git a/doc/source/reference/internals.code-explanations.rst b/doc/source/reference/internals.code-explanations.rst
index ca81e1676..65553e07e 100644
--- a/doc/source/reference/internals.code-explanations.rst
+++ b/doc/source/reference/internals.code-explanations.rst
@@ -17,7 +17,7 @@ pieces of code. The purpose behind these explanations is to enable
somebody to be able to understand the ideas behind the implementation
somewhat more easily than just staring at the code. Perhaps in this
way, the algorithms can be improved on, borrowed from, and/or
-optimized.
+optimized by more people.
Memory model
@@ -133,9 +133,9 @@ Broadcasting
.. index::
single: broadcasting
-In Numeric, broadcasting was implemented in several lines of code
-buried deep in ufuncobject.c. In NumPy, the notion of broadcasting has
-been abstracted so that it can be performed in multiple places.
+In Numeric, the ancestor of NumPy, broadcasting was implemented in several
+lines of code buried deep in ufuncobject.c. In NumPy, the notion of broadcasting
+has been abstracted so that it can be performed in multiple places.
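+
+The same machinery is exposed to Python as :class:`numpy.broadcast`, which can
+be used to inspect the broadcast shape directly::
+
+    >>> import numpy as np
+    >>> np.broadcast(np.empty((5, 1)), np.empty((1, 4))).shape
+    (5, 4)
+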
Broadcasting is handled by the function :c:func:`PyArray_Broadcast`. This
function requires a :c:type:`PyArrayMultiIterObject` (or something that is a
binary equivalent) to be passed in. The :c:type:`PyArrayMultiIterObject` keeps
diff --git a/doc/source/reference/internals.rst b/doc/source/reference/internals.rst
index e1d6644a6..03d081bf9 100644
--- a/doc/source/reference/internals.rst
+++ b/doc/source/reference/internals.rst
@@ -5,5 +5,6 @@ NumPy internals
.. toctree::
internals.code-explanations
+ alignment
.. automodule:: numpy.doc.internals
diff --git a/doc/source/reference/maskedarray.generic.rst b/doc/source/reference/maskedarray.generic.rst
index 07ad6c292..7375d60fb 100644
--- a/doc/source/reference/maskedarray.generic.rst
+++ b/doc/source/reference/maskedarray.generic.rst
@@ -2,7 +2,7 @@
.. _maskedarray.generic:
-
+.. module:: numpy.ma
The :mod:`numpy.ma` module
==========================
diff --git a/doc/source/reference/routines.ctypeslib.rst b/doc/source/reference/routines.ctypeslib.rst
index b04713b61..71b944a63 100644
--- a/doc/source/reference/routines.ctypeslib.rst
+++ b/doc/source/reference/routines.ctypeslib.rst
@@ -1,3 +1,5 @@
+.. module:: numpy.ctypeslib
+
***********************************************************
C-Types Foreign Function Interface (:mod:`numpy.ctypeslib`)
***********************************************************
diff --git a/doc/source/reference/routines.linalg.rst b/doc/source/reference/routines.linalg.rst
index 0520df413..c6bffc874 100644
--- a/doc/source/reference/routines.linalg.rst
+++ b/doc/source/reference/routines.linalg.rst
@@ -1,5 +1,7 @@
.. _routines.linalg:
+.. module:: numpy.linalg
+
Linear algebra (:mod:`numpy.linalg`)
************************************
diff --git a/doc/source/reference/routines.matlib.rst b/doc/source/reference/routines.matlib.rst
index a35eaec78..c7f675425 100644
--- a/doc/source/reference/routines.matlib.rst
+++ b/doc/source/reference/routines.matlib.rst
@@ -1,3 +1,5 @@
+.. module:: numpy.matlib
+
Matrix library (:mod:`numpy.matlib`)
************************************
diff --git a/doc/source/reference/routines.numarray.rst b/doc/source/reference/routines.numarray.rst
deleted file mode 100644
index 9e84f49b9..000000000
--- a/doc/source/reference/routines.numarray.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-**********************
-Numarray compatibility
-**********************
-
-The numarray module was removed in NumPy 1.9.0.
diff --git a/doc/source/reference/routines.oldnumeric.rst b/doc/source/reference/routines.oldnumeric.rst
deleted file mode 100644
index 2120fc69e..000000000
--- a/doc/source/reference/routines.oldnumeric.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-*************************
-Old Numeric compatibility
-*************************
-
-.. currentmodule:: numpy
-
-The oldnumeric module was removed in NumPy 1.9.0.
diff --git a/doc/source/reference/routines.polynomials.classes.rst b/doc/source/reference/routines.polynomials.classes.rst
index f44ddd46c..da0394305 100644
--- a/doc/source/reference/routines.polynomials.classes.rst
+++ b/doc/source/reference/routines.polynomials.classes.rst
@@ -35,11 +35,11 @@ degree :math:`n`, but could just as easily be the basis functions of
any of the other classes. The convention for all the classes is that
the coefficient :math:`c[i]` goes with the basis function of degree i.
-All of the classes have the same methods, and especially they implement the
-Python numeric operators +, -, \*, //, %, divmod, \*\*, ==,
-and !=. The last two can be a bit problematic due to floating point
-roundoff errors. We now give a quick demonstration of the various
-operations using NumPy version 1.7.0.
+All of the classes are immutable and have the same methods; in particular,
+they implement the Python numeric operators +, -, \*, //, %,
+divmod, \*\*, ==, and !=. The last two can be a bit problematic due to
+floating point roundoff errors. We now give a quick demonstration of the
+various operations using NumPy version 1.7.0.
Basics
------
@@ -187,6 +187,11 @@ Which gives the polynomial `p` in Chebyshev form. This works because
the original polynomial. However, all the multiplications and divisions
will be done using Chebyshev series, hence the type of the result.
+It is intended that all polynomial instances are immutable; therefore,
+augmented operations (``+=``, ``-=``, etc.) and any other functionality that
+would violate the immutability of a polynomial instance are intentionally
+unimplemented.
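+
+A quick sketch of what immutability means in practice (representative
+output)::
+
+    >>> from numpy.polynomial import Polynomial as P
+    >>> p = P([1, 2, 3])
+    >>> q = p + p          # operators always return a new instance
+    >>> q is p
+    False
+    >>> p.coef             # the original is unchanged
+    array([1., 2., 3.])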
+
Calculus
--------
diff --git a/doc/source/reference/routines.polynomials.package.rst b/doc/source/reference/routines.polynomials.package.rst
index 61cb57fbb..7e40d9f00 100644
--- a/doc/source/reference/routines.polynomials.package.rst
+++ b/doc/source/reference/routines.polynomials.package.rst
@@ -1,3 +1,5 @@
+.. module:: numpy.polynomial
+
Polynomial Package
==================
diff --git a/doc/source/reference/routines.polynomials.polynomial.rst b/doc/source/reference/routines.polynomials.polynomial.rst
index 8194ca867..365c8da98 100644
--- a/doc/source/reference/routines.polynomials.polynomial.rst
+++ b/doc/source/reference/routines.polynomials.polynomial.rst
@@ -1,3 +1,5 @@
+.. module:: numpy.polynomial.polynomial
+
Polynomial Module (:mod:`numpy.polynomial.polynomial`)
======================================================
diff --git a/doc/source/reference/routines.random.rst b/doc/source/reference/routines.random.rst
index c8b097d7d..cda4e2b61 100644
--- a/doc/source/reference/routines.random.rst
+++ b/doc/source/reference/routines.random.rst
@@ -1,5 +1,7 @@
.. _routines.random:
+.. module:: numpy.random
+
Random sampling (:mod:`numpy.random`)
*************************************
diff --git a/doc/source/reference/routines.testing.rst b/doc/source/reference/routines.testing.rst
index 5a52a40d6..77c046768 100644
--- a/doc/source/reference/routines.testing.rst
+++ b/doc/source/reference/routines.testing.rst
@@ -1,5 +1,7 @@
.. _numpy-testing:
+.. module:: numpy.testing
+
Test Support (:mod:`numpy.testing`)
===================================
diff --git a/doc/source/release.rst b/doc/source/release.rst
index 28e9fd5be..1cf215549 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -2,6 +2,7 @@
Release Notes
*************
+.. include:: ../release/1.16.0-notes.rst
.. include:: ../release/1.15.4-notes.rst
.. include:: ../release/1.15.3-notes.rst
.. include:: ../release/1.15.2-notes.rst
@@ -44,4 +45,3 @@ Release Notes
.. include:: ../release/1.5.0-notes.rst
.. include:: ../release/1.4.0-notes.rst
.. include:: ../release/1.3.0-notes.rst
-
diff --git a/doc/source/user/basics.broadcasting.rst b/doc/source/user/basics.broadcasting.rst
index 65584b1fd..4e9016ee0 100644
--- a/doc/source/user/basics.broadcasting.rst
+++ b/doc/source/user/basics.broadcasting.rst
@@ -2,6 +2,10 @@
Broadcasting
************
-.. seealso:: :class:`numpy.broadcast`
+.. seealso::
+ :class:`numpy.broadcast`
+
+ :ref:`array-broadcasting-in-numpy`
+ An introduction to the concepts discussed here
.. automodule:: numpy.doc.broadcasting
diff --git a/doc/source/user/building.rst b/doc/source/user/building.rst
index 76eb48487..d224951dd 100644
--- a/doc/source/user/building.rst
+++ b/doc/source/user/building.rst
@@ -16,7 +16,7 @@ Building NumPy requires the following software installed:
On Debian and derivatives (Ubuntu): python, python-dev (or python3-dev)
On Windows: the official python installer at
- `www.python.org <http://www.python.org>`_ is enough
+ `www.python.org <https://www.python.org>`_ is enough
Make sure that the Python package distutils is installed before
continuing. For example, in Debian GNU/Linux, installing python-dev
diff --git a/doc/source/user/c-info.beyond-basics.rst b/doc/source/user/c-info.beyond-basics.rst
index 5c321088d..d4d941a5e 100644
--- a/doc/source/user/c-info.beyond-basics.rst
+++ b/doc/source/user/c-info.beyond-basics.rst
@@ -358,38 +358,6 @@ previously created. Then you call :c:func:`PyUFunc_RegisterLoopForType`
this function is ``0`` if the process was successful and ``-1`` with
an error condition set if it was not successful.
-.. c:function:: int PyUFunc_RegisterLoopForType( \
- PyUFuncObject* ufunc, int usertype, PyUFuncGenericFunction function, \
- int* arg_types, void* data)
-
- *ufunc*
-
- The ufunc to attach this loop to.
-
- *usertype*
-
- The user-defined type this loop should be indexed under. This number
- must be a user-defined type or an error occurs.
-
- *function*
-
- The ufunc inner 1-d loop. This function must have the signature as
- explained in Section `3 <#sec-creating-a-new>`__ .
-
- *arg_types*
-
- (optional) If given, this should contain an array of integers of at
- least size ufunc.nargs containing the data-types expected by the loop
- function. The data will be copied into a NumPy-managed structure so
- the memory for this argument should be deleted after calling this
- function. If this is NULL, then it will be assumed that all data-types
- are of type usertype.
-
- *data*
-
- (optional) Specify any optional data needed by the function which will
- be passed when the function is called.
-
.. index::
pair: dtype; adding new
@@ -481,7 +449,7 @@ type(s). In particular, to create a sub-type in C follow these steps:
module dictionary so it can be accessed from Python.
More information on creating sub-types in C can be learned by reading
-PEP 253 (available at http://www.python.org/dev/peps/pep-0253).
+PEP 253 (available at https://www.python.org/dev/peps/pep-0253).
Specific features of ndarray sub-typing
diff --git a/doc/source/user/c-info.how-to-extend.rst b/doc/source/user/c-info.how-to-extend.rst
index 22c3b6e90..9738168d2 100644
--- a/doc/source/user/c-info.how-to-extend.rst
+++ b/doc/source/user/c-info.how-to-extend.rst
@@ -36,7 +36,7 @@ into Python as if it were a standard python file. It will contain
objects and methods that have been defined and compiled in C code. The
basic steps for doing this in Python are well-documented and you can
find more information in the documentation for Python itself available
-online at `www.python.org <http://www.python.org>`_ .
+online at `www.python.org <https://www.python.org>`_ .
In addition to the Python C-API, there is a full and rich C-API for
NumPy allowing sophisticated manipulations on a C-level. However, for
diff --git a/doc/source/user/c-info.python-as-glue.rst b/doc/source/user/c-info.python-as-glue.rst
index 0152ac549..01d2a64d1 100644
--- a/doc/source/user/c-info.python-as-glue.rst
+++ b/doc/source/user/c-info.python-as-glue.rst
@@ -405,8 +405,8 @@ interface between Python and Fortran. There is decent documentation
for f2py found in the numpy/f2py/docs directory where-ever NumPy is
installed on your system (usually under site-packages). There is also
more information on using f2py (including how to use it to wrap C
-codes) at http://www.scipy.org/Cookbook under the "Using NumPy with
-Other Languages" heading.
+codes) at https://scipy-cookbook.readthedocs.io under the "Interfacing
+With Other Languages" heading.
The f2py method of linking compiled code is currently the most
sophisticated and integrated approach. It allows clean separation of
@@ -944,7 +944,7 @@ Linux system this is accomplished using::
Which creates a shared_library named code.so in the current directory.
On Windows don't forget to either add ``__declspec(dllexport)`` in front
of void on the line preceding each function definition, or write a
-code.def file that lists the names of the functions to be exported.
+``code.def`` file that lists the names of the functions to be exported.
A suitable Python interface to this shared library should be
constructed. To do this create a file named interface.py with the
@@ -954,25 +954,25 @@ following lines at the top:
__all__ = ['add', 'filter2d']
- import numpy as N
+ import numpy as np
import os
_path = os.path.dirname('__file__')
- lib = N.ctypeslib.load_library('code', _path)
- _typedict = {'zadd' : complex, 'sadd' : N.single,
- 'cadd' : N.csingle, 'dadd' : float}
+ lib = np.ctypeslib.load_library('code', _path)
+ _typedict = {'zadd' : complex, 'sadd' : np.single,
+ 'cadd' : np.csingle, 'dadd' : float}
for name in _typedict.keys():
val = getattr(lib, name)
val.restype = None
_type = _typedict[name]
- val.argtypes = [N.ctypeslib.ndpointer(_type,
+ val.argtypes = [np.ctypeslib.ndpointer(_type,
flags='aligned, contiguous'),
- N.ctypeslib.ndpointer(_type,
+ np.ctypeslib.ndpointer(_type,
flags='aligned, contiguous'),
- N.ctypeslib.ndpointer(_type,
+ np.ctypeslib.ndpointer(_type,
flags='aligned, contiguous,'\
'writeable'),
- N.ctypeslib.c_intp]
+ np.ctypeslib.c_intp]
This code loads the shared library named ``code.{ext}`` located in the
same path as this file. It then adds a return type of void to the
@@ -989,13 +989,13 @@ strides and shape of an ndarray) as the last two arguments.:
.. code-block:: python
lib.dfilter2d.restype=None
- lib.dfilter2d.argtypes = [N.ctypeslib.ndpointer(float, ndim=2,
+ lib.dfilter2d.argtypes = [np.ctypeslib.ndpointer(float, ndim=2,
flags='aligned'),
- N.ctypeslib.ndpointer(float, ndim=2,
+ np.ctypeslib.ndpointer(float, ndim=2,
flags='aligned, contiguous,'\
'writeable'),
- ctypes.POINTER(N.ctypeslib.c_intp),
- ctypes.POINTER(N.ctypeslib.c_intp)]
+ ctypes.POINTER(np.ctypeslib.c_intp),
+ ctypes.POINTER(np.ctypeslib.c_intp)]
Next, define a simple selection function that chooses which addition
function to call in the shared library based on the data-type:
@@ -1020,11 +1020,11 @@ written simply as:
def add(a, b):
requires = ['CONTIGUOUS', 'ALIGNED']
- a = N.asanyarray(a)
+ a = np.asanyarray(a)
func, dtype = select(a.dtype)
- a = N.require(a, dtype, requires)
- b = N.require(b, dtype, requires)
- c = N.empty_like(a)
+ a = np.require(a, dtype, requires)
+ b = np.require(b, dtype, requires)
+ c = np.empty_like(a)
func(a,b,c,a.size)
return c
@@ -1033,8 +1033,8 @@ and:
.. code-block:: python
def filter2d(a):
- a = N.require(a, float, ['ALIGNED'])
- b = N.zeros_like(a)
+ a = np.require(a, float, ['ALIGNED'])
+ b = np.zeros_like(a)
lib.dfilter2d(a, b, a.ctypes.strides, a.ctypes.shape)
return b
diff --git a/doc/source/user/c-info.ufunc-tutorial.rst b/doc/source/user/c-info.ufunc-tutorial.rst
index 5818ff182..96a73f9a6 100644
--- a/doc/source/user/c-info.ufunc-tutorial.rst
+++ b/doc/source/user/c-info.ufunc-tutorial.rst
@@ -17,7 +17,7 @@ Creating a new universal function
Before reading this, it may help to familiarize yourself with the basics
of C extensions for Python by reading/skimming the tutorials in Section 1
of `Extending and Embedding the Python Interpreter
-<http://docs.python.org/extending/index.html>`_ and in :doc:`How to extend
+<https://docs.python.org/extending/index.html>`_ and in :doc:`How to extend
NumPy <c-info.how-to-extend>`
The umath module is a computer-generated C-module that creates many
@@ -893,9 +893,9 @@ Example NumPy ufunc with structured array dtype arguments
This example shows how to create a ufunc for a structured array dtype.
For the example we show a trivial ufunc for adding two arrays with dtype
'u8,u8,u8'. The process is a bit different from the other examples since
-a call to PyUFunc_FromFuncAndData doesn't fully register ufuncs for
+a call to :c:func:`PyUFunc_FromFuncAndData` doesn't fully register ufuncs for
custom dtypes and structured array dtypes. We need to also call
-PyUFunc_RegisterLoopForDescr to finish setting up the ufunc.
+:c:func:`PyUFunc_RegisterLoopForDescr` to finish setting up the ufunc.
We only give the C code as the setup.py file is exactly the same as
the setup.py file in `Example NumPy ufunc for one dtype`_, except that
@@ -1048,133 +1048,6 @@ The C file is given below.
#endif
}
-
-.. _`sec:PyUFunc-spec`:
-
-PyUFunc_FromFuncAndData Specification
-=====================================
-
-What follows is the full specification of PyUFunc_FromFuncAndData, which
-automatically generates a ufunc from a C function with the correct signature.
-
-.. seealso:: :c:func:`PyUFunc_FromFuncAndDataAndSignature`
-
-.. c:function:: PyObject *PyUFunc_FromFuncAndData( \
- PyUFuncGenericFunction* func, void** data, char* types, int ntypes, \
- int nin, int nout, int identity, char* name, char* doc, int unused)
-
- *func*
-
- A pointer to an array of 1-d functions to use. This array must be at
- least ntypes long. Each entry in the array must be a
- ``PyUFuncGenericFunction`` function. This function has the following
- signature. An example of a valid 1d loop function is also given.
-
- .. c:function:: void loop1d( \
- char** args, npy_intp* dimensions, npy_intp* steps, void* data)
-
- *args*
-
- An array of pointers to the actual data for the input and output
- arrays. The input arguments are given first followed by the output
- arguments.
-
- *dimensions*
-
- A pointer to the size of the dimension over which this function is
- looping.
-
- *steps*
-
- A pointer to the number of bytes to jump to get to the
- next element in this dimension for each of the input and
- output arguments.
-
- *data*
-
- Arbitrary data (extra arguments, function names, *etc.* )
- that can be stored with the ufunc and will be passed in
- when it is called.
-
- .. code-block:: c
-
- static void
- double_add(char **args, npy_intp *dimensions, npy_intp *steps,
- void *extra)
- {
- npy_intp i;
- npy_intp is1 = steps[0], is2 = steps[1];
- npy_intp os = steps[2], n = dimensions[0];
- char *i1 = args[0], *i2 = args[1], *op = args[2];
- for (i = 0; i < n; i++) {
- *((double *)op) = *((double *)i1) +
- *((double *)i2);
- i1 += is1;
- i2 += is2;
- op += os;
- }
- }
-
- *data*
-
- An array of data. There should be ntypes entries (or NULL) --- one for
- every loop function defined for this ufunc. This data will be passed
- in to the 1-d loop. One common use of this data variable is to pass in
- an actual function to call to compute the result when a generic 1-d
- loop (e.g. :c:func:`PyUFunc_d_d`) is being used.
-
- *types*
-
- An array of type-number signatures (type ``char`` ). This
- array should be of size (nin+nout)*ntypes and contain the
- data-types for the corresponding 1-d loop. The inputs should
- be first followed by the outputs. For example, suppose I have
- a ufunc that supports 1 integer and 1 double 1-d loop
- (length-2 func and data arrays) that takes 2 inputs and
- returns 1 output that is always a complex double, then the
- types array would be
-
- .. code-block:: c
-
- static char types[3] = {NPY_INT, NPY_DOUBLE, NPY_CDOUBLE}
-
- The bit-width names can also be used (e.g. :c:data:`NPY_INT32`,
- :c:data:`NPY_COMPLEX128` ) if desired.
-
- *ntypes*
-
- The number of data-types supported. This is equal to the number of 1-d
- loops provided.
-
- *nin*
-
- The number of input arguments.
-
- *nout*
-
- The number of output arguments.
-
- *identity*
-
- Either :c:data:`PyUFunc_One`, :c:data:`PyUFunc_Zero`,
- :c:data:`PyUFunc_None`. This specifies what should be returned when
- an empty array is passed to the reduce method of the ufunc.
-
- *name*
-
- A ``NULL`` -terminated string providing the name of this ufunc
- (should be the Python name it will be called).
-
- *doc*
-
- A documentation string for this ufunc (will be used in generating the
- response to ``{ufunc_name}.__doc__``). Do not include the function
- signature or the name as this is generated automatically.
-
- *unused*
-
- Unused; kept for compatibility. Just set it to zero.
-
.. index::
pair: ufunc; adding new
diff --git a/doc/source/user/install.rst b/doc/source/user/install.rst
index dd7543645..52586f3d7 100644
--- a/doc/source/user/install.rst
+++ b/doc/source/user/install.rst
@@ -4,7 +4,7 @@ Installing NumPy
In most use cases the best way to install NumPy on your system is by using a
pre-built package for your operating system. Please see
-http://scipy.org/install.html for links to available options.
+https://scipy.org/install.html for links to available options.
For instructions on building for source package, see
:doc:`building`. This information is useful mainly for advanced users.
diff --git a/doc/source/user/numpy-for-matlab-users.rst b/doc/source/user/numpy-for-matlab-users.rst
index 475c68c04..399237c21 100644
--- a/doc/source/user/numpy-for-matlab-users.rst
+++ b/doc/source/user/numpy-for-matlab-users.rst
@@ -618,9 +618,9 @@ initial element of a sequence has index 0. Confusion and flamewars arise
because each has advantages and disadvantages. One based indexing is
consistent with common human language usage, where the "first" element
of a sequence has index 1. Zero based indexing `simplifies
-indexing <http://groups.google.com/group/comp.lang.python/msg/1bf4d925dfbf368?q=g:thl3498076713d&hl=en>`__.
+indexing <https://groups.google.com/group/comp.lang.python/msg/1bf4d925dfbf368?q=g:thl3498076713d&hl=en>`__.
See also `a text by prof.dr. Edsger W.
-Dijkstra <http://www.cs.utexas.edu/users/EWD/transcriptions/EWD08xx/EWD831.html>`__.
+Dijkstra <https://www.cs.utexas.edu/users/EWD/transcriptions/EWD08xx/EWD831.html>`__.
\ **RANGES**: In MATLAB®, ``0:5`` can be used as both a range literal
and a 'slice' index (inside parentheses); however, in Python, constructs
@@ -715,6 +715,6 @@ See http://mathesaurus.sf.net/ for another MATLAB®/NumPy
cross-reference.
An extensive list of tools for scientific work with python can be
-found in the `topical software page <http://scipy.org/topical-software.html>`__.
+found in the `topical software page <https://scipy.org/topical-software.html>`__.
MATLAB® and SimuLink® are registered trademarks of The MathWorks.
diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst
index 57a7004cc..5ef8b145f 100644
--- a/doc/source/user/quickstart.rst
+++ b/doc/source/user/quickstart.rst
@@ -14,11 +14,11 @@ Prerequisites
Before reading this tutorial you should know a bit of Python. If you
would like to refresh your memory, take a look at the `Python
-tutorial <http://docs.python.org/tut/>`__.
+tutorial <https://docs.python.org/tutorial/>`__.
If you wish to work the examples in this tutorial, you must also have
some software installed on your computer. Please see
-http://scipy.org/install.html for instructions.
+https://scipy.org/install.html for instructions.
The Basics
==========
@@ -569,7 +569,7 @@ first axis::
However, if one wants to perform an operation on each element in the
array, one can use the ``flat`` attribute which is an
-`iterator <https://docs.python.org/2/tutorial/classes.html#iterators>`__
+`iterator <https://docs.python.org/tutorial/classes.html#iterators>`__
over all the elements of the array::
>>> for element in b.flat:
@@ -1191,7 +1191,7 @@ This property can be very useful in assignments::
You can look at the following
example to see
how to use boolean indexing to generate an image of the `Mandelbrot
-set <http://en.wikipedia.org/wiki/Mandelbrot_set>`__:
+set <https://en.wikipedia.org/wiki/Mandelbrot_set>`__:
.. plot::
@@ -1462,8 +1462,8 @@ that ``pylab.hist`` plots the histogram automatically, while
Further reading
===============
-- The `Python tutorial <http://docs.python.org/tutorial/>`__
+- The `Python tutorial <https://docs.python.org/tutorial/>`__
- :ref:`reference`
- `SciPy Tutorial <https://docs.scipy.org/doc/scipy/reference/tutorial/index.html>`__
-- `SciPy Lecture Notes <http://www.scipy-lectures.org>`__
+- `SciPy Lecture Notes <https://www.scipy-lectures.org>`__
- A `matlab, R, IDL, NumPy/SciPy dictionary <http://mathesaurus.sf.net/>`__
diff --git a/doc/source/user/theory.broadcast_1.gif b/doc/source/user/theory.broadcast_1.gif
new file mode 100644
index 000000000..541ec734b
--- /dev/null
+++ b/doc/source/user/theory.broadcast_1.gif
Binary files differ
diff --git a/doc/source/user/theory.broadcast_2.gif b/doc/source/user/theory.broadcast_2.gif
new file mode 100644
index 000000000..163a8473f
--- /dev/null
+++ b/doc/source/user/theory.broadcast_2.gif
Binary files differ
diff --git a/doc/source/user/theory.broadcast_3.gif b/doc/source/user/theory.broadcast_3.gif
new file mode 100644
index 000000000..83f61f2df
--- /dev/null
+++ b/doc/source/user/theory.broadcast_3.gif
Binary files differ
diff --git a/doc/source/user/theory.broadcast_4.gif b/doc/source/user/theory.broadcast_4.gif
new file mode 100644
index 000000000..9b21ff582
--- /dev/null
+++ b/doc/source/user/theory.broadcast_4.gif
Binary files differ
diff --git a/doc/source/user/theory.broadcast_5.png b/doc/source/user/theory.broadcast_5.png
new file mode 100644
index 000000000..3aa2f0536
--- /dev/null
+++ b/doc/source/user/theory.broadcast_5.png
Binary files differ
diff --git a/doc/source/user/theory.broadcasting.rst b/doc/source/user/theory.broadcasting.rst
new file mode 100644
index 000000000..b37edeacc
--- /dev/null
+++ b/doc/source/user/theory.broadcasting.rst
@@ -0,0 +1,229 @@
+:orphan:
+
+.. _array-broadcasting-in-numpy:
+
+===========================
+Array Broadcasting in Numpy
+===========================
+
+..
+ Originally part of the scipy.org wiki, available `here
+ <https://scipy.github.io/old-wiki/pages/EricsBroadcastingDoc>`_ or from the
+ `github repo
+ <https://github.com/scipy/old-wiki/blob/gh-pages/pages/EricsBroadcastingDoc.html>`_
+
+Let's explore a more advanced concept in numpy called broadcasting. The
+term broadcasting describes how numpy treats arrays with different shapes
+during arithmetic operations. Subject to certain constraints, the smaller array
+is "broadcast" across the larger array so that they have compatible shapes.
+Broadcasting provides a means of vectorizing array operations so that looping
+occurs in C instead of Python. It does this without making needless copies of
+data and usually leads to efficient algorithm implementations. There are also
+cases where broadcasting is a bad idea because it leads to inefficient use of
+memory that slows computation. This article provides a gentle introduction to
+broadcasting with numerous examples ranging from simple to involved. It also
+provides hints on when and when not to use broadcasting.
+
+numpy operations are usually done element-by-element, which requires two arrays
+to have exactly the same shape:
+
+.. code-block:: python
+ :caption: Example 1
+ :name: example-1
+
+ >>> from numpy import array
+ >>> a = array([1.0, 2.0, 3.0])
+ >>> b = array([2.0, 2.0, 2.0])
+ >>> a * b
+ array([ 2., 4., 6.])
+
+numpy's broadcasting rule relaxes this constraint when the arrays' shapes meet
+certain conditions. The simplest broadcasting example occurs when an array and
+a scalar value are combined in an operation:
+
+.. code-block:: python
+ :caption: Example 2
+ :name: example-2
+
+ >>> from numpy import array
+ >>> a = array([1.0,2.0,3.0])
+ >>> b = 2.0
+ >>> a * b
+ array([ 2., 4., 6.])
+
+The result is equivalent to the previous example where ``b`` was an array. We
+can think of the scalar ``b`` being stretched during the arithmetic operation
+into an array with the same shape as ``a``. The new elements in ``b``, as shown
+in :ref:`figure-1`, are simply copies of the original scalar. The stretching
+analogy is only conceptual. numpy is smart enough to use the original scalar
+value without actually making copies so that broadcasting operations are as
+memory and computationally efficient as possible. Because :ref:`example-2`
+moves less memory around during the multiplication (``b`` is a scalar,
+not an array), it is about 10% faster than :ref:`example-1` using the standard
+numpy on Windows 2000 with one million element arrays.
+
+.. figure:: theory.broadcast_1.gif
+ :alt: Vector-Scalar multiplication
+ :name: figure-1
+
+ *Figure 1*
+
+ *In the simplest example of broadcasting, the scalar ``b`` is
+   stretched to become an array with the same shape as ``a`` so the shapes
+ are compatible for element-by-element multiplication.*
+
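+The speed claim above is easy to check on your own machine. The snippet below
+is an editorial sketch added for illustration (it is not part of the original
+article); absolute timings will vary by platform:
+
+.. code-block:: python
+
+    >>> import timeit
+    >>> setup = "from numpy import arange; a = arange(1e6); b = a.copy()"
+    >>> timeit.timeit("a * 2.0", setup=setup, number=100)  # doctest: +SKIP
+    >>> timeit.timeit("a * b", setup=setup, number=100)    # doctest: +SKIP
+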
+
+The rule governing whether two arrays have compatible shapes for broadcasting
+can be expressed in a single sentence.
+
+.. admonition:: The Broadcasting Rule
+
+ **In order to broadcast, the size of the trailing axes for both arrays in
+   an operation must either be the same, or one of them must be one.**
+
+If this condition is not met, a ``ValueError('frames are not aligned')``
+exception is raised, indicating that the arrays have incompatible shapes. The
+size of the result array created by broadcast operations is the maximum size
+along each dimension from the input arrays. Note that the rule does not say
+anything about the two arrays needing to have the same number of dimensions.
+So, for example, if you have a 256 x 256 x 3 array of RGB values, and you want
+to scale each color in the image by a different value, you can multiply the
+image by a one-dimensional array with 3 values. Lining up the sizes of the
+trailing axes of these arrays according to the broadcast rule shows that they
+are compatible:
+
++-------+------------+-------+-------+---+
+|Image | (3d array) | 256 x | 256 x | 3 |
++-------+------------+-------+-------+---+
+|Scale | (1d array) | | | 3 |
++-------+------------+-------+-------+---+
+|Result | (3d array) | 256 x | 256 x | 3 |
++-------+------------+-------+-------+---+
+
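+A minimal sketch of this image-scaling case (an editorial illustration with
+made-up scale factors):
+
+.. code-block:: python
+
+    >>> from numpy import ones, array
+    >>> image = ones((256, 256, 3))      # RGB image
+    >>> scale = array([0.5, 1.0, 2.0])   # one factor per color channel
+    >>> (image * scale).shape            # scale is broadcast over rows/columns
+    (256, 256, 3)
+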
+In the following example, both the ``A`` and ``B`` arrays have axes with length
+one that are expanded to a larger size in a broadcast operation.
+
++-------+------------+-----+-----+-----+---+
+|A | (4d array) | 8 x | 1 x | 6 x | 1 |
++-------+------------+-----+-----+-----+---+
+|B | (3d array) | | 7 x | 1 x | 5 |
++-------+------------+-----+-----+-----+---+
+|Result | (4d array) | 8 x | 7 x | 6 x | 5 |
++-------+------------+-----+-----+-----+---+
+
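+The resulting shape can be verified directly (an editorial sketch):
+
+.. code-block:: python
+
+    >>> from numpy import ones
+    >>> A = ones((8, 1, 6, 1))
+    >>> B = ones((7, 1, 5))
+    >>> (A * B).shape   # length-1 axes are stretched; B gains a leading axis
+    (8, 7, 6, 5)
+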
+Below are several code examples and graphical representations that help make
+the broadcast rule visually obvious. :ref:`example-3` adds a one-dimensional array
+to a two-dimensional array:
+
+.. code-block:: python
+ :caption: Example 3
+ :name: example-3
+
+ >>> from numpy import array
+ >>> a = array([[ 0.0, 0.0, 0.0],
+ ... [10.0, 10.0, 10.0],
+ ... [20.0, 20.0, 20.0],
+ ... [30.0, 30.0, 30.0]])
+ >>> b = array([1.0, 2.0, 3.0])
+ >>> a + b
+ array([[ 1., 2., 3.],
+ [ 11., 12., 13.],
+ [ 21., 22., 23.],
+ [ 31., 32., 33.]])
+
+As shown in :ref:`figure-2`, ``b`` is added to each row of ``a``. When ``b`` is
+longer than the rows of ``a``, as in :ref:`figure-3`, an exception is raised
+because of the incompatible shapes.
+
+.. figure:: theory.broadcast_2.gif
+ :alt: Matrix-Vector
+ :name: figure-2
+
+ *Figure 2*
+
+ *A two dimensional array multiplied by a one dimensional array results in
+   broadcasting if the number of 1-d array elements matches the number of 2-d
+ array columns.*
+
+.. figure:: theory.broadcast_3.gif
+ :alt: Matrix-Vector-with-error
+ :name: figure-3
+
+ *Figure 3*
+
+ *When the trailing dimensions of the arrays are unequal, broadcasting fails
+ because it is impossible to align the values in the rows of the 1st array
+   with the elements of the 2nd array for element-by-element addition.*
+
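+The failing case of :ref:`figure-3` can be reproduced directly. This is an
+editorial sketch; the exact exception message depends on the numpy version:
+
+.. code-block:: python
+
+    >>> from numpy import array
+    >>> a = array([[ 0.0,  0.0,  0.0],
+    ...            [10.0, 10.0, 10.0]])
+    >>> b = array([1.0, 2.0, 3.0, 4.0])
+    >>> a + b   # doctest: +SKIP
+    Traceback (most recent call last):
+        ...
+    ValueError: operands could not be broadcast together with shapes (2,3) (4,)
+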
+Broadcasting provides a convenient way of taking the outer product (or any
+other outer operation) of two arrays. The following example shows an outer
+addition operation of two 1-d arrays that produces the same result as
+:ref:`example-3`:
+
+.. code-block:: python
+ :caption: Example 4
+ :name: example-4
+
+ >>> from numpy import array, newaxis
+ >>> a = array([0.0, 10.0, 20.0, 30.0])
+ >>> b = array([1.0, 2.0, 3.0])
+ >>> a[:,newaxis] + b
+ array([[ 1., 2., 3.],
+ [ 11., 12., 13.],
+ [ 21., 22., 23.],
+ [ 31., 32., 33.]])
+
+Here the ``newaxis`` index operator inserts a new axis into ``a``, making it a
+two-dimensional 4x1 array. :ref:`figure-4` illustrates the stretching of both
+arrays to produce the desired 4x3 output array.
+
+.. figure:: theory.broadcast_4.gif
+ :alt: vector-vector with newaxis
+ :name: figure-4
+
+ *Figure 4*
+
+   *In some cases, broadcasting stretches both arrays to form an output array
+ larger than either of the initial arrays.*
+
+A Practical Example: Vector Quantization
+=========================================
+
+Broadcasting comes up quite often in real-world problems. A typical example
+occurs in the vector quantization (VQ) algorithm used in information theory,
+classification, and other related areas. The basic operation in VQ [#f0]_ finds
+the closest point in a set of points, called codes in VQ jargon, to a given
+point, called the observation. In the very simple, two-dimensional case shown
+in :ref:`figure-5`, the values in observation describe the weight and height of an
+athlete to be classified. The codes represent different classes of
+athletes. [#f1]_ Finding the closest point requires calculating the distance
+between observation and each of the codes. The shortest distance provides the
+best match. In this example, ``codes[0]`` is the closest class indicating that
+the athlete is likely a basketball player.
+
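+The distance computation itself is a one-line broadcast. The following is an
+editorial sketch; the weight/height values are illustrative only:
+
+.. code-block:: python
+
+    >>> from numpy import array, sqrt, argmin
+    >>> observation = array([111.0, 188.0])    # weight, height of the athlete
+    >>> codes = array([[102.0, 203.0],         # one row per known class
+    ...                [132.0, 193.0],
+    ...                [ 45.0, 155.0],
+    ...                [ 57.0, 173.0]])
+    >>> diff = codes - observation             # (4, 2) - (2,) broadcasts to (4, 2)
+    >>> dist = sqrt((diff ** 2).sum(axis=-1))  # distance to each code
+    >>> argmin(dist)                           # index of the closest code
+    0
+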
+.. figure:: theory.broadcast_5.png
+ :alt: vector quantitization example
+ :name: figure-5
+
+ *Figure 5*
+
+ *The basic operation of vector quantization calculates the distance between
+ an object to be classified, the dark square, and multiple known codes, the
+ gray circles. In this simple case, the codes represent individual classes.
+ More complex cases use multiple codes per class.*
+
+
+.. rubric:: Footnotes
+
+.. [#f0] J. Makhoul, S. Roucos, and H. Gish, "Vector Quantization in Speech Coding," Proc. IEEE, vol. 73, pp. 1551-1587, Nov. 1985.
+.. [#f1]
+ In this example, weight has more impact on the distance calculation
+ than height because of the larger values. In practice, it is important to
+ normalize the height and weight, often by their standard deviation across the
+ data set, so that both have equal influence on the distance calculation.
+
+.. note::
+
+ The code to produce the figures is part of the `AstroML book
+ <http://www.astroml.org/book_figures/appendix/fig_broadcast_visual.html>`_
+
diff --git a/numpy/__init__.py b/numpy/__init__.py
index 1f60f074c..ba88c733f 100644
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -11,10 +11,10 @@ How to use the documentation
----------------------------
Documentation is available in two forms: docstrings provided
with the code, and a loose standing reference guide, available from
-`the NumPy homepage <http://www.scipy.org>`_.
+`the NumPy homepage <https://www.scipy.org>`_.
We recommend exploring the docstrings using
-`IPython <http://ipython.scipy.org>`_, an advanced Python shell with
+`IPython <https://ipython.org>`_, an advanced Python shell with
TAB-completion and introspection capabilities. See below for further
instructions.
@@ -133,19 +133,9 @@ else:
from .version import git_revision as __git_revision__
from .version import version as __version__
- from ._import_tools import PackageLoader
-
- def pkgload(*packages, **options):
- loader = PackageLoader(infunc=True)
- return loader(*packages, **options)
-
- from . import add_newdocs
- __all__ = ['add_newdocs',
- 'ModuleDeprecationWarning',
+ __all__ = ['ModuleDeprecationWarning',
'VisibleDeprecationWarning']
- pkgload.__doc__ = PackageLoader.__call__.__doc__
-
# Allow distributors to run custom init code
from . import _distributor_init
@@ -173,9 +163,10 @@ else:
from __builtin__ import bool, int, float, complex, object, unicode, str
from .core import round, abs, max, min
+ # now that numpy modules are imported, can initialize limits
+ core.getlimits._register_known_types()
- __all__.extend(['__version__', 'pkgload', 'PackageLoader',
- 'show_config'])
+ __all__.extend(['__version__', 'show_config'])
__all__.extend(core.__all__)
__all__.extend(_mat.__all__)
__all__.extend(lib.__all__)
@@ -196,7 +187,7 @@ else:
from .testing import Tester
# Pytest testing
- from numpy.testing._private.pytesttester import PytestTester
+ from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
@@ -219,7 +210,9 @@ else:
except AssertionError:
msg = ("The current Numpy installation ({!r}) fails to "
"pass simple sanity checks. This can be caused for example "
- "by incorrect BLAS library being linked in.")
+ "by incorrect BLAS library being linked in, or by mixing "
+ "package managers (pip, conda, apt, ...). Search closed "
+ "numpy issues for similar problems.")
raise RuntimeError(msg.format(__file__))
_sanity_check()
diff --git a/numpy/_globals.py b/numpy/_globals.py
index 9a7b458f1..f5c0761b5 100644
--- a/numpy/_globals.py
+++ b/numpy/_globals.py
@@ -17,7 +17,6 @@ motivated this module.
"""
from __future__ import division, absolute_import, print_function
-
__ALL__ = [
'ModuleDeprecationWarning', 'VisibleDeprecationWarning', '_NoValue'
]
@@ -39,7 +38,9 @@ class ModuleDeprecationWarning(DeprecationWarning):
nose tester will let pass without making tests fail.
"""
- pass
+
+
+ModuleDeprecationWarning.__module__ = 'numpy'
class VisibleDeprecationWarning(UserWarning):
@@ -50,7 +51,10 @@ class VisibleDeprecationWarning(UserWarning):
the usage is most likely a user bug.
"""
- pass
+
+
+VisibleDeprecationWarning.__module__ = 'numpy'
+
class _NoValueType(object):
"""Special keyword value.
@@ -73,4 +77,5 @@ class _NoValueType(object):
def __repr__(self):
return "<no value>"
+
_NoValue = _NoValueType()
diff --git a/numpy/_import_tools.py b/numpy/_import_tools.py
deleted file mode 100644
index cb8bc477c..000000000
--- a/numpy/_import_tools.py
+++ /dev/null
@@ -1,351 +0,0 @@
-from __future__ import division, absolute_import, print_function
-
-import os
-import sys
-import warnings
-
-__all__ = ['PackageLoader']
-
-class PackageLoader(object):
- def __init__(self, verbose=False, infunc=False):
- """ Manages loading packages.
- """
-
- if infunc:
- _level = 2
- else:
- _level = 1
- self.parent_frame = frame = sys._getframe(_level)
- self.parent_name = eval('__name__', frame.f_globals, frame.f_locals)
- parent_path = eval('__path__', frame.f_globals, frame.f_locals)
- if isinstance(parent_path, str):
- parent_path = [parent_path]
- self.parent_path = parent_path
- if '__all__' not in frame.f_locals:
- exec('__all__ = []', frame.f_globals, frame.f_locals)
- self.parent_export_names = eval('__all__', frame.f_globals, frame.f_locals)
-
- self.info_modules = {}
- self.imported_packages = []
- self.verbose = None
-
- def _get_info_files(self, package_dir, parent_path, parent_package=None):
- """ Return list of (package name,info.py file) from parent_path subdirectories.
- """
- from glob import glob
- files = glob(os.path.join(parent_path, package_dir, 'info.py'))
- for info_file in glob(os.path.join(parent_path, package_dir, 'info.pyc')):
- if info_file[:-1] not in files:
- files.append(info_file)
- info_files = []
- for info_file in files:
- package_name = os.path.dirname(info_file[len(parent_path)+1:])\
- .replace(os.sep, '.')
- if parent_package:
- package_name = parent_package + '.' + package_name
- info_files.append((package_name, info_file))
- info_files.extend(self._get_info_files('*',
- os.path.dirname(info_file),
- package_name))
- return info_files
-
- def _init_info_modules(self, packages=None):
- """Initialize info_modules = {<package_name>: <package info.py module>}.
- """
- from numpy.compat import npy_load_module
- info_files = []
- info_modules = self.info_modules
-
- if packages is None:
- for path in self.parent_path:
- info_files.extend(self._get_info_files('*', path))
- else:
- for package_name in packages:
- package_dir = os.path.join(*package_name.split('.'))
- for path in self.parent_path:
- names_files = self._get_info_files(package_dir, path)
- if names_files:
- info_files.extend(names_files)
- break
- else:
- try:
- exec('import %s.info as info' % (package_name))
- info_modules[package_name] = info
- except ImportError as msg:
- self.warn('No scipy-style subpackage %r found in %s. '\
- 'Ignoring: %s'\
- % (package_name, ':'.join(self.parent_path), msg))
-
- for package_name, info_file in info_files:
- if package_name in info_modules:
- continue
- fullname = self.parent_name +'.'+ package_name
- if info_file[-1]=='c':
- filedescriptor = ('.pyc', 'rb', 2)
- else:
- filedescriptor = ('.py', 'U', 1)
-
- try:
- info_module = npy_load_module(fullname + '.info',
- info_file,
- filedescriptor)
- except Exception as msg:
- self.error(msg)
- info_module = None
-
- if info_module is None or getattr(info_module, 'ignore', False):
- info_modules.pop(package_name, None)
- else:
- self._init_info_modules(getattr(info_module, 'depends', []))
- info_modules[package_name] = info_module
-
- return
-
- def _get_sorted_names(self):
- """ Return package names sorted in the order as they should be
- imported due to dependence relations between packages.
- """
-
- depend_dict = {}
- for name, info_module in self.info_modules.items():
- depend_dict[name] = getattr(info_module, 'depends', [])
- package_names = []
-
- for name in list(depend_dict.keys()):
- if not depend_dict[name]:
- package_names.append(name)
- del depend_dict[name]
-
- while depend_dict:
- for name, lst in list(depend_dict.items()):
- new_lst = [n for n in lst if n in depend_dict]
- if not new_lst:
- package_names.append(name)
- del depend_dict[name]
- else:
- depend_dict[name] = new_lst
-
- return package_names
-
- def __call__(self,*packages, **options):
- """Load one or more packages into parent package top-level namespace.
-
- This function is intended to shorten the need to import many
- subpackages, say of scipy, constantly with statements such as
-
- import scipy.linalg, scipy.fftpack, scipy.etc...
-
- Instead, you can say:
-
- import scipy
- scipy.pkgload('linalg','fftpack',...)
-
- or
-
- scipy.pkgload()
-
- to load all of them in one call.
-
- If a name which doesn't exist in scipy's namespace is
- given, a warning is shown.
-
- Parameters
- ----------
- *packages : arg-tuple
- the names (one or more strings) of all the modules one
- wishes to load into the top-level namespace.
- verbose= : integer
- verbosity level [default: -1].
- verbose=-1 will suspend also warnings.
- force= : bool
- when True, force reloading loaded packages [default: False].
- postpone= : bool
- when True, don't load packages [default: False]
-
- """
- # 2014-10-29, 1.10
- warnings.warn('pkgload and PackageLoader are obsolete '
- 'and will be removed in a future version of numpy',
- DeprecationWarning, stacklevel=2)
- frame = self.parent_frame
- self.info_modules = {}
- if options.get('force', False):
- self.imported_packages = []
- self.verbose = verbose = options.get('verbose', -1)
- postpone = options.get('postpone', None)
- self._init_info_modules(packages or None)
-
- self.log('Imports to %r namespace\n----------------------------'\
- % self.parent_name)
-
- for package_name in self._get_sorted_names():
- if package_name in self.imported_packages:
- continue
- info_module = self.info_modules[package_name]
- global_symbols = getattr(info_module, 'global_symbols', [])
- postpone_import = getattr(info_module, 'postpone_import', False)
- if (postpone and not global_symbols) \
- or (postpone_import and postpone is not None):
- continue
-
- old_object = frame.f_locals.get(package_name, None)
-
- cmdstr = 'import '+package_name
- if self._execcmd(cmdstr):
- continue
- self.imported_packages.append(package_name)
-
- if verbose!=-1:
- new_object = frame.f_locals.get(package_name)
- if old_object is not None and old_object is not new_object:
- self.warn('Overwriting %s=%s (was %s)' \
- % (package_name, self._obj2repr(new_object),
- self._obj2repr(old_object)))
-
- if '.' not in package_name:
- self.parent_export_names.append(package_name)
-
- for symbol in global_symbols:
- if symbol=='*':
- symbols = eval('getattr(%s,"__all__",None)'\
- % (package_name),
- frame.f_globals, frame.f_locals)
- if symbols is None:
- symbols = eval('dir(%s)' % (package_name),
- frame.f_globals, frame.f_locals)
- symbols = [s for s in symbols if not s.startswith('_')]
- else:
- symbols = [symbol]
-
- if verbose!=-1:
- old_objects = {}
- for s in symbols:
- if s in frame.f_locals:
- old_objects[s] = frame.f_locals[s]
-
- cmdstr = 'from '+package_name+' import '+symbol
- if self._execcmd(cmdstr):
- continue
-
- if verbose!=-1:
- for s, old_object in old_objects.items():
- new_object = frame.f_locals[s]
- if new_object is not old_object:
- self.warn('Overwriting %s=%s (was %s)' \
- % (s, self._obj2repr(new_object),
- self._obj2repr(old_object)))
-
- if symbol=='*':
- self.parent_export_names.extend(symbols)
- else:
- self.parent_export_names.append(symbol)
-
- return
-
- def _execcmd(self, cmdstr):
- """ Execute command in parent_frame."""
- frame = self.parent_frame
- try:
- exec (cmdstr, frame.f_globals, frame.f_locals)
- except Exception as msg:
- self.error('%s -> failed: %s' % (cmdstr, msg))
- return True
- else:
- self.log('%s -> success' % (cmdstr))
- return
-
- def _obj2repr(self, obj):
- """ Return repr(obj) with"""
- module = getattr(obj, '__module__', None)
- file = getattr(obj, '__file__', None)
- if module is not None:
- return repr(obj) + ' from ' + module
- if file is not None:
- return repr(obj) + ' from ' + file
- return repr(obj)
-
- def log(self, mess):
- if self.verbose>1:
- print(str(mess), file=sys.stderr)
- def warn(self, mess):
- if self.verbose>=0:
- print(str(mess), file=sys.stderr)
- def error(self, mess):
- if self.verbose!=-1:
- print(str(mess), file=sys.stderr)
-
- def _get_doc_title(self, info_module):
- """ Get the title from a package info.py file.
- """
- title = getattr(info_module, '__doc_title__', None)
- if title is not None:
- return title
- title = getattr(info_module, '__doc__', None)
- if title is not None:
- title = title.lstrip().split('\n', 1)[0]
- return title
- return '* Not Available *'
-
- def _format_titles(self,titles,colsep='---'):
- display_window_width = 70 # How to determine the correct value in runtime??
- lengths = [len(name)-name.find('.')-1 for (name, title) in titles]+[0]
- max_length = max(lengths)
- lines = []
- for (name, title) in titles:
- name = name[name.find('.')+1:]
- w = max_length - len(name)
- words = title.split()
- line = '%s%s %s' % (name, w*' ', colsep)
- tab = len(line) * ' '
- while words:
- word = words.pop(0)
- if len(line)+len(word)>display_window_width:
- lines.append(line)
- line = tab
- line += ' ' + word
- lines.append(line)
- return '\n'.join(lines)
-
- def get_pkgdocs(self):
- """ Return documentation summary of subpackages.
- """
- import sys
- self.info_modules = {}
- self._init_info_modules(None)
-
- titles = []
- symbols = []
- for package_name, info_module in self.info_modules.items():
- global_symbols = getattr(info_module, 'global_symbols', [])
- fullname = self.parent_name +'.'+ package_name
- note = ''
- if fullname not in sys.modules:
- note = ' [*]'
- titles.append((fullname, self._get_doc_title(info_module) + note))
- if global_symbols:
- symbols.append((package_name, ', '.join(global_symbols)))
-
- retstr = self._format_titles(titles) +\
- '\n [*] - using a package requires explicit import (see pkgload)'
-
-
- if symbols:
- retstr += """\n\nGlobal symbols from subpackages"""\
- """\n-------------------------------\n""" +\
- self._format_titles(symbols, '-->')
-
- return retstr
-
-class PackageLoaderDebug(PackageLoader):
- def _execcmd(self, cmdstr):
- """ Execute command in parent_frame."""
- frame = self.parent_frame
- print('Executing', repr(cmdstr), '...', end=' ')
- sys.stdout.flush()
- exec (cmdstr, frame.f_globals, frame.f_locals)
- print('ok')
- sys.stdout.flush()
- return
-
-if int(os.environ.get('NUMPY_IMPORT_DEBUG', '0')):
- PackageLoader = PackageLoaderDebug
diff --git a/numpy/testing/_private/pytesttester.py b/numpy/_pytesttester.py
index 30ecc69c7..8d1a3811c 100644
--- a/numpy/testing/_private/pytesttester.py
+++ b/numpy/_pytesttester.py
@@ -105,24 +105,18 @@ class PytestTester(object):
Notes
-----
- Each NumPy module exposes `test` in its namespace to run all tests for it.
- For example, to run all tests for numpy.lib:
+ Each NumPy module exposes `test` in its namespace to run all tests for
+ it. For example, to run all tests for numpy.lib:
>>> np.lib.test() #doctest: +SKIP
Examples
--------
>>> result = np.lib.test() #doctest: +SKIP
- Running unit tests for numpy.lib
...
- Ran 976 tests in 3.933s
-
- OK
-
- >>> result.errors #doctest: +SKIP
- []
- >>> result.knownfail #doctest: +SKIP
- []
+ 1023 passed, 2 skipped, 6 deselected, 1 xfailed in 10.39 seconds
+ >>> result
+ True
"""
import pytest
diff --git a/numpy/compat/_inspect.py b/numpy/compat/_inspect.py
index 76bf544a5..439d0d2c2 100644
--- a/numpy/compat/_inspect.py
+++ b/numpy/compat/_inspect.py
@@ -184,9 +184,8 @@ def formatargvalues(args, varargs, varkw, locals,
def convert(name, locals=locals,
formatarg=formatarg, formatvalue=formatvalue):
return formatarg(name) + formatvalue(locals[name])
- specs = []
- for i in range(len(args)):
- specs.append(strseq(args[i], convert, join))
+ specs = [strseq(arg, convert, join) for arg in args]
+
if varargs:
specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
if varkw:
diff --git a/numpy/compat/py3k.py b/numpy/compat/py3k.py
index d5bb2e4c7..8e06ead78 100644
--- a/numpy/compat/py3k.py
+++ b/numpy/compat/py3k.py
@@ -7,13 +7,14 @@ from __future__ import division, absolute_import, print_function
__all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar',
'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested',
'asstr', 'open_latin1', 'long', 'basestring', 'sixu',
- 'integer_types', 'is_pathlib_path', 'npy_load_module', 'Path']
+ 'integer_types', 'is_pathlib_path', 'npy_load_module', 'Path',
+ 'contextlib_nullcontext', 'os_fspath', 'os_PathLike']
import sys
try:
- from pathlib import Path
+ from pathlib import Path, PurePath
except ImportError:
- Path = None
+ Path = PurePath = None
if sys.version_info[0] >= 3:
import io
@@ -94,9 +95,33 @@ def asunicode_nested(x):
def is_pathlib_path(obj):
"""
Check whether obj is a pathlib.Path object.
+
+ Prefer using `isinstance(obj, os_PathLike)` instead of this function.
"""
return Path is not None and isinstance(obj, Path)
+# from Python 3.7
+class contextlib_nullcontext(object):
+ """Context manager that does no additional processing.
+
+ Used as a stand-in for a normal context manager, when a particular
+ block of code is only sometimes used with a normal context manager:
+
+ cm = optional_cm if condition else nullcontext()
+ with cm:
+ # Perform operation, using optional_cm if condition is True
+ """
+
+ def __init__(self, enter_result=None):
+ self.enter_result = enter_result
+
+ def __enter__(self):
+ return self.enter_result
+
+ def __exit__(self, *excinfo):
+ pass
+
+
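+# Editorial usage sketch (illustration only; `_read_all` is a hypothetical
+# helper, not part of this diff). contextlib_nullcontext stands in where a
+# context manager is only sometimes needed:
+#
+#     def _read_all(source):
+#         # `source` may be a file path or an already-open file object
+#         if isinstance(source, str):
+#             cm = open(source)
+#         else:
+#             cm = contextlib_nullcontext(source)
+#         with cm as f:
+#             return f.read()
+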
if sys.version_info[0] >= 3 and sys.version_info[1] >= 4:
def npy_load_module(name, fn, info=None):
"""
@@ -154,3 +179,65 @@ else:
finally:
fo.close()
return mod
+
+# backport abc.ABC
+import abc
+if sys.version_info[:2] >= (3, 4):
+ abc_ABC = abc.ABC
+else:
+ abc_ABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()})
+
+
+# Backport os.fspath, os.PathLike, and PurePath.__fspath__
+if sys.version_info[:2] >= (3, 6):
+ import os
+ os_fspath = os.fspath
+ os_PathLike = os.PathLike
+else:
+ def _PurePath__fspath__(self):
+ return str(self)
+
+ class os_PathLike(abc_ABC):
+ """Abstract base class for implementing the file system path protocol."""
+
+ @abc.abstractmethod
+ def __fspath__(self):
+ """Return the file system path representation of the object."""
+ raise NotImplementedError
+
+ @classmethod
+ def __subclasshook__(cls, subclass):
+ if PurePath is not None and issubclass(subclass, PurePath):
+ return True
+ return hasattr(subclass, '__fspath__')
+
+
+ def os_fspath(path):
+    """Return the path representation of a path-like object.
+
+ If str or bytes is passed in, it is returned unchanged. Otherwise the
+ os.PathLike interface is used to get the path representation. If the
+ path representation is not str or bytes, TypeError is raised. If the
+ provided path is not str, bytes, or os.PathLike, TypeError is raised.
+ """
+ if isinstance(path, (str, bytes)):
+ return path
+
+ # Work from the object's type to match method resolution of other magic
+ # methods.
+ path_type = type(path)
+ try:
+ path_repr = path_type.__fspath__(path)
+ except AttributeError:
+ if hasattr(path_type, '__fspath__'):
+ raise
+ elif PurePath is not None and issubclass(path_type, PurePath):
+ return _PurePath__fspath__(path)
+ else:
+ raise TypeError("expected str, bytes or os.PathLike object, "
+ "not " + path_type.__name__)
+ if isinstance(path_repr, (str, bytes)):
+ return path_repr
+ else:
+ raise TypeError("expected {}.__fspath__() to return str or bytes, "
+ "not {}".format(path_type.__name__,
+ type(path_repr).__name__))
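+
+# Editorial usage sketch (illustration only; `_DemoPath` is hypothetical and
+# not part of this diff). Any object implementing __fspath__ is accepted:
+#
+#     >>> class _DemoPath(object):
+#     ...     def __fspath__(self):
+#     ...         return 'data.csv'
+#     >>> os_fspath('data.csv')       # str passes through unchanged
+#     'data.csv'
+#     >>> os_fspath(_DemoPath())      # resolved via the __fspath__ protocol
+#     'data.csv'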
diff --git a/numpy/conftest.py b/numpy/conftest.py
index 7b1771748..4d4d055ec 100644
--- a/numpy/conftest.py
+++ b/numpy/conftest.py
@@ -3,10 +3,8 @@ Pytest configuration and fixtures for the Numpy test suite.
"""
from __future__ import division, absolute_import, print_function
-import warnings
import pytest
import numpy
-import importlib
from numpy.core._multiarray_tests import get_fpu_mode
diff --git a/numpy/core/__init__.py b/numpy/core/__init__.py
index 4d9cbf5da..80ce84f00 100644
--- a/numpy/core/__init__.py
+++ b/numpy/core/__init__.py
@@ -15,14 +15,35 @@ for envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']:
try:
from . import multiarray
except ImportError as exc:
+ import sys
msg = """
+
+IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE!
+
Importing the multiarray numpy extension module failed. Most
likely you are trying to import a failed build of numpy.
-If you're working with a numpy git repo, try `git clean -xdf` (removes all
-files not under version control). Otherwise reinstall numpy.
+Here is how to proceed:
+- If you're working with a numpy git repository, try `git clean -xdf`
+ (removes all files not under version control) and rebuild numpy.
+- If you are simply trying to use the numpy version that you have installed:
+ your installation is broken - please reinstall numpy.
+- If you have already reinstalled and that did not fix the problem, then:
+ 1. Check that you are using the Python you expect (you're using %s),
+ and that you have no directories in your PATH or PYTHONPATH that can
+ interfere with the Python and numpy versions you're trying to use.
+ 2. If (1) looks fine, you can open a new issue at
+ https://github.com/numpy/numpy/issues. Please include details on:
+ - how you installed Python
+ - how you installed numpy
+ - your operating system
+ - whether or not you have multiple versions of Python installed
+ - if you built from source, your compiler versions and ideally a build log
+
+ Note: this error has many possible causes, so please don't comment on
+ an existing issue about this - open a new one instead.
Original error was: %s
-""" % (exc,)
+""" % (sys.executable, exc)
raise ImportError(msg)
finally:
for envkey in env_added:
@@ -59,6 +80,10 @@ del nt
from .fromnumeric import amax as max, amin as min, round_ as round
from .numeric import absolute as abs
+# do this after everything else, to minimize the chance of this misleadingly
+# appearing in an import-time traceback
+from . import _add_newdocs
+
__all__ = ['char', 'rec', 'memmap']
__all__ += numeric.__all__
__all__ += fromnumeric.__all__
@@ -100,6 +125,6 @@ del copyreg
del sys
del _ufunc_reduce
-from numpy.testing._private.pytesttester import PytestTester
+from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
diff --git a/numpy/add_newdocs.py b/numpy/core/_add_newdocs.py
index 9372b3431..a242a74d4 100644
--- a/numpy/add_newdocs.py
+++ b/numpy/core/_add_newdocs.py
@@ -10,7 +10,9 @@ NOTE: Many of the methods of ndarray have corresponding functions.
"""
from __future__ import division, absolute_import, print_function
-from numpy.lib import add_newdoc
+from numpy.core import numerictypes as _numerictypes
+from numpy.core import dtype
+from numpy.core.function_base import add_newdoc
###############################################################################
#
@@ -605,6 +607,7 @@ add_newdoc('numpy.core', 'broadcast',
Examples
--------
+
Manually adding two vectors, using broadcasting:
>>> x = np.array([[1], [2], [3]])
@@ -944,66 +947,6 @@ add_newdoc('numpy.core.multiarray', 'empty',
""")
-add_newdoc('numpy.core.multiarray', 'empty_like',
- """
- empty_like(prototype, dtype=None, order='K', subok=True)
-
- Return a new array with the same shape and type as a given array.
-
- Parameters
- ----------
- prototype : array_like
- The shape and data-type of `prototype` define these same attributes
- of the returned array.
- dtype : data-type, optional
- Overrides the data type of the result.
-
- .. versionadded:: 1.6.0
- order : {'C', 'F', 'A', or 'K'}, optional
- Overrides the memory layout of the result. 'C' means C-order,
- 'F' means F-order, 'A' means 'F' if ``prototype`` is Fortran
- contiguous, 'C' otherwise. 'K' means match the layout of ``prototype``
- as closely as possible.
-
- .. versionadded:: 1.6.0
- subok : bool, optional.
- If True, then the newly created array will use the sub-class
- type of 'a', otherwise it will be a base-class array. Defaults
- to True.
-
- Returns
- -------
- out : ndarray
- Array of uninitialized (arbitrary) data with the same
- shape and type as `prototype`.
-
- See Also
- --------
- ones_like : Return an array of ones with shape and type of input.
- zeros_like : Return an array of zeros with shape and type of input.
- full_like : Return a new array with shape of input filled with value.
- empty : Return a new uninitialized array.
-
- Notes
- -----
- This function does *not* initialize the returned array; to do that use
- `zeros_like` or `ones_like` instead. It may be marginally faster than
- the functions that do set the array values.
-
- Examples
- --------
- >>> a = ([1,2,3], [4,5,6]) # a is array-like
- >>> np.empty_like(a)
- array([[-1073741821, -1073741821, 3], #random
- [ 0, 0, -1073741821]])
- >>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
- >>> np.empty_like(a)
- array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000],#random
- [ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]])
-
- """)
-
-
add_newdoc('numpy.core.multiarray', 'scalar',
"""
scalar(dtype, obj)
@@ -1129,6 +1072,43 @@ add_newdoc('numpy.core.multiarray', 'fromstring',
""")
+add_newdoc('numpy.core.multiarray', 'compare_chararrays',
+ """
+ compare_chararrays(a, b, cmp_op, rstrip)
+
+ Performs element-wise comparison of two string arrays using the
+ comparison operator specified by `cmp_op`.
+
+ Parameters
+ ----------
+ a, b : array_like
+ Arrays to be compared.
+ cmp_op : {"<", "<=", "==", ">=", ">", "!="}
+ Type of comparison.
+    rstrip : bool
+        If True, the spaces at the end of strings are removed before the comparison.
+
+ Returns
+ -------
+ out : ndarray
+        The output array of type bool with the same shape as `a` and `b`.
+
+ Raises
+ ------
+ ValueError
+ If `cmp_op` is not valid.
+ TypeError
+        If at least one of `a` or `b` is a non-string array.
+
+ Examples
+ --------
+ >>> a = np.array(["a", "b", "cde"])
+ >>> b = np.array(["a", "a", "dec"])
+ >>> np.compare_chararrays(a, b, ">", True)
+ array([False, True, False])
+
+ """)
+
add_newdoc('numpy.core.multiarray', 'fromiter',
"""
fromiter(iterable, dtype, count=-1)
@@ -1281,162 +1261,6 @@ add_newdoc('numpy.core.multiarray', 'frombuffer',
""")
-add_newdoc('numpy.core.multiarray', 'concatenate',
- """
- concatenate((a1, a2, ...), axis=0, out=None)
-
- Join a sequence of arrays along an existing axis.
-
- Parameters
- ----------
- a1, a2, ... : sequence of array_like
- The arrays must have the same shape, except in the dimension
- corresponding to `axis` (the first, by default).
- axis : int, optional
- The axis along which the arrays will be joined. If axis is None,
- arrays are flattened before use. Default is 0.
- out : ndarray, optional
- If provided, the destination to place the result. The shape must be
- correct, matching that of what concatenate would have returned if no
- out argument were specified.
-
- Returns
- -------
- res : ndarray
- The concatenated array.
-
- See Also
- --------
- ma.concatenate : Concatenate function that preserves input masks.
- array_split : Split an array into multiple sub-arrays of equal or
- near-equal size.
- split : Split array into a list of multiple sub-arrays of equal size.
- hsplit : Split array into multiple sub-arrays horizontally (column wise)
- vsplit : Split array into multiple sub-arrays vertically (row wise)
- dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
- stack : Stack a sequence of arrays along a new axis.
- hstack : Stack arrays in sequence horizontally (column wise)
- vstack : Stack arrays in sequence vertically (row wise)
- dstack : Stack arrays in sequence depth wise (along third dimension)
-
- Notes
- -----
- When one or more of the arrays to be concatenated is a MaskedArray,
- this function will return a MaskedArray object instead of an ndarray,
- but the input masks are *not* preserved. In cases where a MaskedArray
- is expected as input, use the ma.concatenate function from the masked
- array module instead.
-
- Examples
- --------
- >>> a = np.array([[1, 2], [3, 4]])
- >>> b = np.array([[5, 6]])
- >>> np.concatenate((a, b), axis=0)
- array([[1, 2],
- [3, 4],
- [5, 6]])
- >>> np.concatenate((a, b.T), axis=1)
- array([[1, 2, 5],
- [3, 4, 6]])
- >>> np.concatenate((a, b), axis=None)
- array([1, 2, 3, 4, 5, 6])
-
- This function will not preserve masking of MaskedArray inputs.
-
- >>> a = np.ma.arange(3)
- >>> a[1] = np.ma.masked
- >>> b = np.arange(2, 5)
- >>> a
- masked_array(data = [0 -- 2],
- mask = [False True False],
- fill_value = 999999)
- >>> b
- array([2, 3, 4])
- >>> np.concatenate([a, b])
- masked_array(data = [0 1 2 2 3 4],
- mask = False,
- fill_value = 999999)
- >>> np.ma.concatenate([a, b])
- masked_array(data = [0 -- 2 2 3 4],
- mask = [False True False False False False],
- fill_value = 999999)
-
- """)
-
-add_newdoc('numpy.core', 'inner',
- """
- inner(a, b)
-
- Inner product of two arrays.
-
- Ordinary inner product of vectors for 1-D arrays (without complex
- conjugation), in higher dimensions a sum product over the last axes.
-
- Parameters
- ----------
- a, b : array_like
- If `a` and `b` are nonscalar, their last dimensions must match.
-
- Returns
- -------
- out : ndarray
- `out.shape = a.shape[:-1] + b.shape[:-1]`
-
- Raises
- ------
- ValueError
- If the last dimension of `a` and `b` has different size.
-
- See Also
- --------
- tensordot : Sum products over arbitrary axes.
- dot : Generalised matrix product, using second last dimension of `b`.
- einsum : Einstein summation convention.
-
- Notes
- -----
- For vectors (1-D arrays) it computes the ordinary inner-product::
-
- np.inner(a, b) = sum(a[:]*b[:])
-
- More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::
-
- np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))
-
- or explicitly::
-
- np.inner(a, b)[i0,...,ir-1,j0,...,js-1]
- = sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])
-
- In addition `a` or `b` may be scalars, in which case::
-
- np.inner(a,b) = a*b
-
- Examples
- --------
- Ordinary inner product for vectors:
-
- >>> a = np.array([1,2,3])
- >>> b = np.array([0,1,0])
- >>> np.inner(a, b)
- 2
-
- A multidimensional example:
-
- >>> a = np.arange(24).reshape((2,3,4))
- >>> b = np.arange(4)
- >>> np.inner(a, b)
- array([[ 14, 38, 62],
- [ 86, 110, 134]])
-
- An example where `b` is a scalar:
-
- >>> np.inner(np.eye(2), 7)
- array([[ 7., 0.],
- [ 0., 7.]])
-
- """)
-
add_newdoc('numpy.core', 'fastCopyAndTranspose',
"""_fastCopyAndTranspose(a)""")
@@ -1452,11 +1276,10 @@ add_newdoc('numpy.core.multiarray', 'arange',
Values are generated within the half-open interval ``[start, stop)``
(in other words, the interval including `start` but excluding `stop`).
For integer arguments the function is equivalent to the Python built-in
- `range <http://docs.python.org/lib/built-in-funcs.html>`_ function,
- but returns an ndarray rather than a list.
+ `range` function, but returns an ndarray rather than a list.
When using a non-integer step, such as 0.1, the results will often not
- be consistent. It is better to use ``linspace`` for these cases.
+ be consistent. It is better to use `numpy.linspace` for these cases.
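+
+    An editorial illustration of the pitfall (not part of this diff; output
+    elided because it is platform and version dependent):
+
+    >>> np.arange(0, 1, 0.1)     # accumulated float steps  # doctest: +SKIP
+    >>> np.linspace(0, 1, 11)    # predictable endpoints    # doctest: +SKIP
+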
Parameters
----------
@@ -1534,6 +1357,12 @@ add_newdoc('numpy.core.multiarray', 'set_numeric_ops',
Set numerical operators for array objects.
+ .. deprecated:: 1.16
+
+ For the general case, use :c:func:`PyUFunc_ReplaceLoopBySignature`.
+ For ndarray subclasses, define the ``__array_ufunc__`` method and
+ override the relevant ufunc.
+
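+    A minimal sketch of the subclass route (editorial illustration, not part
+    of this diff):
+
+    >>> class MyArray(np.ndarray):
+    ...     def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+    ...         if ufunc is np.add:
+    ...             return "custom add"      # override a single ufunc
+    ...         return NotImplemented
+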
Parameters
----------
op1, op2, ... : callable
@@ -1572,262 +1401,6 @@ add_newdoc('numpy.core.multiarray', 'set_numeric_ops',
""")
-add_newdoc('numpy.core.multiarray', 'where',
- """
- where(condition, [x, y])
-
- Return elements, either from `x` or `y`, depending on `condition`.
-
- If only `condition` is given, return ``condition.nonzero()``.
-
- Parameters
- ----------
- condition : array_like, bool
- When True, yield `x`, otherwise yield `y`.
- x, y : array_like, optional
- Values from which to choose. `x`, `y` and `condition` need to be
- broadcastable to some shape.
-
- Returns
- -------
- out : ndarray or tuple of ndarrays
- If both `x` and `y` are specified, the output array contains
- elements of `x` where `condition` is True, and elements from
- `y` elsewhere.
-
- If only `condition` is given, return the tuple
- ``condition.nonzero()``, the indices where `condition` is True.
-
- See Also
- --------
- nonzero, choose
-
- Notes
- -----
- If `x` and `y` are given and input arrays are 1-D, `where` is
- equivalent to::
-
- [xv if c else yv for (c,xv,yv) in zip(condition,x,y)]
-
- Examples
- --------
- >>> np.where([[True, False], [True, True]],
- ... [[1, 2], [3, 4]],
- ... [[9, 8], [7, 6]])
- array([[1, 8],
- [3, 4]])
-
- >>> np.where([[0, 1], [1, 0]])
- (array([0, 1]), array([1, 0]))
-
- >>> x = np.arange(9.).reshape(3, 3)
- >>> np.where( x > 5 )
- (array([2, 2, 2]), array([0, 1, 2]))
- >>> x[np.where( x > 3.0 )] # Note: result is 1D.
- array([ 4., 5., 6., 7., 8.])
- >>> np.where(x < 5, x, -1) # Note: broadcasting.
- array([[ 0., 1., 2.],
- [ 3., 4., -1.],
- [-1., -1., -1.]])
-
- Find the indices of elements of `x` that are in `goodvalues`.
-
- >>> goodvalues = [3, 4, 7]
- >>> ix = np.isin(x, goodvalues)
- >>> ix
- array([[False, False, False],
- [ True, True, False],
- [False, True, False]])
- >>> np.where(ix)
- (array([1, 1, 2]), array([0, 1, 1]))
-
- """)
-
-
-add_newdoc('numpy.core.multiarray', 'lexsort',
- """
- lexsort(keys, axis=-1)
-
- Perform an indirect stable sort using a sequence of keys.
-
- Given multiple sorting keys, which can be interpreted as columns in a
- spreadsheet, lexsort returns an array of integer indices that describes
- the sort order by multiple columns. The last key in the sequence is used
- for the primary sort order, the second-to-last key for the secondary sort
- order, and so on. The keys argument must be a sequence of objects that
- can be converted to arrays of the same shape. If a 2D array is provided
-    for the keys argument, its rows are interpreted as the sorting keys and
- sorting is according to the last row, second last row etc.
-
- Parameters
- ----------
- keys : (k, N) array or tuple containing k (N,)-shaped sequences
- The `k` different "columns" to be sorted. The last column (or row if
- `keys` is a 2D array) is the primary sort key.
- axis : int, optional
- Axis to be indirectly sorted. By default, sort over the last axis.
-
- Returns
- -------
- indices : (N,) ndarray of ints
- Array of indices that sort the keys along the specified axis.
-
- See Also
- --------
- argsort : Indirect sort.
- ndarray.sort : In-place sort.
- sort : Return a sorted copy of an array.
-
- Examples
- --------
- Sort names: first by surname, then by name.
-
- >>> surnames = ('Hertz', 'Galilei', 'Hertz')
- >>> first_names = ('Heinrich', 'Galileo', 'Gustav')
- >>> ind = np.lexsort((first_names, surnames))
- >>> ind
- array([1, 2, 0])
-
- >>> [surnames[i] + ", " + first_names[i] for i in ind]
- ['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich']
-
- Sort two columns of numbers:
-
- >>> a = [1,5,1,4,3,4,4] # First column
- >>> b = [9,4,0,4,0,2,1] # Second column
- >>> ind = np.lexsort((b,a)) # Sort by a, then by b
- >>> print(ind)
- [2 0 4 6 5 3 1]
-
- >>> [(a[i],b[i]) for i in ind]
- [(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]
-
- Note that sorting is first according to the elements of ``a``.
- Secondary sorting is according to the elements of ``b``.
-
- A normal ``argsort`` would have yielded:
-
- >>> [(a[i],b[i]) for i in np.argsort(a)]
- [(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)]
-
- Structured arrays are sorted lexically by ``argsort``:
-
- >>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)],
- ... dtype=np.dtype([('x', int), ('y', int)]))
-
- >>> np.argsort(x) # or np.argsort(x, order=('x', 'y'))
- array([2, 0, 4, 6, 5, 3, 1])
-
- """)
-
-add_newdoc('numpy.core.multiarray', 'can_cast',
- """
- can_cast(from_, to, casting='safe')
-
- Returns True if cast between data types can occur according to the
- casting rule. If from is a scalar or array scalar, also returns
- True if the scalar value can be cast without overflow or truncation
- to an integer.
-
- Parameters
- ----------
- from_ : dtype, dtype specifier, scalar, or array
- Data type, scalar, or array to cast from.
- to : dtype or dtype specifier
- Data type to cast to.
- casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
- Controls what kind of data casting may occur.
-
- * 'no' means the data types should not be cast at all.
- * 'equiv' means only byte-order changes are allowed.
- * 'safe' means only casts which can preserve values are allowed.
- * 'same_kind' means only safe casts or casts within a kind,
- like float64 to float32, are allowed.
- * 'unsafe' means any data conversions may be done.
-
- Returns
- -------
- out : bool
- True if cast can occur according to the casting rule.
-
- Notes
- -----
- Starting in NumPy 1.9, can_cast function now returns False in 'safe'
- casting mode for integer/float dtype and string dtype if the string dtype
- length is not long enough to store the max integer/float value converted
- to a string. Previously can_cast in 'safe' mode returned True for
- integer/float dtype and a string dtype of any length.
-
- See also
- --------
- dtype, result_type
-
- Examples
- --------
- Basic examples
-
- >>> np.can_cast(np.int32, np.int64)
- True
- >>> np.can_cast(np.float64, complex)
- True
- >>> np.can_cast(complex, float)
- False
-
- >>> np.can_cast('i8', 'f8')
- True
- >>> np.can_cast('i8', 'f4')
- False
- >>> np.can_cast('i4', 'S4')
- False
-
- Casting scalars
-
- >>> np.can_cast(100, 'i1')
- True
- >>> np.can_cast(150, 'i1')
- False
- >>> np.can_cast(150, 'u1')
- True
-
- >>> np.can_cast(3.5e100, np.float32)
- False
- >>> np.can_cast(1000.0, np.float32)
- True
-
- Array scalar checks the value, array does not
-
- >>> np.can_cast(np.array(1000.0), np.float32)
- True
- >>> np.can_cast(np.array([1000.0]), np.float32)
- False
-
- Using the casting rules
-
- >>> np.can_cast('i8', 'i8', 'no')
- True
- >>> np.can_cast('<i8', '>i8', 'no')
- False
-
- >>> np.can_cast('<i8', '>i8', 'equiv')
- True
- >>> np.can_cast('<i4', '>i8', 'equiv')
- False
-
- >>> np.can_cast('<i4', '>i8', 'safe')
- True
- >>> np.can_cast('<i8', '>i4', 'safe')
- False
-
- >>> np.can_cast('<i8', '>i4', 'same_kind')
- True
- >>> np.can_cast('<i8', '>u4', 'same_kind')
- False
-
- >>> np.can_cast('<i8', '>u4', 'unsafe')
- True
-
- """)
-
add_newdoc('numpy.core.multiarray', 'promote_types',
"""
promote_types(type1, type2)
@@ -1888,123 +1461,6 @@ add_newdoc('numpy.core.multiarray', 'promote_types',
""")
-add_newdoc('numpy.core.multiarray', 'min_scalar_type',
- """
- min_scalar_type(a)
-
- For scalar ``a``, returns the data type with the smallest size
- and smallest scalar kind which can hold its value. For non-scalar
- array ``a``, returns the vector's dtype unmodified.
-
- Floating point values are not demoted to integers,
- and complex values are not demoted to floats.
-
- Parameters
- ----------
- a : scalar or array_like
- The value whose minimal data type is to be found.
-
- Returns
- -------
- out : dtype
- The minimal data type.
-
- Notes
- -----
- .. versionadded:: 1.6.0
-
- See Also
- --------
- result_type, promote_types, dtype, can_cast
-
- Examples
- --------
- >>> np.min_scalar_type(10)
- dtype('uint8')
-
- >>> np.min_scalar_type(-260)
- dtype('int16')
-
- >>> np.min_scalar_type(3.1)
- dtype('float16')
-
- >>> np.min_scalar_type(1e50)
- dtype('float64')
-
- >>> np.min_scalar_type(np.arange(4,dtype='f8'))
- dtype('float64')
-
- """)
-
-add_newdoc('numpy.core.multiarray', 'result_type',
- """
- result_type(*arrays_and_dtypes)
-
- Returns the type that results from applying the NumPy
- type promotion rules to the arguments.
-
- Type promotion in NumPy works similarly to the rules in languages
- like C++, with some slight differences. When both scalars and
- arrays are used, the array's type takes precedence and the actual value
- of the scalar is taken into account.
-
- For example, calculating 3*a, where a is an array of 32-bit floats,
- intuitively should result in a 32-bit float output. If the 3 is a
- 32-bit integer, the NumPy rules indicate it can't convert losslessly
- into a 32-bit float, so a 64-bit float should be the result type.
- By examining the value of the constant, '3', we see that it fits in
- an 8-bit integer, which can be cast losslessly into the 32-bit float.
-
- Parameters
- ----------
- arrays_and_dtypes : list of arrays and dtypes
- The operands of some operation whose result type is needed.
-
- Returns
- -------
- out : dtype
- The result type.
-
- See also
- --------
- dtype, promote_types, min_scalar_type, can_cast
-
- Notes
- -----
- .. versionadded:: 1.6.0
-
- The specific algorithm used is as follows.
-
- Categories are determined by first checking which of boolean,
- integer (int/uint), or floating point (float/complex) the maximum
- kind of all the arrays and the scalars are.
-
- If there are only scalars or the maximum category of the scalars
- is higher than the maximum category of the arrays,
- the data types are combined with :func:`promote_types`
- to produce the return value.
-
- Otherwise, `min_scalar_type` is called on each array, and
- the resulting data types are all combined with :func:`promote_types`
- to produce the return value.
-
- The set of int values is not a subset of the uint values for types
- with the same number of bits, something not reflected in
- :func:`min_scalar_type`, but handled as a special case in `result_type`.
-
- Examples
- --------
- >>> np.result_type(3, np.arange(7, dtype='i1'))
- dtype('int8')
-
- >>> np.result_type('i4', 'c8')
- dtype('complex128')
-
- >>> np.result_type(3.0, -2)
- dtype('float64')
-
- """)
-
add_newdoc('numpy.core.multiarray', 'newbuffer',
"""
newbuffer(size)
@@ -2057,233 +1513,34 @@ add_newdoc('numpy.core.multiarray', 'getbuffer',
""")
-add_newdoc('numpy.core', 'dot',
- """
- dot(a, b, out=None)
-
- Dot product of two arrays. Specifically,
-
- - If both `a` and `b` are 1-D arrays, it is inner product of vectors
- (without complex conjugation).
-
- - If both `a` and `b` are 2-D arrays, it is matrix multiplication,
- but using :func:`matmul` or ``a @ b`` is preferred.
-
- - If either `a` or `b` is 0-D (scalar), it is equivalent to :func:`multiply`
- and using ``numpy.multiply(a, b)`` or ``a * b`` is preferred.
-
- - If `a` is an N-D array and `b` is a 1-D array, it is a sum product over
- the last axis of `a` and `b`.
-
- - If `a` is an N-D array and `b` is an M-D array (where ``M>=2``), it is a
- sum product over the last axis of `a` and the second-to-last axis of `b`::
-
- dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])
-
- Parameters
- ----------
- a : array_like
- First argument.
- b : array_like
- Second argument.
- out : ndarray, optional
- Output argument. This must have the exact kind that would be returned
- if it was not used. In particular, it must have the right type, must be
- C-contiguous, and its dtype must be the dtype that would be returned
- for `dot(a,b)`. This is a performance feature. Therefore, if these
- conditions are not met, an exception is raised, instead of attempting
- to be flexible.
-
- Returns
- -------
- output : ndarray
- Returns the dot product of `a` and `b`. If `a` and `b` are both
- scalars or both 1-D arrays then a scalar is returned; otherwise
- an array is returned.
- If `out` is given, then it is returned.
-
- Raises
- ------
- ValueError
- If the last dimension of `a` is not the same size as
- the second-to-last dimension of `b`.
-
- See Also
- --------
- vdot : Complex-conjugating dot product.
- tensordot : Sum products over arbitrary axes.
- einsum : Einstein summation convention.
- matmul : '@' operator as method with out parameter.
-
- Examples
- --------
- >>> np.dot(3, 4)
- 12
-
- Neither argument is complex-conjugated:
-
- >>> np.dot([2j, 3j], [2j, 3j])
- (-13+0j)
-
- For 2-D arrays it is the matrix product:
-
- >>> a = [[1, 0], [0, 1]]
- >>> b = [[4, 1], [2, 2]]
- >>> np.dot(a, b)
- array([[4, 1],
- [2, 2]])
-
- >>> a = np.arange(3*4*5*6).reshape((3,4,5,6))
- >>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3))
- >>> np.dot(a, b)[2,3,2,1,2,2]
- 499128
- >>> sum(a[2,3,2,:] * b[1,2,:,2])
- 499128
-
- """)
-
-add_newdoc('numpy.core', 'matmul',
+add_newdoc('numpy.core.multiarray', 'c_einsum',
"""
- matmul(a, b, out=None)
-
- Matrix product of two arrays.
-
- The behavior depends on the arguments in the following way.
-
- - If both arguments are 2-D they are multiplied like conventional
- matrices.
- - If either argument is N-D, N > 2, it is treated as a stack of
- matrices residing in the last two indexes and broadcast accordingly.
- - If the first argument is 1-D, it is promoted to a matrix by
- prepending a 1 to its dimensions. After matrix multiplication
- the prepended 1 is removed.
- - If the second argument is 1-D, it is promoted to a matrix by
- appending a 1 to its dimensions. After matrix multiplication
- the appended 1 is removed.
-
- Multiplication by a scalar is not allowed, use ``*`` instead. Note that
- multiplying a stack of matrices with a vector will result in a stack of
- vectors, but matmul will not recognize it as such.
-
- ``matmul`` differs from ``dot`` in two important ways.
-
- - Multiplication by scalars is not allowed.
- - Stacks of matrices are broadcast together as if the matrices
- were elements.
-
- .. warning::
- This function is preliminary and included in NumPy 1.10.0 for testing
- and documentation. Its semantics will not change, but the number and
- order of the optional arguments will.
-
- .. versionadded:: 1.10.0
-
- Parameters
- ----------
- a : array_like
- First argument.
- b : array_like
- Second argument.
- out : ndarray, optional
- Output argument. This must have the exact kind that would be returned
- if it was not used. In particular, it must have the right type, must be
- C-contiguous, and its dtype must be the dtype that would be returned
- for `dot(a,b)`. This is a performance feature. Therefore, if these
- conditions are not met, an exception is raised, instead of attempting
- to be flexible.
-
- Returns
- -------
- output : ndarray
- Returns the dot product of `a` and `b`. If `a` and `b` are both
- 1-D arrays then a scalar is returned; otherwise an array is
- returned. If `out` is given, then it is returned.
-
- Raises
- ------
- ValueError
- If the last dimension of `a` is not the same size as
- the second-to-last dimension of `b`.
-
- If scalar value is passed.
-
- See Also
- --------
- vdot : Complex-conjugating dot product.
- tensordot : Sum products over arbitrary axes.
- einsum : Einstein summation convention.
- dot : alternative matrix product with different broadcasting rules.
-
- Notes
- -----
- The matmul function implements the semantics of the `@` operator introduced
- in Python 3.5 following PEP465.
-
- Examples
- --------
- For 2-D arrays it is the matrix product:
-
- >>> a = [[1, 0], [0, 1]]
- >>> b = [[4, 1], [2, 2]]
- >>> np.matmul(a, b)
- array([[4, 1],
- [2, 2]])
-
- For 2-D mixed with 1-D, the result is the usual.
-
- >>> a = [[1, 0], [0, 1]]
- >>> b = [1, 2]
- >>> np.matmul(a, b)
- array([1, 2])
- >>> np.matmul(b, a)
- array([1, 2])
-
-
- Broadcasting is conventional for stacks of arrays
-
- >>> a = np.arange(2*2*4).reshape((2,2,4))
- >>> b = np.arange(2*2*4).reshape((2,4,2))
- >>> np.matmul(a,b).shape
- (2, 2, 2)
- >>> np.matmul(a,b)[0,1,1]
- 98
- >>> sum(a[0,1,:] * b[0,:,1])
- 98
-
- Vector, vector returns the scalar inner product, but neither argument
- is complex-conjugated:
-
- >>> np.matmul([2j, 3j], [2j, 3j])
- (-13+0j)
+ c_einsum(subscripts, *operands, out=None, dtype=None, order='K',
+ casting='safe')
- Scalar multiplication raises an error.
-
- >>> np.matmul([1,2], 3)
- Traceback (most recent call last):
- ...
- ValueError: Scalar operands are not allowed, use '*' instead
-
- """)
-
-
-add_newdoc('numpy.core', 'c_einsum',
- """
- c_einsum(subscripts, *operands, out=None, dtype=None, order='K', casting='safe')
+    *This documentation shadows that of the native Python implementation of the `einsum` function,
+    except that all references and examples related to the ``optimize`` argument (v 0.12.0) have been removed.*
Evaluates the Einstein summation convention on the operands.
- Using the Einstein summation convention, many common multi-dimensional
- array operations can be represented in a simple fashion. This function
- provides a way to compute such summations. The best way to understand this
- function is to try the examples below, which show how many common NumPy
- functions can be implemented as calls to `einsum`.
+ Using the Einstein summation convention, many common multi-dimensional,
+ linear algebraic array operations can be represented in a simple fashion.
+ In *implicit* mode `einsum` computes these values.
+
+ In *explicit* mode, `einsum` provides further flexibility to compute
+ other array operations that might not be considered classical Einstein
+    summation operations, by disabling or forcing summation over specified
+ subscript labels.
- This is the core C function.
+ See the notes and examples for clarification.
Parameters
----------
subscripts : str
- Specifies the subscripts for summation.
+        Specifies the subscripts for summation as a comma-separated list of
+ subscript labels. An implicit (classical Einstein summation)
+ calculation is performed unless the explicit indicator '->' is
+ included as well as subscript labels of the precise output form.
operands : list of array_like
These are the arrays for the operation.
out : ndarray, optional
@@ -2311,6 +1568,11 @@ add_newdoc('numpy.core', 'c_einsum',
* 'unsafe' means any data conversions may be done.
Default is 'safe'.
+ optimize : {False, True, 'greedy', 'optimal'}, optional
+        Controls whether intermediate optimization should occur. No optimization
+        occurs if False; True defaults to the 'greedy' algorithm.
+ Also accepts an explicit contraction list from the ``np.einsum_path``
+ function. See ``np.einsum_path`` for more details. Defaults to False.
Returns
-------
@@ -2319,56 +1581,86 @@ add_newdoc('numpy.core', 'c_einsum',
See Also
--------
- einsum, dot, inner, outer, tensordot
+ einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
Notes
-----
.. versionadded:: 1.6.0
- The subscripts string is a comma-separated list of subscript labels,
- where each label refers to a dimension of the corresponding operand.
- Repeated subscripts labels in one operand take the diagonal. For example,
- ``np.einsum('ii', a)`` is equivalent to ``np.trace(a)``.
+ The Einstein summation convention can be used to compute
+ many multi-dimensional, linear algebraic array operations. `einsum`
+ provides a succinct way of representing these.
- Whenever a label is repeated, it is summed, so ``np.einsum('i,i', a, b)``
- is equivalent to ``np.inner(a,b)``. If a label appears only once,
- it is not summed, so ``np.einsum('i', a)`` produces a view of ``a``
- with no changes.
+ A non-exhaustive list of these operations,
+ which can be computed by `einsum`, is shown below along with examples:
- The order of labels in the output is by default alphabetical. This
- means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
- ``np.einsum('ji', a)`` takes its transpose.
+ * Trace of an array, :py:func:`numpy.trace`.
+ * Return a diagonal, :py:func:`numpy.diag`.
+ * Array axis summations, :py:func:`numpy.sum`.
+ * Transpositions and permutations, :py:func:`numpy.transpose`.
+    * Matrix multiplication and dot product, :py:func:`numpy.matmul` and :py:func:`numpy.dot`.
+    * Vector inner and outer products, :py:func:`numpy.inner` and :py:func:`numpy.outer`.
+ * Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`.
+ * Tensor contractions, :py:func:`numpy.tensordot`.
+ * Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`.
- The output can be controlled by specifying output subscript labels
- as well. This specifies the label order, and allows summing to
- be disallowed or forced when desired. The call ``np.einsum('i->', a)``
- is like ``np.sum(a, axis=-1)``, and ``np.einsum('ii->i', a)``
- is like ``np.diag(a)``. The difference is that `einsum` does not
- allow broadcasting by default.
+ The subscripts string is a comma-separated list of subscript labels,
+ where each label refers to a dimension of the corresponding operand.
+ Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
+ is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
+ appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
+ view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
+ describes traditional matrix multiplication and is equivalent to
+ :py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one
+ operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
+ to :py:func:`np.trace(a) <numpy.trace>`.
+
+ In *implicit mode*, the chosen subscripts are important
+ since the axes of the output are reordered alphabetically. This
+ means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
+ ``np.einsum('ji', a)`` takes its transpose. Additionally,
+ ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
+ ``np.einsum('ij,jh', a, b)`` returns the transpose of the
+ multiplication since subscript 'h' precedes subscript 'i'.
+
+ In *explicit mode* the output can be directly controlled by
+ specifying output subscript labels. This requires the
+ identifier '->' as well as the list of output subscript labels.
+ This feature increases the flexibility of the function since
+ summing can be disabled or forced when required. The call
+ ``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`,
+ and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`.
+ The difference is that `einsum` does not allow broadcasting by default.
+ Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
+ order of the output subscript labels and therefore returns matrix
+ multiplication, unlike the example above in implicit mode.
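+
+    For instance, a minimal sketch contrasting the two modes (``A`` and ``B``
+    are illustrative 2-D operands, not arrays used elsewhere in these docs):
+
+    >>> A = np.arange(6).reshape(2, 3)
+    >>> B = np.arange(12).reshape(3, 4)
+    >>> np.allclose(np.einsum('ij,jk', A, B), np.dot(A, B))      # implicit
+    True
+    >>> np.allclose(np.einsum('ij,jk->ik', A, B), np.dot(A, B))  # explicit
+    True
+    >>> np.allclose(np.einsum('ij,jh', A, B), np.dot(A, B).T)    # 'h' precedes 'i'
+    True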
To enable and control broadcasting, use an ellipsis. Default
NumPy-style broadcasting is done by adding an ellipsis
to the left of each term, like ``np.einsum('...ii->...i', a)``.
To take the trace along the first and last axes,
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
- product with the left-most indices instead of rightmost, you can do
+ product with the left-most indices instead of rightmost, one can do
``np.einsum('ij...,jk...->ik...', a, b)``.
When there is only one operand, no axes are summed, and no output
parameter is provided, a view into the operand is returned instead
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
- produces a view.
+ produces a view (changed in version 1.10.0).
- An alternative way to provide the subscripts and operands is as
- ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. The examples
- below have corresponding `einsum` calls with the two parameter methods.
+ `einsum` also provides an alternative way to provide the subscripts
+ and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
+    If the output labels are not provided in this format, `einsum` is
+    computed in implicit mode; otherwise it is performed explicitly.
+ The examples below have corresponding `einsum` calls with the two
+ parameter methods.
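+
+    For instance, a brief sketch of the sublist form in both modes:
+
+    >>> a = np.arange(6).reshape(2, 3)
+    >>> np.einsum(a, [1, 0]).shape          # implicit: output axes follow label order
+    (3, 2)
+    >>> np.einsum(a, [0, 1], [1, 0]).shape  # explicit: output labels supplied
+    (3, 2)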
.. versionadded:: 1.10.0
Views returned from einsum are now writeable whenever the input array
is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
- have the same effect as ``np.swapaxes(a, 0, 2)`` and
- ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
+ have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
+ and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
of a 2D array.
Examples
@@ -2377,6 +1669,8 @@ add_newdoc('numpy.core', 'c_einsum',
>>> b = np.arange(5)
>>> c = np.arange(6).reshape(2,3)
+ Trace of a matrix:
+
>>> np.einsum('ii', a)
60
>>> np.einsum(a, [0,0])
@@ -2384,6 +1678,8 @@ add_newdoc('numpy.core', 'c_einsum',
>>> np.trace(a)
60
+ Extract the diagonal (requires explicit form):
+
>>> np.einsum('ii->i', a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum(a, [0,0], [0])
@@ -2391,31 +1687,69 @@ add_newdoc('numpy.core', 'c_einsum',
>>> np.diag(a)
array([ 0, 6, 12, 18, 24])
- >>> np.einsum('ij,j', a, b)
- array([ 30, 80, 130, 180, 230])
- >>> np.einsum(a, [0,1], b, [1])
- array([ 30, 80, 130, 180, 230])
- >>> np.dot(a, b)
- array([ 30, 80, 130, 180, 230])
- >>> np.einsum('...j,j', a, b)
- array([ 30, 80, 130, 180, 230])
+ Sum over an axis (requires explicit form):
+
+ >>> np.einsum('ij->i', a)
+ array([ 10, 35, 60, 85, 110])
+ >>> np.einsum(a, [0,1], [0])
+ array([ 10, 35, 60, 85, 110])
+ >>> np.sum(a, axis=1)
+ array([ 10, 35, 60, 85, 110])
+
+    For higher-dimensional arrays, summing a single axis can be done with an ellipsis:
+
+ >>> np.einsum('...j->...', a)
+ array([ 10, 35, 60, 85, 110])
+ >>> np.einsum(a, [Ellipsis,1], [Ellipsis])
+ array([ 10, 35, 60, 85, 110])
+
+ Compute a matrix transpose, or reorder any number of axes:
>>> np.einsum('ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
+ >>> np.einsum('ij->ji', c)
+ array([[0, 3],
+ [1, 4],
+ [2, 5]])
>>> np.einsum(c, [1,0])
array([[0, 3],
[1, 4],
[2, 5]])
- >>> c.T
+ >>> np.transpose(c)
array([[0, 3],
[1, 4],
[2, 5]])
+ Vector inner products:
+
+ >>> np.einsum('i,i', b, b)
+ 30
+ >>> np.einsum(b, [0], b, [0])
+ 30
+ >>> np.inner(b,b)
+ 30
+
+ Matrix vector multiplication:
+
+ >>> np.einsum('ij,j', a, b)
+ array([ 30, 80, 130, 180, 230])
+ >>> np.einsum(a, [0,1], b, [1])
+ array([ 30, 80, 130, 180, 230])
+ >>> np.dot(a, b)
+ array([ 30, 80, 130, 180, 230])
+ >>> np.einsum('...j,j', a, b)
+ array([ 30, 80, 130, 180, 230])
+
+ Broadcasting and scalar multiplication:
+
>>> np.einsum('..., ...', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
+ >>> np.einsum(',ij', 3, c)
+ array([[ 0, 3, 6],
+ [ 9, 12, 15]])
>>> np.einsum(3, [Ellipsis], c, [Ellipsis])
array([[ 0, 3, 6],
[ 9, 12, 15]])
@@ -2423,12 +1757,7 @@ add_newdoc('numpy.core', 'c_einsum',
array([[ 0, 3, 6],
[ 9, 12, 15]])
- >>> np.einsum('i,i', b, b)
- 30
- >>> np.einsum(b, [0], b, [0])
- 30
- >>> np.inner(b,b)
- 30
+ Vector outer product:
>>> np.einsum('i,j', np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
@@ -2440,12 +1769,7 @@ add_newdoc('numpy.core', 'c_einsum',
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
- >>> np.einsum('i...->...', a)
- array([50, 55, 60, 65, 70])
- >>> np.einsum(a, [0,Ellipsis], [Ellipsis])
- array([50, 55, 60, 65, 70])
- >>> np.sum(a, axis=0)
- array([50, 55, 60, 65, 70])
+ Tensor contraction:
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
@@ -2468,6 +1792,17 @@ add_newdoc('numpy.core', 'c_einsum',
[ 4796., 5162.],
[ 4928., 5306.]])
+ Writeable returned arrays (since version 1.10.0):
+
+ >>> a = np.zeros((3, 3))
+ >>> np.einsum('ii->i', a)[:] = 1
+ >>> a
+ array([[ 1., 0., 0.],
+ [ 0., 1., 0.],
+ [ 0., 0., 1.]])
+
+ Example of ellipsis use:
+
>>> a = np.arange(6).reshape((3,2))
>>> b = np.arange(12).reshape((4,3))
>>> np.einsum('ki,jk->ij', a, b)
@@ -2480,69 +1815,6 @@ add_newdoc('numpy.core', 'c_einsum',
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
- >>> # since version 1.10.0
- >>> a = np.zeros((3, 3))
- >>> np.einsum('ii->i', a)[:] = 1
- >>> a
- array([[ 1., 0., 0.],
- [ 0., 1., 0.],
- [ 0., 0., 1.]])
-
- """)
-
-add_newdoc('numpy.core', 'vdot',
- """
- vdot(a, b)
-
- Return the dot product of two vectors.
-
- The vdot(`a`, `b`) function handles complex numbers differently than
- dot(`a`, `b`). If the first argument is complex the complex conjugate
- of the first argument is used for the calculation of the dot product.
-
- Note that `vdot` handles multidimensional arrays differently than `dot`:
- it does *not* perform a matrix product, but flattens input arguments
- to 1-D vectors first. Consequently, it should only be used for vectors.
-
- Parameters
- ----------
- a : array_like
- If `a` is complex the complex conjugate is taken before calculation
- of the dot product.
- b : array_like
- Second argument to the dot product.
-
- Returns
- -------
- output : ndarray
- Dot product of `a` and `b`. Can be an int, float, or
- complex depending on the types of `a` and `b`.
-
- See Also
- --------
- dot : Return the dot product without using the complex conjugate of the
- first argument.
-
- Examples
- --------
- >>> a = np.array([1+2j,3+4j])
- >>> b = np.array([5+6j,7+8j])
- >>> np.vdot(a, b)
- (70-8j)
- >>> np.vdot(b, a)
- (70+8j)
-
- Note that higher-dimensional arrays are flattened!
-
- >>> a = np.array([[1, 4], [5, 6]])
- >>> b = np.array([[4, 1], [2, 2]])
- >>> np.vdot(a, b)
- 30
- >>> np.vdot(b, a)
- 30
- >>> 1*4 + 4*1 + 5*2 + 6*2
- 30
-
""")
@@ -2763,46 +2035,17 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes',
in "Guide to NumPy" (we have omitted undocumented public attributes,
as well as documented private attributes):
- * data: A pointer to the memory area of the array as a Python integer.
- This memory area may contain data that is not aligned, or not in correct
- byte-order. The memory area may not even be writeable. The array
- flags and data-type of this array should be respected when passing this
- attribute to arbitrary C-code to avoid trouble that can include Python
- crashing. User Beware! The value of this attribute is exactly the same
- as self._array_interface_['data'][0].
-
- * shape (c_intp*self.ndim): A ctypes array of length self.ndim where
- the basetype is the C-integer corresponding to dtype('p') on this
- platform. This base-type could be c_int, c_long, or c_longlong
- depending on the platform. The c_intp type is defined accordingly in
- numpy.ctypeslib. The ctypes array contains the shape of the underlying
- array.
-
- * strides (c_intp*self.ndim): A ctypes array of length self.ndim where
- the basetype is the same as for the shape attribute. This ctypes array
- contains the strides information from the underlying array. This strides
- information is important for showing how many bytes must be jumped to
- get to the next element in the array.
-
- * data_as(obj): Return the data pointer cast to a particular c-types object.
- For example, calling self._as_parameter_ is equivalent to
- self.data_as(ctypes.c_void_p). Perhaps you want to use the data as a
- pointer to a ctypes array of floating-point data:
- self.data_as(ctypes.POINTER(ctypes.c_double)).
-
- * shape_as(obj): Return the shape tuple as an array of some other c-types
- type. For example: self.shape_as(ctypes.c_short).
-
- * strides_as(obj): Return the strides tuple as an array of some other
- c-types type. For example: self.strides_as(ctypes.c_longlong).
-
- Be careful using the ctypes attribute - especially on temporary
- arrays or arrays constructed on the fly. For example, calling
- ``(a+b).ctypes.data_as(ctypes.c_void_p)`` returns a pointer to memory
- that is invalid because the array created as (a+b) is deallocated
- before the next Python statement. You can avoid this problem using
- either ``c=a+b`` or ``ct=(a+b).ctypes``. In the latter case, ct will
- hold a reference to the array until ct is deleted or re-assigned.
+ .. autoattribute:: numpy.core._internal._ctypes.data
+
+ .. autoattribute:: numpy.core._internal._ctypes.shape
+
+ .. autoattribute:: numpy.core._internal._ctypes.strides
+
+ .. automethod:: numpy.core._internal._ctypes.data_as
+
+ .. automethod:: numpy.core._internal._ctypes.shape_as
+
+ .. automethod:: numpy.core._internal._ctypes.strides_as
If the ctypes module is not available, then the ctypes attribute
of array objects still returns something useful, but ctypes objects
@@ -3124,7 +2367,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('size',
Notes
-----
- `a.size` returns a standard arbitrary precision Python integer. This
+ `a.size` returns a standard arbitrary precision Python integer. This
may not be the case with other methods of obtaining the same value
(like the suggested ``np.prod(a.shape)``, which returns an instance
of ``np.int_``), and may be relevant if the value is used further in
@@ -4759,7 +4002,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
machines with different endianness. Some of these problems can be overcome
by outputting the data as text files, at the expense of speed and file
size.
-
+
When fid is a file object, array contents are directly written to the
file, bypassing the file object's ``write`` method. As a result, tofile
cannot be used with files objects supporting compression (e.g., GzipFile)
@@ -5215,273 +4458,6 @@ add_newdoc('numpy.core.umath', 'seterrobj',
#
##############################################################################
-add_newdoc('numpy.core.multiarray', 'digitize',
- """
- digitize(x, bins, right=False)
-
- Return the indices of the bins to which each value in input array belongs.
-
- ========= ============= ============================
- `right` order of bins returned index `i` satisfies
- ========= ============= ============================
- ``False`` increasing ``bins[i-1] <= x < bins[i]``
- ``True`` increasing ``bins[i-1] < x <= bins[i]``
- ``False`` decreasing ``bins[i-1] > x >= bins[i]``
- ``True`` decreasing ``bins[i-1] >= x > bins[i]``
- ========= ============= ============================
-
- If values in `x` are beyond the bounds of `bins`, 0 or ``len(bins)`` is
- returned as appropriate.
-
- Parameters
- ----------
- x : array_like
- Input array to be binned. Prior to NumPy 1.10.0, this array had to
- be 1-dimensional, but can now have any shape.
- bins : array_like
- Array of bins. It has to be 1-dimensional and monotonic.
- right : bool, optional
- Indicating whether the intervals include the right or the left bin
- edge. Default behavior is (right==False) indicating that the interval
- does not include the right edge. The left bin end is open in this
- case, i.e., bins[i-1] <= x < bins[i] is the default behavior for
- monotonically increasing bins.
-
- Returns
- -------
- indices : ndarray of ints
- Output array of indices, of same shape as `x`.
-
- Raises
- ------
- ValueError
- If `bins` is not monotonic.
- TypeError
- If the type of the input is complex.
-
- See Also
- --------
- bincount, histogram, unique, searchsorted
-
- Notes
- -----
- If values in `x` are such that they fall outside the bin range,
- attempting to index `bins` with the indices that `digitize` returns
- will result in an IndexError.
-
- .. versionadded:: 1.10.0
-
- `np.digitize` is implemented in terms of `np.searchsorted`. This means
- that a binary search is used to bin the values, which scales much better
- for larger number of bins than the previous linear search. It also removes
- the requirement for the input array to be 1-dimensional.
-
- For monotonically _increasing_ `bins`, the following are equivalent::
-
- np.digitize(x, bins, right=True)
- np.searchsorted(bins, x, side='left')
-
-    Note that as the order of the arguments is reversed, the side must be too.
- The `searchsorted` call is marginally faster, as it does not do any
- monotonicity checks. Perhaps more importantly, it supports all dtypes.
-
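-    For illustration, a quick check of this equivalence with increasing bins:
-
-    >>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
-    >>> x = np.array([0.2, 6.4, 3.0, 1.6])
-    >>> np.digitize(x, bins, right=True)
-    array([1, 4, 3, 2])
-    >>> np.searchsorted(bins, x, side='left')
-    array([1, 4, 3, 2])
-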
- Examples
- --------
- >>> x = np.array([0.2, 6.4, 3.0, 1.6])
- >>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
- >>> inds = np.digitize(x, bins)
- >>> inds
- array([1, 4, 3, 2])
- >>> for n in range(x.size):
- ... print(bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]])
- ...
- 0.0 <= 0.2 < 1.0
- 4.0 <= 6.4 < 10.0
- 2.5 <= 3.0 < 4.0
- 1.0 <= 1.6 < 2.5
-
- >>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.])
- >>> bins = np.array([0, 5, 10, 15, 20])
- >>> np.digitize(x,bins,right=True)
- array([1, 2, 3, 4, 4])
- >>> np.digitize(x,bins,right=False)
- array([1, 3, 3, 4, 5])
- """)
-
-add_newdoc('numpy.core.multiarray', 'bincount',
- """
- bincount(x, weights=None, minlength=0)
-
- Count number of occurrences of each value in array of non-negative ints.
-
- The number of bins (of size 1) is one larger than the largest value in
- `x`. If `minlength` is specified, there will be at least this number
- of bins in the output array (though it will be longer if necessary,
- depending on the contents of `x`).
- Each bin gives the number of occurrences of its index value in `x`.
- If `weights` is specified the input array is weighted by it, i.e. if a
- value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead
- of ``out[n] += 1``.
-
- Parameters
- ----------
- x : array_like, 1 dimension, nonnegative ints
- Input array.
- weights : array_like, optional
- Weights, array of the same shape as `x`.
- minlength : int, optional
- A minimum number of bins for the output array.
-
- .. versionadded:: 1.6.0
-
- Returns
- -------
- out : ndarray of ints
- The result of binning the input array.
- The length of `out` is equal to ``np.amax(x)+1``.
-
- Raises
- ------
- ValueError
- If the input is not 1-dimensional, or contains elements with negative
- values, or if `minlength` is negative.
- TypeError
- If the type of the input is float or complex.
-
- See Also
- --------
- histogram, digitize, unique
-
- Examples
- --------
- >>> np.bincount(np.arange(5))
- array([1, 1, 1, 1, 1])
- >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
- array([1, 3, 1, 1, 0, 0, 0, 1])
-
- >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
- >>> np.bincount(x).size == np.amax(x)+1
- True
-
- The input array needs to be of integer dtype, otherwise a
- TypeError is raised:
-
- >>> np.bincount(np.arange(5, dtype=float))
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- TypeError: array cannot be safely cast to required type
-
- A possible use of ``bincount`` is to perform sums over
- variable-size chunks of an array, using the ``weights`` keyword.
-
- >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
- >>> x = np.array([0, 1, 1, 2, 2, 2])
- >>> np.bincount(x, weights=w)
- array([ 0.3, 0.7, 1.1])
-
- """)
-
-add_newdoc('numpy.core.multiarray', 'ravel_multi_index',
- """
- ravel_multi_index(multi_index, dims, mode='raise', order='C')
-
- Converts a tuple of index arrays into an array of flat
- indices, applying boundary modes to the multi-index.
-
- Parameters
- ----------
- multi_index : tuple of array_like
- A tuple of integer arrays, one array for each dimension.
- dims : tuple of ints
- The shape of array into which the indices from ``multi_index`` apply.
- mode : {'raise', 'wrap', 'clip'}, optional
- Specifies how out-of-bounds indices are handled. Can specify
- either one mode or a tuple of modes, one mode per index.
-
- * 'raise' -- raise an error (default)
- * 'wrap' -- wrap around
- * 'clip' -- clip to the range
-
- In 'clip' mode, a negative index which would normally
- wrap will clip to 0 instead.
- order : {'C', 'F'}, optional
- Determines whether the multi-index should be viewed as
- indexing in row-major (C-style) or column-major
- (Fortran-style) order.
-
- Returns
- -------
- raveled_indices : ndarray
- An array of indices into the flattened version of an array
- of dimensions ``dims``.
-
- See Also
- --------
- unravel_index
-
- Notes
- -----
- .. versionadded:: 1.6.0
-
- Examples
- --------
- >>> arr = np.array([[3,6,6],[4,5,1]])
- >>> np.ravel_multi_index(arr, (7,6))
- array([22, 41, 37])
- >>> np.ravel_multi_index(arr, (7,6), order='F')
- array([31, 41, 13])
- >>> np.ravel_multi_index(arr, (4,6), mode='clip')
- array([22, 23, 19])
- >>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap'))
- array([12, 13, 13])
-
- >>> np.ravel_multi_index((3,1,4,1), (6,7,8,9))
- 1621
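-
-    As a quick check, that last result can be reproduced by hand; in C order
-    the flat index is ``i0*(7*8*9) + i1*(8*9) + i2*9 + i3``:
-
-    >>> 3*(7*8*9) + 1*(8*9) + 4*9 + 1
-    1621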
- """)
-
-add_newdoc('numpy.core.multiarray', 'unravel_index',
- """
- unravel_index(indices, dims, order='C')
-
- Converts a flat index or array of flat indices into a tuple
- of coordinate arrays.
-
- Parameters
- ----------
- indices : array_like
- An integer array whose elements are indices into the flattened
- version of an array of dimensions ``dims``. Before version 1.6.0,
- this function accepted just one index value.
- dims : tuple of ints
- The shape of the array to use for unraveling ``indices``.
- order : {'C', 'F'}, optional
- Determines whether the indices should be viewed as indexing in
- row-major (C-style) or column-major (Fortran-style) order.
-
- .. versionadded:: 1.6.0
-
- Returns
- -------
- unraveled_coords : tuple of ndarray
- Each array in the tuple has the same shape as the ``indices``
- array.
-
- See Also
- --------
- ravel_multi_index
-
- Examples
- --------
- >>> np.unravel_index([22, 41, 37], (7,6))
- (array([3, 6, 6]), array([4, 5, 1]))
- >>> np.unravel_index([31, 41, 13], (7,6), order='F')
- (array([3, 6, 6]), array([4, 5, 1]))
-
- >>> np.unravel_index(1621, (6,7,8,9))
- (3, 1, 4, 1)
-
- """)
-
add_newdoc('numpy.core.multiarray', 'add_docstring',
"""
add_docstring(obj, docstring)
@@ -5958,7 +4934,7 @@ add_newdoc('numpy.core', 'ufunc', ('reduce',
to None - otherwise it defaults to ufunc.identity.
If ``None`` is given, the first element of the reduction is used,
and an error is thrown if the reduction is empty.
-
+
.. versionadded:: 1.15.0
Returns
@@ -5991,18 +4967,18 @@ add_newdoc('numpy.core', 'ufunc', ('reduce',
>>> np.add.reduce(X, 2)
array([[ 1, 5],
[ 9, 13]])
-
+
You can use the ``initial`` keyword argument to initialize the reduction with a
different value.
-
+
>>> np.add.reduce([10], initial=5)
15
>>> np.add.reduce(np.ones((2, 2, 2)), axis=(0, 2), initial=10)
array([14., 14.])
-
+
Allows reductions of empty arrays where they would normally fail, i.e.
for ufuncs without an identity.
-
+
>>> np.minimum.reduce([], initial=np.inf)
inf
>>> np.minimum.reduce([])
@@ -6472,13 +5448,13 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('char',
add_newdoc('numpy.core.multiarray', 'dtype', ('descr',
"""
- PEP3118 interface description of the data-type.
+ `__array_interface__` description of the data-type.
The format is that required by the 'descr' key in the
- PEP3118 `__array_interface__` attribute.
+ `__array_interface__` attribute.
- Warning: This attribute exists specifically for PEP3118 compliance, and
- is not a datatype description compatible with `np.dtype`.
+ Warning: This attribute exists specifically for `__array_interface__`,
+ and is not a datatype description compatible with `np.dtype`.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('fields',
@@ -6490,6 +5466,7 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('fields',
(dtype, offset[, title])
+ Offset is limited to C int, which is signed and usually 32 bits.
If present, the optional title can be any object (if it is a string
or unicode then it will also be a key in the fields dictionary,
otherwise it's meta-data). Notice also that the first two elements
@@ -6821,211 +5798,6 @@ add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('weekmask',
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('holidays',
"""A copy of the holiday array indicating additional invalid days."""))
-add_newdoc('numpy.core.multiarray', 'is_busday',
- """
- is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None)
-
- Calculates which of the given dates are valid days, and which are not.
-
- .. versionadded:: 1.7.0
-
- Parameters
- ----------
- dates : array_like of datetime64[D]
- The array of dates to process.
- weekmask : str or array_like of bool, optional
- A seven-element array indicating which of Monday through Sunday are
- valid days. May be specified as a length-seven list or array, like
- [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
- like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
- weekdays, optionally separated by white space. Valid abbreviations
- are: Mon Tue Wed Thu Fri Sat Sun
- holidays : array_like of datetime64[D], optional
- An array of dates to consider as invalid dates. They may be
- specified in any order, and NaT (not-a-time) dates are ignored.
- This list is saved in a normalized form that is suited for
- fast calculations of valid days.
- busdaycal : busdaycalendar, optional
- A `busdaycalendar` object which specifies the valid days. If this
- parameter is provided, neither weekmask nor holidays may be
- provided.
- out : array of bool, optional
- If provided, this array is filled with the result.
-
- Returns
- -------
- out : array of bool
- An array with the same shape as ``dates``, containing True for
- each valid day, and False for each invalid day.
-
- See Also
- --------
- busdaycalendar: An object that specifies a custom set of valid days.
- busday_offset : Applies an offset counted in valid days.
- busday_count : Counts how many valid days are in a half-open date range.
-
- Examples
- --------
- >>> # The weekdays are Friday, Saturday, and Monday
- ... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'],
- ... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
- array([False, False, True], dtype='bool')
- """)
-
-add_newdoc('numpy.core.multiarray', 'busday_offset',
- """
- busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None)
-
- First adjusts the date to fall on a valid day according to
- the ``roll`` rule, then applies offsets to the given dates
- counted in valid days.
-
- .. versionadded:: 1.7.0
-
- Parameters
- ----------
- dates : array_like of datetime64[D]
- The array of dates to process.
- offsets : array_like of int
- The array of offsets, which is broadcast with ``dates``.
- roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', 'modifiedfollowing', 'modifiedpreceding'}, optional
- How to treat dates that do not fall on a valid day. The default
- is 'raise'.
-
- * 'raise' means to raise an exception for an invalid day.
- * 'nat' means to return a NaT (not-a-time) for an invalid day.
- * 'forward' and 'following' mean to take the first valid day
- later in time.
- * 'backward' and 'preceding' mean to take the first valid day
- earlier in time.
- * 'modifiedfollowing' means to take the first valid day
- later in time unless it is across a Month boundary, in which
- case to take the first valid day earlier in time.
- * 'modifiedpreceding' means to take the first valid day
- earlier in time unless it is across a Month boundary, in which
- case to take the first valid day later in time.
- weekmask : str or array_like of bool, optional
- A seven-element array indicating which of Monday through Sunday are
- valid days. May be specified as a length-seven list or array, like
- [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
- like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
- weekdays, optionally separated by white space. Valid abbreviations
- are: Mon Tue Wed Thu Fri Sat Sun
- holidays : array_like of datetime64[D], optional
- An array of dates to consider as invalid dates. They may be
- specified in any order, and NaT (not-a-time) dates are ignored.
- This list is saved in a normalized form that is suited for
- fast calculations of valid days.
- busdaycal : busdaycalendar, optional
- A `busdaycalendar` object which specifies the valid days. If this
- parameter is provided, neither weekmask nor holidays may be
- provided.
- out : array of datetime64[D], optional
- If provided, this array is filled with the result.
-
- Returns
- -------
- out : array of datetime64[D]
- An array with a shape from broadcasting ``dates`` and ``offsets``
- together, containing the dates with offsets applied.
-
- See Also
- --------
- busdaycalendar: An object that specifies a custom set of valid days.
- is_busday : Returns a boolean array indicating valid days.
- busday_count : Counts how many valid days are in a half-open date range.
-
- Examples
- --------
- >>> # First business day in October 2011 (not accounting for holidays)
- ... np.busday_offset('2011-10', 0, roll='forward')
- numpy.datetime64('2011-10-03','D')
- >>> # Last business day in February 2012 (not accounting for holidays)
- ... np.busday_offset('2012-03', -1, roll='forward')
- numpy.datetime64('2012-02-29','D')
- >>> # Third Wednesday in January 2011
- ... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed')
- numpy.datetime64('2011-01-19','D')
- >>> # 2012 Mother's Day in Canada and the U.S.
- ... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun')
- numpy.datetime64('2012-05-13','D')
-
- >>> # First business day on or after a date
- ... np.busday_offset('2011-03-20', 0, roll='forward')
- numpy.datetime64('2011-03-21','D')
- >>> np.busday_offset('2011-03-22', 0, roll='forward')
- numpy.datetime64('2011-03-22','D')
- >>> # First business day after a date
- ... np.busday_offset('2011-03-20', 1, roll='backward')
- numpy.datetime64('2011-03-21','D')
- >>> np.busday_offset('2011-03-22', 1, roll='backward')
- numpy.datetime64('2011-03-23','D')
- """)
-
-add_newdoc('numpy.core.multiarray', 'busday_count',
- """
- busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None)
-
- Counts the number of valid days between `begindates` and
- `enddates`, not including the day of `enddates`.
-
- If ``enddates`` specifies a date value that is earlier than the
- corresponding ``begindates`` date value, the count will be negative.
-
- .. versionadded:: 1.7.0
-
- Parameters
- ----------
- begindates : array_like of datetime64[D]
- The array of the first dates for counting.
- enddates : array_like of datetime64[D]
- The array of the end dates for counting, which are excluded
- from the count themselves.
- weekmask : str or array_like of bool, optional
- A seven-element array indicating which of Monday through Sunday are
- valid days. May be specified as a length-seven list or array, like
- [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
- like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
- weekdays, optionally separated by white space. Valid abbreviations
- are: Mon Tue Wed Thu Fri Sat Sun
- holidays : array_like of datetime64[D], optional
- An array of dates to consider as invalid dates. They may be
- specified in any order, and NaT (not-a-time) dates are ignored.
- This list is saved in a normalized form that is suited for
- fast calculations of valid days.
- busdaycal : busdaycalendar, optional
- A `busdaycalendar` object which specifies the valid days. If this
- parameter is provided, neither weekmask nor holidays may be
- provided.
- out : array of int, optional
- If provided, this array is filled with the result.
-
- Returns
- -------
- out : array of int
- An array with a shape from broadcasting ``begindates`` and ``enddates``
- together, containing the number of valid days between
- the begin and end dates.
-
- See Also
- --------
- busdaycalendar: An object that specifies a custom set of valid days.
- is_busday : Returns a boolean array indicating valid days.
- busday_offset : Applies an offset counted in valid days.
-
- Examples
- --------
- >>> # Number of weekdays in January 2011
- ... np.busday_count('2011-01', '2011-02')
- 21
- >>> # Number of weekdays in 2011
- ... np.busday_count('2011', '2012')
- 260
- >>> # Number of Saturdays in 2011
- ... np.busday_count('2011', '2012', weekmask='Sat')
- 53
- """)
-
add_newdoc('numpy.core.multiarray', 'normalize_axis_index',
"""
normalize_axis_index(axis, ndim, msg_prefix=None)
@@ -7077,75 +5849,14 @@ add_newdoc('numpy.core.multiarray', 'normalize_axis_index',
AxisError: axes_arg: axis -4 is out of bounds for array of dimension 3
""")
-add_newdoc('numpy.core.multiarray', 'datetime_as_string',
- """
- datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind')
-
- Convert an array of datetimes into an array of strings.
-
- Parameters
- ----------
- arr : array_like of datetime64
- The array of UTC timestamps to format.
- unit : str
- One of None, 'auto', or a :ref:`datetime unit <arrays.dtypes.dateunits>`.
- timezone : {'naive', 'UTC', 'local'} or tzinfo
- Timezone information to use when displaying the datetime. If 'UTC', end
- with a Z to indicate UTC time. If 'local', convert to the local timezone
- first, and suffix with a +-#### timezone offset. If a tzinfo object,
- then do as with 'local', but use the specified timezone.
- casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}
- Casting to allow when changing between datetime units.
-
- Returns
- -------
- str_arr : ndarray
- An array of strings the same shape as `arr`.
-
- Examples
- --------
- >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]')
- >>> d
- array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30',
- '2002-10-27T07:30'], dtype='datetime64[m]')
-
- Setting the timezone to UTC shows the same information, but with a Z suffix
-
- >>> np.datetime_as_string(d, timezone='UTC')
- array(['2002-10-27T04:30Z', '2002-10-27T05:30Z', '2002-10-27T06:30Z',
- '2002-10-27T07:30Z'], dtype='<U35')
-
- Note that we picked datetimes that cross a DST boundary. Passing in a
- ``pytz`` timezone object will print the appropriate offset
-
- >>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern'))
- array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400',
- '2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='<U39')
-
- Passing in a unit will change the precision
-
- >>> np.datetime_as_string(d, unit='h')
- array(['2002-10-27T04', '2002-10-27T05', '2002-10-27T06', '2002-10-27T07'],
- dtype='<U32')
- >>> np.datetime_as_string(d, unit='s')
- array(['2002-10-27T04:30:00', '2002-10-27T05:30:00', '2002-10-27T06:30:00',
- '2002-10-27T07:30:00'], dtype='<U38')
-
- 'casting' can be used to specify whether precision can be changed
-
- >>> np.datetime_as_string(d, unit='h', casting='safe')
- TypeError: Cannot create a datetime string as units 'h' from a NumPy
- datetime with units 'm' according to the rule 'safe'
- """)
-
add_newdoc('numpy.core.multiarray', 'datetime_data',
"""
datetime_data(dtype, /)
Get information about the step size of a date or time type.
- The returned tuple can be passed as the second argument of `datetime64` and
- `timedelta64`.
+ The returned tuple can be passed as the second argument of `numpy.datetime64` and
+ `numpy.timedelta64`.
Parameters
----------
@@ -7169,98 +5880,10 @@ add_newdoc('numpy.core.multiarray', 'datetime_data',
array(250, dtype='timedelta64[s]')
The result can be used to construct a datetime that uses the same units
- as a timedelta::
+ as a timedelta
>>> np.datetime64('2010', np.datetime_data(dt_25s))
- numpy.datetime64('2010-01-01T00:00:00','25s')
- """)
-
-##############################################################################
-#
-# nd_grid instances
-#
-##############################################################################
-
-add_newdoc('numpy.lib.index_tricks', 'mgrid',
- """
- `nd_grid` instance which returns a dense multi-dimensional "meshgrid".
-
-    An instance of `numpy.lib.index_tricks.nd_grid` which returns a dense
- (or fleshed out) mesh-grid when indexed, so that each returned argument
- has the same shape. The dimensions and number of the output arrays are
- equal to the number of indexing dimensions. If the step length is not a
- complex number, then the stop is not inclusive.
-
- However, if the step length is a **complex number** (e.g. 5j), then
- the integer part of its magnitude is interpreted as specifying the
- number of points to create between the start and stop values, where
- the stop value **is inclusive**.
-
- Returns
- ----------
- mesh-grid `ndarrays` all of the same dimensions
-
- See Also
- --------
- numpy.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
- ogrid : like mgrid but returns open (not fleshed out) mesh grids
- r_ : array concatenator
-
- Examples
- --------
- >>> np.mgrid[0:5,0:5]
- array([[[0, 0, 0, 0, 0],
- [1, 1, 1, 1, 1],
- [2, 2, 2, 2, 2],
- [3, 3, 3, 3, 3],
- [4, 4, 4, 4, 4]],
- [[0, 1, 2, 3, 4],
- [0, 1, 2, 3, 4],
- [0, 1, 2, 3, 4],
- [0, 1, 2, 3, 4],
- [0, 1, 2, 3, 4]]])
- >>> np.mgrid[-1:1:5j]
- array([-1. , -0.5, 0. , 0.5, 1. ])
-
- """)
-
-add_newdoc('numpy.lib.index_tricks', 'ogrid',
- """
- `nd_grid` instance which returns an open multi-dimensional "meshgrid".
-
- An instance of `numpy.lib.index_tricks.nd_grid` which returns an open
- (i.e. not fleshed out) mesh-grid when indexed, so that only one dimension
- of each returned array is greater than 1. The dimension and number of the
- output arrays are equal to the number of indexing dimensions. If the step
- length is not a complex number, then the stop is not inclusive.
-
- However, if the step length is a **complex number** (e.g. 5j), then
- the integer part of its magnitude is interpreted as specifying the
- number of points to create between the start and stop values, where
- the stop value **is inclusive**.
-
- Returns
- ----------
- mesh-grid `ndarrays` with only one dimension :math:`\\neq 1`
-
- See Also
- --------
- np.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
- mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids
- r_ : array concatenator
-
- Examples
- --------
- >>> from numpy import ogrid
- >>> ogrid[-1:1:5j]
- array([-1. , -0.5, 0. , 0.5, 1. ])
- >>> ogrid[0:5,0:5]
- [array([[0],
- [1],
- [2],
- [3],
- [4]]), array([[0, 1, 2, 3, 4]])]
-
+ numpy.datetime64('2010-01-01T00:00:00', '25s')
""")
@@ -8086,66 +6709,228 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('view',
##############################################################################
#
-# Documentation for other scalar classes
+# Documentation for scalar type abstract base classes in type hierarchy
#
##############################################################################
-add_newdoc('numpy.core.numerictypes', 'bool_',
- """NumPy's Boolean type. Character code: ``?``. Alias: bool8""")
-add_newdoc('numpy.core.numerictypes', 'complex64',
+add_newdoc('numpy.core.numerictypes', 'number',
"""
- Complex number type composed of two 32 bit floats. Character code: 'F'.
+ Abstract base class of all numeric scalar types.
+
+ """)
+add_newdoc('numpy.core.numerictypes', 'integer',
+ """
+ Abstract base class of all integer scalar types.
+
""")
-add_newdoc('numpy.core.numerictypes', 'complex128',
+add_newdoc('numpy.core.numerictypes', 'signedinteger',
"""
- Complex number type composed of two 64 bit floats. Character code: 'D'.
- Python complex compatible.
+ Abstract base class of all signed integer scalar types.
+
+ """)
+add_newdoc('numpy.core.numerictypes', 'unsignedinteger',
+ """
+ Abstract base class of all unsigned integer scalar types.
+
""")
-add_newdoc('numpy.core.numerictypes', 'complex256',
+add_newdoc('numpy.core.numerictypes', 'inexact',
"""
- Complex number type composed of two 128-bit floats. Character code: 'G'.
+ Abstract base class of all numeric scalar types with a (potentially)
+ inexact representation of the values in its range, such as
+ floating-point numbers.
+
+ """)
+add_newdoc('numpy.core.numerictypes', 'floating',
+ """
+ Abstract base class of all floating-point scalar types.
+
""")
-add_newdoc('numpy.core.numerictypes', 'float32',
+add_newdoc('numpy.core.numerictypes', 'complexfloating',
"""
- 32-bit floating-point number. Character code 'f'. C float compatible.
+ Abstract base class of all complex number scalar types that are made up of
+ floating-point numbers.
+
+ """)
+add_newdoc('numpy.core.numerictypes', 'flexible',
+ """
+ Abstract base class of all scalar types without predefined length.
+ The actual size of these types depends on the specific `np.dtype`
+ instantiation.
+
""")
-add_newdoc('numpy.core.numerictypes', 'float64',
+add_newdoc('numpy.core.numerictypes', 'character',
"""
- 64-bit floating-point number. Character code 'd'. Python float compatible.
+ Abstract base class of all character string scalar types.
+
+ """)
+
+
+##############################################################################
+#
+# Documentation for concrete scalar classes
+#
+##############################################################################
+
+def numeric_type_aliases(aliases):
+ def type_aliases_gen():
+ for alias, doc in aliases:
+ try:
+ alias_type = getattr(_numerictypes, alias)
+ except AttributeError:
+ # The set of aliases that actually exist varies between platforms
+ pass
+ else:
+ yield (alias_type, alias, doc)
+ return list(type_aliases_gen())
+
+
+possible_aliases = numeric_type_aliases([
+ ('int8', '8-bit signed integer (-128 to 127)'),
+ ('int16', '16-bit signed integer (-32768 to 32767)'),
+ ('int32', '32-bit signed integer (-2147483648 to 2147483647)'),
+ ('int64', '64-bit signed integer (-9223372036854775808 to 9223372036854775807)'),
+ ('intp', 'Signed integer large enough to fit pointer, compatible with C ``intptr_t``'),
+ ('uint8', '8-bit unsigned integer (0 to 255)'),
+ ('uint16', '16-bit unsigned integer (0 to 65535)'),
+ ('uint32', '32-bit unsigned integer (0 to 4294967295)'),
+ ('uint64', '64-bit unsigned integer (0 to 18446744073709551615)'),
+ ('uintp', 'Unsigned integer large enough to fit pointer, compatible with C ``uintptr_t``'),
+ ('float16', '16-bit-precision floating-point number type: sign bit, 5 bits exponent, 10 bits mantissa'),
+ ('float32', '32-bit-precision floating-point number type: sign bit, 8 bits exponent, 23 bits mantissa'),
+    ('float64', '64-bit-precision floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa'),
+ ('float96', '96-bit extended-precision floating-point number type'),
+ ('float128', '128-bit extended-precision floating-point number type'),
+ ('complex64', 'Complex number type composed of 2 32-bit-precision floating-point numbers'),
+ ('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'),
+ ('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'),
+ ('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'),
+ ])
+
+
+def add_newdoc_for_scalar_type(obj, fixed_aliases, doc):
+ o = getattr(_numerictypes, obj)
+
+ character_code = dtype(o).char
+ canonical_name_doc = "" if obj == o.__name__ else "Canonical name: ``np.{}``.\n ".format(obj)
+ alias_doc = ''.join("Alias: ``np.{}``.\n ".format(alias) for alias in fixed_aliases)
+ alias_doc += ''.join("Alias *on this platform*: ``np.{}``: {}.\n ".format(alias, doc)
+ for (alias_type, alias, doc) in possible_aliases if alias_type is o)
+
+ docstring = """
+ {doc}
+ Character code: ``'{character_code}'``.
+ {canonical_name_doc}{alias_doc}
+ """.format(doc=doc.strip(), character_code=character_code,
+ canonical_name_doc=canonical_name_doc, alias_doc=alias_doc)
+
+ add_newdoc('numpy.core.numerictypes', obj, docstring)
+
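+# For illustration only (an expectation, not part of the patch): on a platform
+# where np.short is np.int16, add_newdoc_for_scalar_type('short', [], ...)
+# composes a docstring roughly like:
+#
+#     Signed integer type, compatible with C ``short``.
+#     Character code: ``'h'``.
+#     Alias *on this platform*: ``np.int16``: 16-bit signed integer (-32768 to 32767).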
+
+add_newdoc_for_scalar_type('bool_', ['bool8'],
+ """
+ Boolean type (True or False), stored as a byte.
+ """)
+
+add_newdoc_for_scalar_type('byte', [],
+ """
+ Signed integer type, compatible with C ``char``.
+ """)
+
+add_newdoc_for_scalar_type('short', [],
+ """
+ Signed integer type, compatible with C ``short``.
+ """)
+
+add_newdoc_for_scalar_type('intc', [],
+ """
+ Signed integer type, compatible with C ``int``.
+ """)
+
+add_newdoc_for_scalar_type('int_', [],
+ """
+    Signed integer type, compatible with Python `int` and C ``long``.
+ """)
+
+add_newdoc_for_scalar_type('longlong', [],
+ """
+ Signed integer type, compatible with C ``long long``.
+ """)
+add_newdoc_for_scalar_type('ubyte', [],
+ """
+ Unsigned integer type, compatible with C ``unsigned char``.
""")
-add_newdoc('numpy.core.numerictypes', 'float96',
+add_newdoc_for_scalar_type('ushort', [],
"""
+ Unsigned integer type, compatible with C ``unsigned short``.
""")
-add_newdoc('numpy.core.numerictypes', 'float128',
+add_newdoc_for_scalar_type('uintc', [],
"""
- 128-bit floating-point number. Character code: 'g'. C long float
- compatible.
+ Unsigned integer type, compatible with C ``unsigned int``.
+ """)
+add_newdoc_for_scalar_type('uint', [],
+ """
+ Unsigned integer type, compatible with C ``unsigned long``.
""")
-add_newdoc('numpy.core.numerictypes', 'int8',
- """8-bit integer. Character code ``b``. C char compatible.""")
+add_newdoc_for_scalar_type('ulonglong', [],
+ """
+    Unsigned integer type, compatible with C ``unsigned long long``.
+ """)
-add_newdoc('numpy.core.numerictypes', 'int16',
- """16-bit integer. Character code ``h``. C short compatible.""")
+add_newdoc_for_scalar_type('half', [],
+ """
+ Half-precision floating-point number type.
+ """)
-add_newdoc('numpy.core.numerictypes', 'int32',
- """32-bit integer. Character code 'i'. C int compatible.""")
+add_newdoc_for_scalar_type('single', [],
+ """
+ Single-precision floating-point number type, compatible with C ``float``.
+ """)
-add_newdoc('numpy.core.numerictypes', 'int64',
- """64-bit integer. Character code 'l'. Python int compatible.""")
+add_newdoc_for_scalar_type('double', ['float_'],
+ """
+ Double-precision floating-point number type, compatible with Python `float`
+ and C ``double``.
+ """)
-add_newdoc('numpy.core.numerictypes', 'object_',
- """Any Python object. Character code: 'O'.""")
+add_newdoc_for_scalar_type('longdouble', ['longfloat'],
+ """
+ Extended-precision floating-point number type, compatible with C
+ ``long double`` but not necessarily with IEEE 754 quadruple-precision.
+ """)
+
+add_newdoc_for_scalar_type('csingle', ['singlecomplex'],
+ """
+ Complex number type composed of two single-precision floating-point
+ numbers.
+ """)
+
+add_newdoc_for_scalar_type('cdouble', ['cfloat', 'complex_'],
+ """
+ Complex number type composed of two double-precision floating-point
+ numbers, compatible with Python `complex`.
+ """)
+
+add_newdoc_for_scalar_type('clongdouble', ['clongfloat', 'longcomplex'],
+ """
+ Complex number type composed of two extended-precision floating-point
+ numbers.
+ """)
+
+add_newdoc_for_scalar_type('object_', [],
+ """
+ Any Python object.
+ """)
diff --git a/numpy/core/_aliased_types.py b/numpy/core/_aliased_types.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/core/_aliased_types.py
diff --git a/numpy/core/_dtype.py b/numpy/core/_dtype.py
new file mode 100644
index 000000000..3a12c8fad
--- /dev/null
+++ b/numpy/core/_dtype.py
@@ -0,0 +1,341 @@
+"""
+A place for code to be called from the implementation of np.dtype
+
+String handling is much easier to do correctly in python.
+"""
+from __future__ import division, absolute_import, print_function
+
+import sys
+
+import numpy as np
+
+
+_kind_to_stem = {
+ 'u': 'uint',
+ 'i': 'int',
+ 'c': 'complex',
+ 'f': 'float',
+ 'b': 'bool',
+ 'V': 'void',
+ 'O': 'object',
+ 'M': 'datetime',
+ 'm': 'timedelta'
+}
+if sys.version_info[0] >= 3:
+ _kind_to_stem.update({
+ 'S': 'bytes',
+ 'U': 'str'
+ })
+else:
+ _kind_to_stem.update({
+ 'S': 'string',
+ 'U': 'unicode'
+ })
+
+
+def _kind_name(dtype):
+ try:
+ return _kind_to_stem[dtype.kind]
+ except KeyError:
+ raise RuntimeError(
+ "internal dtype error, unknown kind {!r}"
+ .format(dtype.kind)
+ )
+
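+# A quick illustration of the mapping above (assuming the helpers in this
+# module): _kind_name(np.dtype('f8')) -> 'float', and
+# _kind_name(np.dtype('M8[s]')) -> 'datetime'.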
+
+def __str__(dtype):
+ if dtype.fields is not None:
+ return _struct_str(dtype, include_align=True)
+ elif dtype.subdtype:
+ return _subarray_str(dtype)
+ elif issubclass(dtype.type, np.flexible) or not dtype.isnative:
+ return dtype.str
+ else:
+ return dtype.name
+
+
+def __repr__(dtype):
+ arg_str = _construction_repr(dtype, include_align=False)
+ if dtype.isalignedstruct:
+ arg_str = arg_str + ", align=True"
+ return "dtype({})".format(arg_str)
+
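+# For illustration (assuming these hooks are wired into np.dtype): one would
+# expect str(np.dtype(np.int16)) == 'int16' and
+# repr(np.dtype(np.int16)) == "dtype('int16')".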
+
+def _unpack_field(dtype, offset, title=None):
+ """
+ Helper function to normalize the items in dtype.fields.
+
+ Call as:
+
+ dtype, offset, title = _unpack_field(*dtype.fields[name])
+ """
+ return dtype, offset, title
+
+
+def _isunsized(dtype):
+ # PyDataType_ISUNSIZED
+ return dtype.itemsize == 0
+
+
+def _construction_repr(dtype, include_align=False, short=False):
+ """
+ Creates a string repr of the dtype, excluding the 'dtype()' part
+ surrounding the object. This object may be a string, a list, or
+ a dict depending on the nature of the dtype. This
+ is the object passed as the first parameter to the dtype
+ constructor, and if no additional constructor parameters are
+ given, will reproduce the exact memory layout.
+
+ Parameters
+ ----------
+ short : bool
+ If true, this creates a shorter repr using 'kind' and 'itemsize', instead
+ of the longer type name.
+
+ include_align : bool
+ If true, this includes the 'align=True' parameter
+ inside the struct dtype construction dict when needed. Use this flag
+ if you want a proper repr string without the 'dtype()' part around it.
+
+ If false, this does not preserve the
+ 'align=True' parameter or sticky NPY_ALIGNED_STRUCT flag for
+ struct arrays like the regular repr does, because the 'align'
+ flag is not part of first dtype constructor parameter. This
+ mode is intended for a full 'repr', where the 'align=True' is
+ provided as the second parameter.
+ """
+ if dtype.fields is not None:
+ return _struct_str(dtype, include_align=include_align)
+ elif dtype.subdtype:
+ return _subarray_str(dtype)
+ else:
+ return _scalar_str(dtype, short=short)
+
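+# Sketch of expected behaviour on a little-endian machine (an assumption):
+#   _construction_repr(np.dtype('f8'))             -> "'float64'"
+#   _construction_repr(np.dtype('>f8'))            -> "'>f8'"
+#   _construction_repr(np.dtype('f8'), short=True) -> "'<f8'"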
+
+def _scalar_str(dtype, short):
+ byteorder = _byte_order_str(dtype)
+
+ if dtype.type == np.bool_:
+ if short:
+ return "'?'"
+ else:
+ return "'bool'"
+
+ elif dtype.type == np.object_:
+ # The object reference may be different sizes on different
+ # platforms, so it should never include the itemsize here.
+ return "'O'"
+
+ elif dtype.type == np.string_:
+ if _isunsized(dtype):
+ return "'S'"
+ else:
+ return "'S%d'" % dtype.itemsize
+
+ elif dtype.type == np.unicode_:
+ if _isunsized(dtype):
+ return "'%sU'" % byteorder
+ else:
+ return "'%sU%d'" % (byteorder, dtype.itemsize / 4)
+
+ # unlike the other types, subclasses of void are preserved - but
+ # historically the repr does not actually reveal the subclass
+ elif issubclass(dtype.type, np.void):
+ if _isunsized(dtype):
+ return "'V'"
+ else:
+ return "'V%d'" % dtype.itemsize
+
+ elif dtype.type == np.datetime64:
+ return "'%sM8%s'" % (byteorder, _datetime_metadata_str(dtype))
+
+ elif dtype.type == np.timedelta64:
+ return "'%sm8%s'" % (byteorder, _datetime_metadata_str(dtype))
+
+ elif np.issubdtype(dtype, np.number):
+ # Short repr with endianness, like '<f8'
+ if short or dtype.byteorder not in ('=', '|'):
+ return "'%s%c%d'" % (byteorder, dtype.kind, dtype.itemsize)
+
+ # Longer repr, like 'float64'
+ else:
+ return "'%s%d'" % (_kind_name(dtype), 8*dtype.itemsize)
+
+ elif dtype.isbuiltin == 2:
+ return dtype.type.__name__
+
+ else:
+ raise RuntimeError(
+ "Internal error: NumPy dtype unrecognized type number")
+
+
+def _byte_order_str(dtype):
+ """ Normalize byteorder to '<' or '>' """
+ # hack to obtain the native and swapped byte order characters
+ swapped = np.dtype(int).newbyteorder('s')
+ native = swapped.newbyteorder('s')
+
+ byteorder = dtype.byteorder
+ if byteorder == '=':
+ return native.byteorder
+ if byteorder == 's':
+ # TODO: this path can never be reached
+ return swapped.byteorder
+ elif byteorder == '|':
+ return ''
+ else:
+ return byteorder
+
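+# e.g., on a little-endian machine (an assumption) one would expect:
+#   _byte_order_str(np.dtype('=i4')) -> '<'
+#   _byte_order_str(np.dtype('>i4')) -> '>'
+#   _byte_order_str(np.dtype('u1'))  -> ''   (byte order '|' is inapplicable)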
+
+def _datetime_metadata_str(dtype):
+ # TODO: this duplicates the C append_metastr_to_string
+ unit, count = np.datetime_data(dtype)
+ if unit == 'generic':
+ return ''
+ elif count == 1:
+ return '[{}]'.format(unit)
+ else:
+ return '[{}{}]'.format(count, unit)
+
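+# e.g., assuming the helper above:
+#   _datetime_metadata_str(np.dtype('M8'))      -> ''        (generic unit)
+#   _datetime_metadata_str(np.dtype('m8[h]'))   -> '[h]'
+#   _datetime_metadata_str(np.dtype('M8[25s]')) -> '[25s]'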
+
+def _struct_dict_str(dtype, includealignedflag):
+ # unpack the fields dictionary into ls
+ names = dtype.names
+ fld_dtypes = []
+ offsets = []
+ titles = []
+ for name in names:
+ fld_dtype, offset, title = _unpack_field(*dtype.fields[name])
+ fld_dtypes.append(fld_dtype)
+ offsets.append(offset)
+ titles.append(title)
+
+ # Build up a string to make the dictionary
+
+ # First, the names
+ ret = "{'names':["
+ ret += ",".join(repr(name) for name in names)
+
+ # Second, the formats
+ ret += "], 'formats':["
+ ret += ",".join(
+ _construction_repr(fld_dtype, short=True) for fld_dtype in fld_dtypes)
+
+ # Third, the offsets
+ ret += "], 'offsets':["
+ ret += ",".join("%d" % offset for offset in offsets)
+
+ # Fourth, the titles
+ if any(title is not None for title in titles):
+ ret += "], 'titles':["
+ ret += ",".join(repr(title) for title in titles)
+
+ # Fifth, the itemsize
+ ret += "], 'itemsize':%d" % dtype.itemsize
+
+ if (includealignedflag and dtype.isalignedstruct):
+ # Finally, the aligned flag
+ ret += ", 'aligned':True}"
+ else:
+ ret += "}"
+
+ return ret
+
+
+def _is_packed(dtype):
+ """
+ Checks whether the structured data type in 'dtype'
+ has a simple layout, where all the fields are in order,
+ and follow each other with no alignment padding.
+
+ When this returns true, the dtype can be reconstructed
+ from a list of the field names and dtypes with no additional
+ dtype parameters.
+
+    Duplicates the C `is_dtype_struct_simple_unaligned_layout` function.
+ """
+ total_offset = 0
+ for name in dtype.names:
+ fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name])
+ if fld_offset != total_offset:
+ return False
+ total_offset += fld_dtype.itemsize
+ if total_offset != dtype.itemsize:
+ return False
+ return True
+
+
+def _struct_list_str(dtype):
+ items = []
+ for name in dtype.names:
+ fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name])
+
+ item = "("
+ if title is not None:
+ item += "({!r}, {!r}), ".format(title, name)
+ else:
+ item += "{!r}, ".format(name)
+ # Special case subarray handling here
+ if fld_dtype.subdtype is not None:
+ base, shape = fld_dtype.subdtype
+ item += "{}, {}".format(
+ _construction_repr(base, short=True),
+ shape
+ )
+ else:
+ item += _construction_repr(fld_dtype, short=True)
+
+ item += ")"
+ items.append(item)
+
+ return "[" + ", ".join(items) + "]"
+
+
+def _struct_str(dtype, include_align):
+ # The list str representation can't include the 'align=' flag,
+ # so if it is requested and the struct has the aligned flag set,
+ # we must use the dict str instead.
+ if not (include_align and dtype.isalignedstruct) and _is_packed(dtype):
+ sub = _struct_list_str(dtype)
+
+ else:
+ sub = _struct_dict_str(dtype, include_align)
+
+ # If the data type isn't the default, void, show it
+ if dtype.type != np.void:
+ return "({t.__module__}.{t.__name__}, {f})".format(t=dtype.type, f=sub)
+ else:
+ return sub
+
+
+def _subarray_str(dtype):
+ base, shape = dtype.subdtype
+ return "({}, {})".format(
+ _construction_repr(base, short=True),
+ shape
+ )
+
+
+def _name_get(dtype):
+ # provides dtype.name.__get__
+
+ if dtype.isbuiltin == 2:
+ # user dtypes don't promise to do anything special
+ return dtype.type.__name__
+
+ # Builtin classes are documented as returning a "bit name"
+ name = dtype.type.__name__
+
+ # handle bool_, str_, etc
+ if name[-1] == '_':
+ name = name[:-1]
+
+ # append bit counts to str, unicode, and void
+ if np.issubdtype(dtype, np.flexible) and not _isunsized(dtype):
+ name += "{}".format(dtype.itemsize * 8)
+
+ # append metadata to datetimes
+ elif dtype.type in (np.datetime64, np.timedelta64):
+ name += _datetime_metadata_str(dtype)
+
+ return name
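As a rough sketch of what these private helpers produce, exercised through
the public dtype API (byte-order output assumes a little-endian platform)::

    import numpy as np

    # _scalar_str: long type-name form for native types, kind+itemsize
    # form once a byte order must be spelled out
    repr(np.dtype(np.float64))   # "dtype('float64')"
    repr(np.dtype('>f8'))        # "dtype('>f8')"

    # _struct_list_str: a packed struct reprs as a plain list of fields
    np.dtype([('x', '<i4'), ('y', '<f8')])

    # _struct_dict_str: offsets/padding force the dict form, and
    # include_align adds 'aligned':True when the struct is aligned
    np.dtype({'names': ['x'], 'formats': ['<i4'],
              'offsets': [0], 'itemsize': 16})

    # _name_get: the "bit name" backing dtype.name
    np.dtype('S5').name          # 'bytes40' on Python 3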
diff --git a/numpy/core/_dtype_ctypes.py b/numpy/core/_dtype_ctypes.py
new file mode 100644
index 000000000..0852b1ef2
--- /dev/null
+++ b/numpy/core/_dtype_ctypes.py
@@ -0,0 +1,113 @@
+"""
+Conversion from ctypes to dtype.
+
+In an ideal world, we could achieve this through the PEP3118 buffer protocol,
+something like::
+
+ def dtype_from_ctypes_type(t):
+ # needed to ensure that the shape of `t` is within memoryview.format
+ class DummyStruct(ctypes.Structure):
+ _fields_ = [('a', t)]
+
+ # empty to avoid memory allocation
+ ctype_0 = (DummyStruct * 0)()
+ mv = memoryview(ctype_0)
+
+ # convert the struct, and slice back out the field
+ return _dtype_from_pep3118(mv.format)['a']
+
+Unfortunately, this fails because:
+
+* ctypes cannot handle length-0 arrays with PEP3118 (bpo-32782)
+* PEP3118 cannot represent unions, but both numpy and ctypes can
+* ctypes cannot handle big-endian structs with PEP3118 (bpo-32780)
+"""
+import _ctypes
+import ctypes
+
+import numpy as np
+
+
+def _from_ctypes_array(t):
+ return np.dtype((dtype_from_ctypes_type(t._type_), (t._length_,)))
+
+
+def _from_ctypes_structure(t):
+ for item in t._fields_:
+ if len(item) > 2:
+ raise TypeError(
+ "ctypes bitfields have no dtype equivalent")
+
+ if hasattr(t, "_pack_"):
+ formats = []
+ offsets = []
+ names = []
+ current_offset = 0
+ for fname, ftyp in t._fields_:
+ names.append(fname)
+ formats.append(dtype_from_ctypes_type(ftyp))
+            # Each type has a default offset; this is platform-dependent for some types.
+ effective_pack = min(t._pack_, ctypes.alignment(ftyp))
+ current_offset = ((current_offset + effective_pack - 1) // effective_pack) * effective_pack
+ offsets.append(current_offset)
+ current_offset += ctypes.sizeof(ftyp)
+
+ return np.dtype(dict(
+ formats=formats,
+ offsets=offsets,
+ names=names,
+ itemsize=ctypes.sizeof(t)))
+ else:
+ fields = []
+ for fname, ftyp in t._fields_:
+ fields.append((fname, dtype_from_ctypes_type(ftyp)))
+
+ # by default, ctypes structs are aligned
+ return np.dtype(fields, align=True)
+
+
+def _from_ctypes_scalar(t):
+ """
+ Return the dtype type with endianness included if it's the case
+ """
+ if getattr(t, '__ctype_be__', None) is t:
+ return np.dtype('>' + t._type_)
+ elif getattr(t, '__ctype_le__', None) is t:
+ return np.dtype('<' + t._type_)
+ else:
+ return np.dtype(t._type_)
+
+
+def _from_ctypes_union(t):
+ formats = []
+ offsets = []
+ names = []
+ for fname, ftyp in t._fields_:
+ names.append(fname)
+ formats.append(dtype_from_ctypes_type(ftyp))
+ offsets.append(0) # Union fields are offset to 0
+
+ return np.dtype(dict(
+ formats=formats,
+ offsets=offsets,
+ names=names,
+ itemsize=ctypes.sizeof(t)))
+
+
+def dtype_from_ctypes_type(t):
+ """
+ Construct a dtype object from a ctypes type
+ """
+ if issubclass(t, _ctypes.Array):
+ return _from_ctypes_array(t)
+ elif issubclass(t, _ctypes._Pointer):
+ raise TypeError("ctypes pointers have no dtype equivalent")
+ elif issubclass(t, _ctypes.Structure):
+ return _from_ctypes_structure(t)
+ elif issubclass(t, _ctypes.Union):
+ return _from_ctypes_union(t)
+ elif isinstance(getattr(t, '_type_', None), str):
+ return _from_ctypes_scalar(t)
+ else:
+ raise NotImplementedError(
+ "Unknown ctypes type {}".format(t.__name__))
diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py
index e658fc514..27a3deeda 100644
--- a/numpy/core/_internal.py
+++ b/numpy/core/_internal.py
@@ -1,5 +1,5 @@
"""
-A place for code to be called from core C-code.
+A place for internal code.
 Some things are more easily handled in Python.
@@ -9,13 +9,13 @@ from __future__ import division, absolute_import, print_function
import re
import sys
-from numpy.compat import basestring, unicode
+from numpy.compat import unicode
+from numpy.core.overrides import set_module
from .multiarray import dtype, array, ndarray
try:
import ctypes
except ImportError:
ctypes = None
-from .numerictypes import object_
if (sys.byteorder == 'little'):
_nbo = b'<'
@@ -238,53 +238,158 @@ _getintp_ctype.cache = None
class _missing_ctypes(object):
def cast(self, num, obj):
- return num
+ return num.value
+
+ class c_void_p(object):
+ def __init__(self, ptr):
+ self.value = ptr
+
+
+class _unsafe_first_element_pointer(object):
+ """
+ Helper to allow viewing an array as a ctypes pointer to the first element
+
+ This avoids:
+ * dealing with strides
+ * `.view` rejecting object-containing arrays
+ * `memoryview` not supporting overlapping fields
+ """
+ def __init__(self, arr):
+ self.base = arr
+
+ @property
+ def __array_interface__(self):
+ i = dict(
+ shape=(),
+ typestr='|V0',
+ data=(self.base.__array_interface__['data'][0], False),
+ strides=(),
+ version=3,
+ )
+ return i
+
+
+def _get_void_ptr(arr):
+ """
+ Get a `ctypes.c_void_p` to arr.data, that keeps a reference to the array
+ """
+ import numpy as np
+    # convert to a 0d array that has a data pointer referring to the start
+ # of arr. This holds a reference to arr.
+ simple_arr = np.asarray(_unsafe_first_element_pointer(arr))
+
+ # create a `char[0]` using the same memory.
+ c_arr = (ctypes.c_char * 0).from_buffer(simple_arr)
+
+ # finally cast to void*
+ return ctypes.cast(ctypes.pointer(c_arr), ctypes.c_void_p)
- def c_void_p(self, num):
- return num
class _ctypes(object):
def __init__(self, array, ptr=None):
+ self._arr = array
+
if ctypes:
self._ctypes = ctypes
+ # get a void pointer to the buffer, which keeps the array alive
+ self._data = _get_void_ptr(array)
+ assert self._data.value == ptr
else:
+ # fake a pointer-like object that holds onto the reference
self._ctypes = _missing_ctypes()
- self._arr = array
- self._data = ptr
+ self._data = self._ctypes.c_void_p(ptr)
+ self._data._objects = array
+
if self._arr.ndim == 0:
self._zerod = True
else:
self._zerod = False
def data_as(self, obj):
+ """
+ Return the data pointer cast to a particular c-types object.
+ For example, calling ``self._as_parameter_`` is equivalent to
+ ``self.data_as(ctypes.c_void_p)``. Perhaps you want to use the data as a
+ pointer to a ctypes array of floating-point data:
+ ``self.data_as(ctypes.POINTER(ctypes.c_double))``.
+
+ The returned pointer will keep a reference to the array.
+ """
return self._ctypes.cast(self._data, obj)
def shape_as(self, obj):
+ """
+ Return the shape tuple as an array of some other c-types
+ type. For example: ``self.shape_as(ctypes.c_short)``.
+ """
if self._zerod:
return None
return (obj*self._arr.ndim)(*self._arr.shape)
def strides_as(self, obj):
+ """
+ Return the strides tuple as an array of some other
+ c-types type. For example: ``self.strides_as(ctypes.c_longlong)``.
+ """
if self._zerod:
return None
return (obj*self._arr.ndim)(*self._arr.strides)
- def get_data(self):
- return self._data
-
- def get_shape(self):
+ @property
+ def data(self):
+ """
+ A pointer to the memory area of the array as a Python integer.
+ This memory area may contain data that is not aligned, or not in correct
+ byte-order. The memory area may not even be writeable. The array
+ flags and data-type of this array should be respected when passing this
+ attribute to arbitrary C-code to avoid trouble that can include Python
+ crashing. User Beware! The value of this attribute is exactly the same
+        as ``self.__array_interface__['data'][0]``.
+
+ Note that unlike `data_as`, a reference will not be kept to the array:
+ code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a
+ pointer to a deallocated array, and should be spelt
+ ``(a + b).ctypes.data_as(ctypes.c_void_p)``
+ """
+ return self._data.value
+
+ @property
+ def shape(self):
+ """
+ (c_intp*self.ndim): A ctypes array of length self.ndim where
+ the basetype is the C-integer corresponding to ``dtype('p')`` on this
+ platform. This base-type could be `ctypes.c_int`, `ctypes.c_long`, or
+ `ctypes.c_longlong` depending on the platform.
+ The c_intp type is defined accordingly in `numpy.ctypeslib`.
+ The ctypes array contains the shape of the underlying array.
+ """
return self.shape_as(_getintp_ctype())
- def get_strides(self):
+ @property
+ def strides(self):
+ """
+ (c_intp*self.ndim): A ctypes array of length self.ndim where
+ the basetype is the same as for the shape attribute. This ctypes array
+ contains the strides information from the underlying array. This strides
+ information is important for showing how many bytes must be jumped to
+ get to the next element in the array.
+ """
return self.strides_as(_getintp_ctype())
- def get_as_parameter(self):
- return self._ctypes.c_void_p(self._data)
+ @property
+ def _as_parameter_(self):
+ """
+ Overrides the ctypes semi-magic method
+
+ Enables `c_func(some_array.ctypes)`
+ """
+ return self._data
- data = property(get_data, None, doc="c-types data")
- shape = property(get_shape, None, doc="c-types shape")
- strides = property(get_strides, None, doc="c-types strides")
- _as_parameter_ = property(get_as_parameter, None, doc="_as parameter_")
+ # kept for compatibility
+ get_data = data.fget
+ get_shape = shape.fget
+ get_strides = strides.fget
+ get_as_parameter = _as_parameter_.fget
def _newnames(datatype, order):
@@ -444,46 +549,52 @@ _pep3118_standard_map = {
}
_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())
-def _dtype_from_pep3118(spec):
+_pep3118_unsupported_map = {
+ 'u': 'UCS-2 strings',
+ '&': 'pointers',
+ 't': 'bitfields',
+ 'X': 'function pointers',
+}
- class Stream(object):
- def __init__(self, s):
- self.s = s
- self.byteorder = '@'
+class _Stream(object):
+ def __init__(self, s):
+ self.s = s
+ self.byteorder = '@'
- def advance(self, n):
- res = self.s[:n]
- self.s = self.s[n:]
- return res
+ def advance(self, n):
+ res = self.s[:n]
+ self.s = self.s[n:]
+ return res
- def consume(self, c):
- if self.s[:len(c)] == c:
- self.advance(len(c))
- return True
- return False
-
- def consume_until(self, c):
- if callable(c):
- i = 0
- while i < len(self.s) and not c(self.s[i]):
- i = i + 1
- return self.advance(i)
- else:
- i = self.s.index(c)
- res = self.advance(i)
- self.advance(len(c))
- return res
+ def consume(self, c):
+ if self.s[:len(c)] == c:
+ self.advance(len(c))
+ return True
+ return False
+
+ def consume_until(self, c):
+ if callable(c):
+ i = 0
+ while i < len(self.s) and not c(self.s[i]):
+ i = i + 1
+ return self.advance(i)
+ else:
+ i = self.s.index(c)
+ res = self.advance(i)
+ self.advance(len(c))
+ return res
- @property
- def next(self):
- return self.s[0]
+ @property
+ def next(self):
+ return self.s[0]
- def __bool__(self):
- return bool(self.s)
- __nonzero__ = __bool__
+ def __bool__(self):
+ return bool(self.s)
+ __nonzero__ = __bool__
- stream = Stream(spec)
+def _dtype_from_pep3118(spec):
+ stream = _Stream(spec)
dtype, align = __dtype_from_pep3118(stream, is_subdtype=False)
return dtype
@@ -555,6 +666,11 @@ def __dtype_from_pep3118(stream, is_subdtype):
stream.byteorder, stream.byteorder)
value = dtype(numpy_byteorder + dtypechar)
align = value.alignment
+ elif stream.next in _pep3118_unsupported_map:
+ desc = _pep3118_unsupported_map[stream.next]
+ raise NotImplementedError(
+ "Unrepresentable PEP 3118 data type {!r} ({})"
+ .format(stream.next, desc))
else:
raise ValueError("Unknown PEP 3118 data type specifier %r" % stream.s)
@@ -680,9 +796,11 @@ def _lcm(a, b):
return a // _gcd(a, b) * b
# Exception used in shares_memory()
+@set_module('numpy')
class TooHardError(RuntimeError):
pass
+@set_module('numpy')
class AxisError(ValueError, IndexError):
""" Axis supplied was invalid. """
def __init__(self, axis, ndim=None, msg_prefix=None):
@@ -758,14 +876,46 @@ def _ufunc_doc_signature_formatter(ufunc):
)
-def _is_from_ctypes(obj):
- # determine if an object comes from ctypes, in order to work around
+def npy_ctypes_check(cls):
+ # determine if a class comes from ctypes, in order to work around
# a bug in the buffer protocol for those objects, bpo-10746
try:
     # ctypes classes are new-style, so they have an __mro__. This probably fails
# for ctypes classes with multiple inheritance.
- ctype_base = type(obj).__mro__[-2]
+ ctype_base = cls.__mro__[-2]
# right now, they're part of the _ctypes module
return 'ctypes' in ctype_base.__module__
except Exception:
return False
+
+
+class recursive(object):
+ '''
+ A decorator class for recursive nested functions.
+ Naive recursive nested functions hold a reference to themselves:
+
+ def outer(*args):
+ def stringify_leaky(arg0, *arg1):
+ if len(arg1) > 0:
+ return stringify_leaky(*arg1) # <- HERE
+ return str(arg0)
+ stringify_leaky(*args)
+
+ This design pattern creates a reference cycle that is difficult for a
+ garbage collector to resolve. The decorator class prevents the
+ cycle by passing the nested function in as an argument `self`:
+
+ def outer(*args):
+ @recursive
+ def stringify(self, arg0, *arg1):
+ if len(arg1) > 0:
+ return self(*arg1)
+ return str(arg0)
+ stringify(*args)
+
+ '''
+ def __init__(self, func):
+ self.func = func
+ def __call__(self, *args, **kwargs):
+ return self.func(self, *args, **kwargs)
+
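A small runnable check of the decorator (the flattening helper is made up
for illustration)::

    from numpy.core._internal import recursive

    def flatten(nested):
        @recursive
        def walk(self, item):
            # `self` is the decorated function, so no closure cycle forms
            if isinstance(item, list):
                out = []
                for sub in item:
                    out.extend(self(sub))
                return out
            return [item]
        return walk(nested)

    assert flatten([1, [2, [3, 4]], 5]) == [1, 2, 3, 4, 5]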
diff --git a/numpy/core/_methods.py b/numpy/core/_methods.py
index 33f6d01a8..baeab6383 100644
--- a/numpy/core/_methods.py
+++ b/numpy/core/_methods.py
@@ -154,3 +154,15 @@ def _ptp(a, axis=None, out=None, keepdims=False):
umr_minimum(a, axis, None, None, keepdims),
out
)
+
+_NDARRAY_ARRAY_FUNCTION = mu.ndarray.__array_function__
+
+def _array_function(self, func, types, args, kwargs):
+ # TODO: rewrite this in C
+ # Cannot handle items that have __array_function__ other than our own.
+ for t in types:
+ if not issubclass(t, mu.ndarray) and hasattr(t, '__array_function__'):
+ return NotImplemented
+
+ # The regular implementation can handle this, so we call it directly.
+ return func.__wrapped__(*args, **kwargs)
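Roughly, the dispatch rule this implements under NEP 18 (a sketch with a
made-up duck type; in 1.16 the protocol must be opted into with
``NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=1``)::

    import numpy as np

    class Duck(object):
        def __array_function__(self, func, types, args, kwargs):
            return 'intercepted {}'.format(func.__name__)

    # plain ndarrays fall through to the wrapped implementation
    np.concatenate([np.zeros(2), np.ones(2)])

    # ndarray.__array_function__ defers to any foreign implementation,
    # so the duck type would win the dispatch:
    # np.concatenate([Duck()])  ->  'intercepted concatenate'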
diff --git a/numpy/core/_string_helpers.py b/numpy/core/_string_helpers.py
new file mode 100644
index 000000000..45e6a739e
--- /dev/null
+++ b/numpy/core/_string_helpers.py
@@ -0,0 +1,100 @@
+"""
+String-handling utilities to avoid locale-dependence.
+
+Used primarily to generate type name aliases.
+"""
+# "import string" is costly to import!
+# Construct the translation tables directly
+# "A" = chr(65), "a" = chr(97)
+_all_chars = [chr(_m) for _m in range(256)]
+_ascii_upper = _all_chars[65:65+26]
+_ascii_lower = _all_chars[97:97+26]
+LOWER_TABLE = "".join(_all_chars[:65] + _ascii_lower + _all_chars[65+26:])
+UPPER_TABLE = "".join(_all_chars[:97] + _ascii_upper + _all_chars[97+26:])
+
+
+def english_lower(s):
+ """ Apply English case rules to convert ASCII strings to all lower case.
+
+ This is an internal utility function to replace calls to str.lower() such
+ that we can avoid changing behavior with changing locales. In particular,
+ Turkish has distinct dotted and dotless variants of the Latin letter "I" in
+ both lowercase and uppercase. Thus, "I".lower() != "i" in a "tr" locale.
+
+ Parameters
+ ----------
+ s : str
+
+ Returns
+ -------
+ lowered : str
+
+ Examples
+ --------
+ >>> from numpy.core.numerictypes import english_lower
+ >>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
+ 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_'
+ >>> english_lower('')
+ ''
+ """
+ lowered = s.translate(LOWER_TABLE)
+ return lowered
+
+
+def english_upper(s):
+ """ Apply English case rules to convert ASCII strings to all upper case.
+
+ This is an internal utility function to replace calls to str.upper() such
+ that we can avoid changing behavior with changing locales. In particular,
+ Turkish has distinct dotted and dotless variants of the Latin letter "I" in
+ both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale.
+
+ Parameters
+ ----------
+ s : str
+
+ Returns
+ -------
+ uppered : str
+
+ Examples
+ --------
+ >>> from numpy.core.numerictypes import english_upper
+ >>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
+ 'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
+ >>> english_upper('')
+ ''
+ """
+ uppered = s.translate(UPPER_TABLE)
+ return uppered
+
+
+def english_capitalize(s):
+ """ Apply English case rules to convert the first character of an ASCII
+ string to upper case.
+
+ This is an internal utility function to replace calls to str.capitalize()
+ such that we can avoid changing behavior with changing locales.
+
+ Parameters
+ ----------
+ s : str
+
+ Returns
+ -------
+ capitalized : str
+
+ Examples
+ --------
+ >>> from numpy.core.numerictypes import english_capitalize
+ >>> english_capitalize('int8')
+ 'Int8'
+ >>> english_capitalize('Int8')
+ 'Int8'
+ >>> english_capitalize('')
+ ''
+ """
+ if s:
+ return english_upper(s[0]) + s[1:]
+ else:
+ return s
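A quick check of the ASCII-only behavior (a sketch; the whole point is
that the result does not depend on the process locale)::

    from numpy.core._string_helpers import (
        english_lower, english_upper, english_capitalize)

    assert english_lower('Complex64') == 'complex64'
    assert english_upper('float32') == 'FLOAT32'
    assert english_capitalize('int8') == 'Int8'
    # code points outside A-Z / a-z pass through untouched
    assert english_lower('µs') == 'µs'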
diff --git a/numpy/core/_type_aliases.py b/numpy/core/_type_aliases.py
new file mode 100644
index 000000000..d6e1a1fb7
--- /dev/null
+++ b/numpy/core/_type_aliases.py
@@ -0,0 +1,282 @@
+"""
+For backwards compatibility, numpy has a very large number of different naming
+conventions for the scalar types (those subclassing from `numpy.generic`).
+This file produces a convoluted set of dictionaries mapping names to types,
+and sometimes other mappings too.
+
+.. data:: allTypes
+ A dictionary of names to types that will be exposed as attributes through
+ ``np.core.numerictypes.*``
+
+.. data:: sctypeDict
+ Similar to `allTypes`, but maps a broader set of aliases to their types.
+
+.. data:: sctypeNA
+ NumArray-compatible names for the scalar types. Contains not only
+ ``name: type`` mappings, but ``char: name`` mappings too.
+
+ .. deprecated:: 1.16
+
+.. data:: sctypes
+ A dictionary keyed by a "type group" string, providing a list of types
+ under that group.
+
+"""
+import warnings
+import sys
+
+from numpy.compat import unicode
+from numpy._globals import VisibleDeprecationWarning
+from numpy.core._string_helpers import english_lower, english_capitalize
+from numpy.core.multiarray import typeinfo, dtype
+from numpy.core._dtype import _kind_name
+
+
+sctypeDict = {} # Contains all leaf-node scalar types with aliases
+class TypeNADict(dict):
+ def __getitem__(self, key):
+ # 2018-06-24, 1.16
+ warnings.warn('sctypeNA and typeNA will be removed in v1.18 '
+ 'of numpy', VisibleDeprecationWarning, stacklevel=2)
+ return dict.__getitem__(self, key)
+ def get(self, key, default=None):
+ # 2018-06-24, 1.16
+ warnings.warn('sctypeNA and typeNA will be removed in v1.18 '
+ 'of numpy', VisibleDeprecationWarning, stacklevel=2)
+ return dict.get(self, key, default)
+
+sctypeNA = TypeNADict()  # Contains all leaf-node types -> numarray type equivalences
+allTypes = {} # Collect the types we will add to the module
+
+
+# separate the actual type info from the abstract base classes
+_abstract_types = {}
+_concrete_typeinfo = {}
+for k, v in typeinfo.items():
+ # make all the keys lowercase too
+ k = english_lower(k)
+ if isinstance(v, type):
+ _abstract_types[k] = v
+ else:
+ _concrete_typeinfo[k] = v
+
+_concrete_types = {v.type for k, v in _concrete_typeinfo.items()}
+
+
+def _bits_of(obj):
+ try:
+ info = next(v for v in _concrete_typeinfo.values() if v.type is obj)
+ except StopIteration:
+ if obj in _abstract_types.values():
+ raise ValueError("Cannot count the bits of an abstract type")
+
+ # some third-party type - make a best-guess
+ return dtype(obj).itemsize * 8
+ else:
+ return info.bits
+
+
+def bitname(obj):
+ """Return a bit-width name for a given type object"""
+ bits = _bits_of(obj)
+ dt = dtype(obj)
+ char = dt.kind
+ base = _kind_name(dt)
+
+ if base == 'object':
+ bits = 0
+
+ if bits != 0:
+ char = "%s%d" % (char, bits // 8)
+
+ return base, bits, char
+
+
+def _add_types():
+ for name, info in _concrete_typeinfo.items():
+ # define C-name and insert typenum and typechar references also
+ allTypes[name] = info.type
+ sctypeDict[name] = info.type
+ sctypeDict[info.char] = info.type
+ sctypeDict[info.num] = info.type
+
+ for name, cls in _abstract_types.items():
+ allTypes[name] = cls
+_add_types()
+
+# This is the priority order used to assign the bit-sized NPY_INTxx names, which
+# must match the order in npy_common.h in order for NPY_INTxx and np.intxx to be
+# consistent.
+# If two C types have the same size, then the earliest one in this list is used
+# as the sized name.
+_int_ctypes = ['long', 'longlong', 'int', 'short', 'byte']
+_uint_ctypes = list('u' + t for t in _int_ctypes)
+
+def _add_aliases():
+ for name, info in _concrete_typeinfo.items():
+ # these are handled by _add_integer_aliases
+ if name in _int_ctypes or name in _uint_ctypes:
+ continue
+
+ # insert bit-width version for this class (if relevant)
+ base, bit, char = bitname(info.type)
+
+ myname = "%s%d" % (base, bit)
+
+ # ensure that (c)longdouble does not overwrite the aliases assigned to
+ # (c)double
+ if name in ('longdouble', 'clongdouble') and myname in allTypes:
+ continue
+
+ base_capitalize = english_capitalize(base)
+ if base == 'complex':
+ na_name = '%s%d' % (base_capitalize, bit//2)
+ elif base == 'bool':
+ na_name = base_capitalize
+ else:
+ na_name = "%s%d" % (base_capitalize, bit)
+
+ allTypes[myname] = info.type
+
+ # add mapping for both the bit name and the numarray name
+ sctypeDict[myname] = info.type
+ sctypeDict[na_name] = info.type
+
+ # add forward, reverse, and string mapping to numarray
+ sctypeNA[na_name] = info.type
+ sctypeNA[info.type] = na_name
+ sctypeNA[info.char] = na_name
+
+ sctypeDict[char] = info.type
+ sctypeNA[char] = na_name
+_add_aliases()
+
+def _add_integer_aliases():
+ seen_bits = set()
+ for i_ctype, u_ctype in zip(_int_ctypes, _uint_ctypes):
+ i_info = _concrete_typeinfo[i_ctype]
+ u_info = _concrete_typeinfo[u_ctype]
+ bits = i_info.bits # same for both
+
+ for info, charname, intname, Intname in [
+ (i_info,'i%d' % (bits//8,), 'int%d' % bits, 'Int%d' % bits),
+ (u_info,'u%d' % (bits//8,), 'uint%d' % bits, 'UInt%d' % bits)]:
+ if bits not in seen_bits:
+ # sometimes two different types have the same number of bits
+ # if so, the one iterated over first takes precedence
+ allTypes[intname] = info.type
+ sctypeDict[intname] = info.type
+ sctypeDict[Intname] = info.type
+ sctypeDict[charname] = info.type
+ sctypeNA[Intname] = info.type
+ sctypeNA[charname] = info.type
+ sctypeNA[info.type] = Intname
+ sctypeNA[info.char] = Intname
+
+ seen_bits.add(bits)
+
+_add_integer_aliases()
+
+# We use these later
+void = allTypes['void']
+
+#
+# Rework the Python names (so that float and complex and int are consistent
+# with Python usage)
+#
+def _set_up_aliases():
+ type_pairs = [('complex_', 'cdouble'),
+ ('int0', 'intp'),
+ ('uint0', 'uintp'),
+ ('single', 'float'),
+ ('csingle', 'cfloat'),
+ ('singlecomplex', 'cfloat'),
+ ('float_', 'double'),
+ ('intc', 'int'),
+ ('uintc', 'uint'),
+ ('int_', 'long'),
+ ('uint', 'ulong'),
+ ('cfloat', 'cdouble'),
+ ('longfloat', 'longdouble'),
+ ('clongfloat', 'clongdouble'),
+ ('longcomplex', 'clongdouble'),
+ ('bool_', 'bool'),
+ ('bytes_', 'string'),
+ ('string_', 'string'),
+ ('unicode_', 'unicode'),
+ ('object_', 'object')]
+ if sys.version_info[0] >= 3:
+ type_pairs.extend([('str_', 'unicode')])
+ else:
+ type_pairs.extend([('str_', 'string')])
+ for alias, t in type_pairs:
+ allTypes[alias] = allTypes[t]
+ sctypeDict[alias] = sctypeDict[t]
+ # Remove aliases overriding python types and modules
+ to_remove = ['ulong', 'object', 'int', 'float',
+ 'complex', 'bool', 'string', 'datetime', 'timedelta']
+ if sys.version_info[0] >= 3:
+ to_remove.extend(['bytes', 'str'])
+ else:
+ to_remove.extend(['unicode', 'long'])
+
+ for t in to_remove:
+ try:
+ del allTypes[t]
+ del sctypeDict[t]
+ except KeyError:
+ pass
+_set_up_aliases()
+
+
+sctypes = {'int': [],
+ 'uint':[],
+ 'float':[],
+ 'complex':[],
+ 'others':[bool, object, bytes, unicode, void]}
+
+def _add_array_type(typename, bits):
+ try:
+ t = allTypes['%s%d' % (typename, bits)]
+ except KeyError:
+ pass
+ else:
+ sctypes[typename].append(t)
+
+def _set_array_types():
+ ibytes = [1, 2, 4, 8, 16, 32, 64]
+ fbytes = [2, 4, 8, 10, 12, 16, 32, 64]
+ for bytes in ibytes:
+ bits = 8*bytes
+ _add_array_type('int', bits)
+ _add_array_type('uint', bits)
+ for bytes in fbytes:
+ bits = 8*bytes
+ _add_array_type('float', bits)
+ _add_array_type('complex', 2*bits)
+ _gi = dtype('p')
+ if _gi.type not in sctypes['int']:
+ indx = 0
+ sz = _gi.itemsize
+ _lst = sctypes['int']
+ while (indx < len(_lst) and sz >= _lst[indx](0).itemsize):
+ indx += 1
+ sctypes['int'].insert(indx, _gi.type)
+ sctypes['uint'].insert(indx, dtype('P').type)
+_set_array_types()
+
+
+# Add additional strings to the sctypeDict
+_toadd = ['int', 'float', 'complex', 'bool', 'object']
+if sys.version_info[0] >= 3:
+ _toadd.extend(['str', 'bytes', ('a', 'bytes_')])
+else:
+ _toadd.extend(['string', ('str', 'string_'), 'unicode', ('a', 'string_')])
+
+for name in _toadd:
+ if isinstance(name, tuple):
+ sctypeDict[name[0]] = allTypes[name[1]]
+ else:
+ sctypeDict[name] = allTypes['%s_' % name]
+
+del _toadd, name
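A few representative entries from the resulting tables (a sketch; the
exact contents vary with platform word sizes)::

    import numpy as np
    from numpy.core import _type_aliases

    # several spellings resolve to the same concrete scalar type
    assert _type_aliases.sctypeDict['f8'] is np.float64
    assert _type_aliases.sctypeDict['float64'] is np.float64
    assert _type_aliases.allTypes['float_'] is np.float64

    # sctypes groups the concrete types by kind
    assert np.float64 in _type_aliases.sctypes['float']
    assert bool in _type_aliases.sctypes['others']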
diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py
index 6d15cb23f..6a71de226 100644
--- a/numpy/core/arrayprint.py
+++ b/numpy/core/arrayprint.py
@@ -26,6 +26,7 @@ __docformat__ = 'restructuredtext'
import sys
import functools
+import numbers
if sys.version_info[0] >= 3:
try:
from _thread import get_ident
@@ -42,12 +43,13 @@ from . import numerictypes as _nt
from .umath import absolute, not_equal, isnan, isinf, isfinite, isnat
from . import multiarray
from .multiarray import (array, dragon4_positional, dragon4_scientific,
- datetime_as_string, datetime_data, dtype, ndarray,
+ datetime_as_string, datetime_data, ndarray,
set_legacy_print_mode)
from .fromnumeric import ravel, any
from .numeric import concatenate, asarray, errstate
from .numerictypes import (longlong, intc, int_, float_, complex_, bool_,
flexible)
+from .overrides import array_function_dispatch, set_module
import warnings
import contextlib
@@ -85,9 +87,15 @@ def _make_options_dict(precision=None, threshold=None, edgeitems=None,
if legacy not in [None, False, '1.13']:
warnings.warn("legacy printing option can currently only be '1.13' or "
"`False`", stacklevel=3)
-
+ if threshold is not None:
+ # forbid the bad threshold arg suggested by stack overflow, gh-12351
+ if not isinstance(threshold, numbers.Number) or np.isnan(threshold):
+ raise ValueError("threshold must be numeric and non-NAN, try "
+ "sys.maxsize for untruncated representation")
return options
+
+@set_module('numpy')
def set_printoptions(precision=None, threshold=None, edgeitems=None,
linewidth=None, suppress=None, nanstr=None, infstr=None,
formatter=None, sign=None, floatmode=None, **kwarg):
@@ -249,6 +257,7 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None,
set_legacy_print_mode(0)
+@set_module('numpy')
def get_printoptions():
"""
Return the current print options.
@@ -278,6 +287,7 @@ def get_printoptions():
return _format_options.copy()
+@set_module('numpy')
@contextlib.contextmanager
def printoptions(*args, **kwargs):
"""Context manager for setting print options.
@@ -496,6 +506,16 @@ def _array2string(a, options, separator=' ', prefix=""):
return lst
+def _array2string_dispatcher(
+ a, max_line_width=None, precision=None,
+ suppress_small=None, separator=None, prefix=None,
+ style=None, formatter=None, threshold=None,
+ edgeitems=None, sign=None, floatmode=None, suffix=None,
+ **kwarg):
+ return (a,)
+
+
+@array_function_dispatch(_array2string_dispatcher, module='numpy')
def array2string(a, max_line_width=None, precision=None,
suppress_small=None, separator=' ', prefix="",
style=np._NoValue, formatter=None, threshold=None,
@@ -528,6 +548,8 @@ def array2string(a, max_line_width=None, precision=None,
The output is left-padded by the length of the prefix string, and
wrapping is forced at the column ``max_line_width - len(suffix)``.
+        Note that the contents of the prefix and suffix strings are not
+        included in the output.
style : _NoValue, optional
Has no effect, do not use.
@@ -963,6 +985,8 @@ class LongFloatFormat(FloatingFormat):
DeprecationWarning, stacklevel=2)
super(LongFloatFormat, self).__init__(*args, **kwargs)
+
+@set_module('numpy')
def format_float_scientific(x, precision=None, unique=True, trim='k',
sign=False, pad_left=None, exp_digits=None):
"""
@@ -1030,6 +1054,8 @@ def format_float_scientific(x, precision=None, unique=True, trim='k',
trim=trim, sign=sign, pad_left=pad_left,
exp_digits=exp_digits)
+
+@set_module('numpy')
def format_float_positional(x, precision=None, unique=True,
fractional=True, trim='k', sign=False,
pad_left=None, pad_right=None):
@@ -1368,6 +1394,59 @@ def dtype_short_repr(dtype):
return typename
+def _array_repr_implementation(
+ arr, max_line_width=None, precision=None, suppress_small=None,
+ array2string=array2string):
+ """Internal version of array_repr() that allows overriding array2string."""
+ if max_line_width is None:
+ max_line_width = _format_options['linewidth']
+
+ if type(arr) is not ndarray:
+ class_name = type(arr).__name__
+ else:
+ class_name = "array"
+
+ skipdtype = dtype_is_implied(arr.dtype) and arr.size > 0
+
+ prefix = class_name + "("
+ suffix = ")" if skipdtype else ","
+
+ if (_format_options['legacy'] == '1.13' and
+ arr.shape == () and not arr.dtype.names):
+ lst = repr(arr.item())
+ elif arr.size > 0 or arr.shape == (0,):
+ lst = array2string(arr, max_line_width, precision, suppress_small,
+ ', ', prefix, suffix=suffix)
+ else: # show zero-length shape unless it is (0,)
+ lst = "[], shape=%s" % (repr(arr.shape),)
+
+ arr_str = prefix + lst + suffix
+
+ if skipdtype:
+ return arr_str
+
+ dtype_str = "dtype={})".format(dtype_short_repr(arr.dtype))
+
+ # compute whether we should put dtype on a new line: Do so if adding the
+ # dtype would extend the last line past max_line_width.
+ # Note: This line gives the correct result even when rfind returns -1.
+ last_line_len = len(arr_str) - (arr_str.rfind('\n') + 1)
+ spacer = " "
+ if _format_options['legacy'] == '1.13':
+ if issubclass(arr.dtype.type, flexible):
+ spacer = '\n' + ' '*len(class_name + "(")
+ elif last_line_len + len(dtype_str) + 1 > max_line_width:
+ spacer = '\n' + ' '*len(class_name + "(")
+
+ return arr_str + spacer + dtype_str
+
+
+def _array_repr_dispatcher(
+ arr, max_line_width=None, precision=None, suppress_small=None):
+ return (arr,)
+
+
+@array_function_dispatch(_array_repr_dispatcher, module='numpy')
def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
"""
Return the string representation of an array.
@@ -1410,50 +1489,39 @@ def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
'array([ 0.000001, 0. , 2. , 3. ])'
"""
- if max_line_width is None:
- max_line_width = _format_options['linewidth']
+ return _array_repr_implementation(
+ arr, max_line_width, precision, suppress_small)
- if type(arr) is not ndarray:
- class_name = type(arr).__name__
- else:
- class_name = "array"
- skipdtype = dtype_is_implied(arr.dtype) and arr.size > 0
+_guarded_str = _recursive_guard()(str)
- prefix = class_name + "("
- suffix = ")" if skipdtype else ","
+def _array_str_implementation(
+ a, max_line_width=None, precision=None, suppress_small=None,
+ array2string=array2string):
+ """Internal version of array_str() that allows overriding array2string."""
if (_format_options['legacy'] == '1.13' and
- arr.shape == () and not arr.dtype.names):
- lst = repr(arr.item())
- elif arr.size > 0 or arr.shape == (0,):
- lst = array2string(arr, max_line_width, precision, suppress_small,
- ', ', prefix, suffix=suffix)
- else: # show zero-length shape unless it is (0,)
- lst = "[], shape=%s" % (repr(arr.shape),)
-
- arr_str = prefix + lst + suffix
+ a.shape == () and not a.dtype.names):
+ return str(a.item())
- if skipdtype:
- return arr_str
+ # the str of 0d arrays is a special case: It should appear like a scalar,
+ # so floats are not truncated by `precision`, and strings are not wrapped
+ # in quotes. So we return the str of the scalar value.
+ if a.shape == ():
+ # obtain a scalar and call str on it, avoiding problems for subclasses
+ # for which indexing with () returns a 0d instead of a scalar by using
+ # ndarray's getindex. Also guard against recursive 0d object arrays.
+ return _guarded_str(np.ndarray.__getitem__(a, ()))
- dtype_str = "dtype={})".format(dtype_short_repr(arr.dtype))
+ return array2string(a, max_line_width, precision, suppress_small, ' ', "")
- # compute whether we should put dtype on a new line: Do so if adding the
- # dtype would extend the last line past max_line_width.
- # Note: This line gives the correct result even when rfind returns -1.
- last_line_len = len(arr_str) - (arr_str.rfind('\n') + 1)
- spacer = " "
- if _format_options['legacy'] == '1.13':
- if issubclass(arr.dtype.type, flexible):
- spacer = '\n' + ' '*len(class_name + "(")
- elif last_line_len + len(dtype_str) + 1 > max_line_width:
- spacer = '\n' + ' '*len(class_name + "(")
- return arr_str + spacer + dtype_str
+def _array_str_dispatcher(
+ a, max_line_width=None, precision=None, suppress_small=None):
+ return (a,)
-_guarded_str = _recursive_guard()(str)
+@array_function_dispatch(_array_str_dispatcher, module='numpy')
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
"""
Return a string representation of the data in an array.
@@ -1488,20 +1556,17 @@ def array_str(a, max_line_width=None, precision=None, suppress_small=None):
'[0 1 2]'
"""
- if (_format_options['legacy'] == '1.13' and
- a.shape == () and not a.dtype.names):
- return str(a.item())
+ return _array_str_implementation(
+ a, max_line_width, precision, suppress_small)
- # the str of 0d arrays is a special case: It should appear like a scalar,
- # so floats are not truncated by `precision`, and strings are not wrapped
- # in quotes. So we return the str of the scalar value.
- if a.shape == ():
- # obtain a scalar and call str on it, avoiding problems for subclasses
- # for which indexing with () returns a 0d instead of a scalar by using
- # ndarray's getindex. Also guard against recursive 0d object arrays.
- return _guarded_str(np.ndarray.__getitem__(a, ()))
- return array2string(a, max_line_width, precision, suppress_small, ' ', "")
+# needed if __array_function__ is disabled
+_array2string_impl = getattr(array2string, '__wrapped__', array2string)
+_default_array_str = functools.partial(_array_str_implementation,
+ array2string=_array2string_impl)
+_default_array_repr = functools.partial(_array_repr_implementation,
+ array2string=_array2string_impl)
+
def set_string_function(f, repr=True):
"""
@@ -1556,11 +1621,11 @@ def set_string_function(f, repr=True):
"""
if f is None:
if repr:
- return multiarray.set_string_function(array_repr, 1)
+ return multiarray.set_string_function(_default_array_repr, 1)
else:
- return multiarray.set_string_function(array_str, 0)
+ return multiarray.set_string_function(_default_array_str, 0)
else:
return multiarray.set_string_function(f, repr)
-set_string_function(array_str, 0)
-set_string_function(array_repr, 1)
+set_string_function(_default_array_str, 0)
+set_string_function(_default_array_repr, 1)
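The user-visible effect of the new ``threshold`` validation (gh-12351),
sketched::

    import sys
    import numpy as np

    # the widely-circulated threshold=np.nan workaround now fails loudly
    try:
        np.set_printoptions(threshold=np.nan)
    except ValueError:
        pass

    # the supported way to get an untruncated repr
    with np.printoptions(threshold=sys.maxsize):
        print(np.arange(10000))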
diff --git a/numpy/core/code_generators/cversions.txt b/numpy/core/code_generators/cversions.txt
index 43c32eac6..00f10df57 100644
--- a/numpy/core/code_generators/cversions.txt
+++ b/numpy/core/code_generators/cversions.txt
@@ -39,7 +39,12 @@
0x0000000b = edb1ba83730c650fd9bc5772a919cda7
# Version 12 (NumPy 1.14) Added PyArray_ResolveWritebackIfCopy,
-# Version 12 (NumPy 1.15) No change.
# PyArray_SetWritebackIfCopyBase and deprecated PyArray_SetUpdateIfCopyBase.
+# Version 12 (NumPy 1.15) No change.
0x0000000c = a1bc756c5782853ec2e3616cf66869d8
+# Version 13 (NumPy 1.16)
+# Deprecate PyArray_SetNumericOps and PyArray_GetNumericOps,
+# Add fields core_dim_flags and core_dim_sizes to PyUFuncObject.
+# Add PyUFunc_FromFuncAndDataAndSignatureAndIdentity to ufunc_funcs_api.
+0x0000000d = 5b0e8bbded00b166125974fc71e80a33
diff --git a/numpy/core/code_generators/genapi.py b/numpy/core/code_generators/genapi.py
index 42c564a97..1d2cd25c8 100644
--- a/numpy/core/code_generators/genapi.py
+++ b/numpy/core/code_generators/genapi.py
@@ -163,9 +163,7 @@ def skip_brackets(s, lbrac, rbrac):
def split_arguments(argstr):
arguments = []
- bracket_counts = {'(': 0, '[': 0}
current_argument = []
- state = 0
i = 0
def finish_arg():
if current_argument:
@@ -400,9 +398,7 @@ class FunctionApi(object):
return " (void *) %s" % self.name
def internal_define(self):
- annstr = []
- for a in self.annotations:
- annstr.append(str(a))
+ annstr = [str(a) for a in self.annotations]
annstr = ' '.join(annstr)
astr = """\
NPY_NO_EXPORT %s %s %s \\\n (%s);""" % (annstr, self.return_type,
@@ -463,10 +459,7 @@ def get_api_functions(tagname, api_dict):
functions = []
for f in API_FILES:
functions.extend(find_functions(f, tagname))
- dfunctions = []
- for func in functions:
- o = api_dict[func.name][0]
- dfunctions.append( (o, func) )
+ dfunctions = [(api_dict[func.name][0], func) for func in functions]
dfunctions.sort()
return [a[1] for a in dfunctions]
diff --git a/numpy/core/code_generators/generate_numpy_api.py b/numpy/core/code_generators/generate_numpy_api.py
index b4aeaa277..5e04fb86d 100644
--- a/numpy/core/code_generators/generate_numpy_api.py
+++ b/numpy/core/code_generators/generate_numpy_api.py
@@ -46,11 +46,10 @@ static int
_import_array(void)
{
int st;
- PyObject *numpy = PyImport_ImportModule("numpy.core.multiarray");
+ PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath");
PyObject *c_api = NULL;
if (numpy == NULL) {
- PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import");
return -1;
}
c_api = PyObject_GetAttrString(numpy, "_ARRAY_API");
@@ -193,7 +192,9 @@ def do_generate_api(targets, sources):
genapi.check_api_dict(multiarray_api_index)
numpyapi_list = genapi.get_api_functions('NUMPY_API',
- multiarray_funcs)
+ multiarray_funcs)
+
+ # FIXME: ordered_funcs_api is unused
ordered_funcs_api = genapi.order_dict(multiarray_funcs)
# Create dict name -> *Api instance
diff --git a/numpy/core/code_generators/generate_ufunc_api.py b/numpy/core/code_generators/generate_ufunc_api.py
index 3bcf137f7..1b0143e88 100644
--- a/numpy/core/code_generators/generate_ufunc_api.py
+++ b/numpy/core/code_generators/generate_ufunc_api.py
@@ -36,11 +36,12 @@ static void **PyUFunc_API=NULL;
static NPY_INLINE int
_import_umath(void)
{
- PyObject *numpy = PyImport_ImportModule("numpy.core.umath");
+ PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath");
PyObject *c_api = NULL;
if (numpy == NULL) {
- PyErr_SetString(PyExc_ImportError, "numpy.core.umath failed to import");
+ PyErr_SetString(PyExc_ImportError,
+ "numpy.core._multiarray_umath failed to import");
return -1;
}
c_api = PyObject_GetAttrString(numpy, "_UFUNC_API");
diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py
index 632bcb41f..f5ee02c42 100644
--- a/numpy/core/code_generators/generate_umath.py
+++ b/numpy/core/code_generators/generate_umath.py
@@ -10,11 +10,14 @@ sys.path.insert(0, os.path.dirname(__file__))
import ufunc_docstrings as docstrings
sys.path.pop(0)
-Zero = "PyUFunc_Zero"
-One = "PyUFunc_One"
-None_ = "PyUFunc_None"
-AllOnes = "PyUFunc_MinusOne"
-ReorderableNone = "PyUFunc_ReorderableNone"
+Zero = "PyInt_FromLong(0)"
+One = "PyInt_FromLong(1)"
+True_ = "(Py_INCREF(Py_True), Py_True)"
+False_ = "(Py_INCREF(Py_False), Py_False)"
+None_ = object()
+AllOnes = "PyInt_FromLong(-1)"
+MinusInfinity = 'PyFloat_FromDouble(-NPY_INFINITY)'
+ReorderableNone = "(Py_INCREF(Py_None), Py_None)"
# Sentinel value to specify using the full type description in the
# function name
@@ -74,10 +77,7 @@ class TypeDescription(object):
_fdata_map = dict(e='npy_%sf', f='npy_%sf', d='npy_%s', g='npy_%sl',
F='nc_%sf', D='nc_%s', G='nc_%sl')
def build_func_data(types, f):
- func_data = []
- for t in types:
- d = _fdata_map.get(t, '%s') % (f,)
- func_data.append(d)
+ func_data = [_fdata_map.get(t, '%s') % (f,) for t in types]
return func_data
def TD(types, f=None, astype=None, in_=None, out=None, simd=None):
@@ -124,7 +124,7 @@ class Ufunc(object):
type_descriptions : list of TypeDescription objects
"""
def __init__(self, nin, nout, identity, docstring, typereso,
- *type_descriptions):
+ *type_descriptions, **kwargs):
self.nin = nin
self.nout = nout
if identity is None:
@@ -133,10 +133,13 @@ class Ufunc(object):
self.docstring = docstring
self.typereso = typereso
self.type_descriptions = []
+ self.signature = kwargs.pop('signature', None)
for td in type_descriptions:
self.type_descriptions.extend(td)
for td in self.type_descriptions:
td.finish_signature(self.nin, self.nout)
+ if kwargs:
+ raise ValueError('unknown kwargs %r' % str(kwargs))
# String-handling utilities to avoid locale-dependence.
@@ -458,7 +461,7 @@ defdict = {
[TypeDescription('O', FullTypeDescr, 'OO', 'O')],
),
'logical_and':
- Ufunc(2, 1, One,
+ Ufunc(2, 1, True_,
docstrings.get('numpy.core.umath.logical_and'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(nodatetime_or_obj, out='?', simd=[('avx2', ints)]),
@@ -472,14 +475,14 @@ defdict = {
TD(O, f='npy_ObjectLogicalNot'),
),
'logical_or':
- Ufunc(2, 1, Zero,
+ Ufunc(2, 1, False_,
docstrings.get('numpy.core.umath.logical_or'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(nodatetime_or_obj, out='?', simd=[('avx2', ints)]),
TD(O, f='npy_ObjectLogicalOr'),
),
'logical_xor':
- Ufunc(2, 1, Zero,
+ Ufunc(2, 1, False_,
docstrings.get('numpy.core.umath.logical_xor'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(nodatetime_or_obj, out='?'),
@@ -514,7 +517,7 @@ defdict = {
TD(O, f='npy_ObjectMin')
),
'logaddexp':
- Ufunc(2, 1, None,
+ Ufunc(2, 1, MinusInfinity,
docstrings.get('numpy.core.umath.logaddexp'),
None,
TD(flts, f="logaddexp", astype={'e':'f'})
@@ -791,8 +794,9 @@ defdict = {
'remainder':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.remainder'),
- None,
+ 'PyUFunc_RemainderTypeResolver',
TD(intflt),
+ [TypeDescription('m', FullTypeDescr, 'mm', 'm')],
TD(O, f='PyNumber_Remainder'),
),
'divmod':
@@ -900,7 +904,14 @@ defdict = {
"PyUFunc_SimpleBinaryOperationTypeResolver",
TD(ints),
TD('O', f='npy_ObjectLCM'),
- )
+ ),
+'matmul' :
+ Ufunc(2, 1, None,
+ docstrings.get('numpy.core.umath.matmul'),
+ "PyUFunc_SimpleBinaryOperationTypeResolver",
+ TD(notimes_or_obj),
+ signature='(n?,k),(k,m?)->(n?,m?)',
+ ),
}
if sys.version_info[0] >= 3:
@@ -1046,19 +1057,44 @@ def make_ufuncs(funcdict):
# string literal in C code. We split at endlines because textwrap.wrap
     # does not play well with \n
docstring = '\\n\"\"'.join(docstring.split(r"\n"))
+ if uf.signature is None:
+ sig = "NULL"
+ else:
+ sig = '"{}"'.format(uf.signature)
fmt = textwrap.dedent("""\
- f = PyUFunc_FromFuncAndData(
+ identity = {identity_expr};
+ if ({has_identity} && identity == NULL) {{
+ return -1;
+ }}
+ f = PyUFunc_FromFuncAndDataAndSignatureAndIdentity(
{name}_functions, {name}_data, {name}_signatures, {nloops},
{nin}, {nout}, {identity}, "{name}",
- "{doc}", 0
+ "{doc}", 0, {sig}, identity
);
+ if ({has_identity}) {{
+ Py_DECREF(identity);
+ }}
if (f == NULL) {{
return -1;
- }}""")
- mlist.append(fmt.format(
+ }}
+ """)
+ args = dict(
name=name, nloops=len(uf.type_descriptions),
- nin=uf.nin, nout=uf.nout, identity=uf.identity, doc=docstring
- ))
+ nin=uf.nin, nout=uf.nout,
+ has_identity='0' if uf.identity is None_ else '1',
+ identity='PyUFunc_IdentityValue',
+ identity_expr=uf.identity,
+ doc=docstring,
+ sig=sig,
+ )
+
+ # Only PyUFunc_None means don't reorder - we pass this using the old
+ # argument
+ if uf.identity is None_:
+ args['identity'] = 'PyUFunc_None'
+ args['identity_expr'] = 'NULL'
+
+ mlist.append(fmt.format(**args))
if uf.typereso is not None:
mlist.append(
r"((PyUFuncObject *)f)->type_resolver = &%s;" % uf.typereso)
@@ -1080,11 +1116,15 @@ def make_code(funcdict, filename):
Please make changes to the code generator program (%s)
**/
#include "cpuid.h"
+ #include "ufunc_object.h"
+ #include "ufunc_type_resolution.h"
+ #include "loops.h"
+ #include "matmul.h"
%s
static int
InitOperators(PyObject *dictionary) {
- PyObject *f;
+ PyObject *f, *identity;
%s
%s
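The user-visible effect of turning identities into real Python objects,
plus the new loops, sketched against the resulting 1.16 ufuncs::

    import numpy as np

    # logical_and/or now carry proper boolean identities, so empty
    # reductions are well defined
    assert np.logical_and.identity is True
    assert np.logical_and.reduce([]) == True

    # logaddexp gains -inf as its identity
    assert np.logaddexp.identity == -np.inf

    # and remainder now has a timedelta64 loop
    assert (np.timedelta64(7, 's') % np.timedelta64(3, 's')
            == np.timedelta64(1, 's'))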
diff --git a/numpy/core/code_generators/numpy_api.py b/numpy/core/code_generators/numpy_api.py
index d8a9ee6b4..a71c236fd 100644
--- a/numpy/core/code_generators/numpy_api.py
+++ b/numpy/core/code_generators/numpy_api.py
@@ -402,6 +402,8 @@ ufunc_funcs_api = {
# End 1.7 API
'PyUFunc_RegisterLoopForDescr': (41,),
# End 1.8 API
+ 'PyUFunc_FromFuncAndDataAndSignatureAndIdentity': (42,),
+ # End 1.16 API
}
# List of all the dicts which define the C API
diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py
index f7d58a26f..8a690c43d 100644
--- a/numpy/core/code_generators/ufunc_docstrings.py
+++ b/numpy/core/code_generators/ufunc_docstrings.py
@@ -39,7 +39,8 @@ subst = {
def add_newdoc(place, name, doc):
doc = textwrap.dedent(doc).strip()
- if name[0] != '_':
+ if name[0] != '_' and name != 'matmul':
+ # matmul is special, it does not use the OUT_SCALAR replacement strings
if '\nx :' in doc:
assert '$OUT_SCALAR_1' in doc, "in {}".format(name)
elif '\nx2 :' in doc or '\nx1, x2 :' in doc:
@@ -233,7 +234,7 @@ add_newdoc('numpy.core.umath', 'arccosh',
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
- http://en.wikipedia.org/wiki/Arccosh
+ https://en.wikipedia.org/wiki/Arccosh
Examples
--------
@@ -335,7 +336,7 @@ add_newdoc('numpy.core.umath', 'arcsinh',
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
- http://en.wikipedia.org/wiki/Arcsinh
+ https://en.wikipedia.org/wiki/Arcsinh
Examples
--------
@@ -535,7 +536,7 @@ add_newdoc('numpy.core.umath', 'arctanh',
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 86. http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Inverse hyperbolic function",
- http://en.wikipedia.org/wiki/Arctanh
+ https://en.wikipedia.org/wiki/Arctanh
Examples
--------
@@ -1136,7 +1137,7 @@ add_newdoc('numpy.core.umath', 'exp',
References
----------
.. [1] Wikipedia, "Exponential function",
- http://en.wikipedia.org/wiki/Exponential_function
+ https://en.wikipedia.org/wiki/Exponential_function
.. [2] M. Abramovitz and I. A. Stegun, "Handbook of Mathematical Functions
with Formulas, Graphs, and Mathematical Tables," Dover, 1964, p. 69,
http://www.math.sfu.ca/~cbm/aands/page_69.htm
@@ -1551,7 +1552,7 @@ add_newdoc('numpy.core.umath', 'invert',
References
----------
.. [1] Wikipedia, "Two's complement",
- http://en.wikipedia.org/wiki/Two's_complement
+ https://en.wikipedia.org/wiki/Two's_complement
Examples
--------
@@ -1740,6 +1741,8 @@ add_newdoc('numpy.core.umath', 'isnat',
"""
Test element-wise for NaT (not a time) and return result as a boolean array.
+ .. versionadded:: 1.13.0
+
Parameters
----------
x : array_like
@@ -1912,7 +1915,7 @@ add_newdoc('numpy.core.umath', 'log',
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
- .. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
+ .. [2] Wikipedia, "Logarithm". https://en.wikipedia.org/wiki/Logarithm
Examples
--------
@@ -1961,7 +1964,7 @@ add_newdoc('numpy.core.umath', 'log10',
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
- .. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
+ .. [2] Wikipedia, "Logarithm". https://en.wikipedia.org/wiki/Logarithm
Examples
--------
@@ -2147,7 +2150,7 @@ add_newdoc('numpy.core.umath', 'log1p',
----------
.. [1] M. Abramowitz and I.A. Stegun, "Handbook of Mathematical Functions",
10th printing, 1964, pp. 67. http://www.math.sfu.ca/~cbm/aands/
- .. [2] Wikipedia, "Logarithm". http://en.wikipedia.org/wiki/Logarithm
+ .. [2] Wikipedia, "Logarithm". https://en.wikipedia.org/wiki/Logarithm
Examples
--------
@@ -2539,6 +2542,128 @@ add_newdoc('numpy.core.umath', 'fmin',
""")
+add_newdoc('numpy.core.umath', 'matmul',
+ """
+ Matrix product of two arrays.
+
+ Parameters
+ ----------
+ x1, x2 : array_like
+ Input arrays, scalars not allowed.
+ out : ndarray, optional
+ A location into which the result is stored. If provided, it must have
+ a shape that matches the signature `(n,k),(k,m)->(n,m)`. If not
+ provided or `None`, a freshly-allocated array is returned.
+ **kwargs
+ For other keyword-only arguments, see the
+ :ref:`ufunc docs <ufuncs.kwargs>`.
+
+        .. versionadded:: 1.16
+ Now handles ufunc kwargs
+
+ Returns
+ -------
+ y : ndarray
+ The matrix product of the inputs.
+        This is a scalar only when both x1 and x2 are 1-D vectors.
+
+ Raises
+ ------
+ ValueError
+        If the last dimension of `x1` is not the same size as
+        the second-to-last dimension of `x2`.
+
+ If a scalar value is passed in.
+
+ See Also
+ --------
+ vdot : Complex-conjugating dot product.
+ tensordot : Sum products over arbitrary axes.
+ einsum : Einstein summation convention.
+    dot : Alternative matrix product with different broadcasting rules.
+
+ Notes
+ -----
+
+ The behavior depends on the arguments in the following way.
+
+ - If both arguments are 2-D they are multiplied like conventional
+ matrices.
+ - If either argument is N-D, N > 2, it is treated as a stack of
+ matrices residing in the last two indexes and broadcast accordingly.
+ - If the first argument is 1-D, it is promoted to a matrix by
+ prepending a 1 to its dimensions. After matrix multiplication
+ the prepended 1 is removed.
+ - If the second argument is 1-D, it is promoted to a matrix by
+ appending a 1 to its dimensions. After matrix multiplication
+ the appended 1 is removed.
+
+ ``matmul`` differs from ``dot`` in two important ways:
+
+    - Multiplication by scalars is not allowed; use ``*`` instead.
+ - Stacks of matrices are broadcast together as if the matrices
+ were elements, respecting the signature ``(n,k),(k,m)->(n,m)``:
+
+      >>> a = np.full([9, 5, 7, 4], True, dtype=bool)
+      >>> c = np.full([9, 5, 4, 3], True, dtype=bool)
+      >>> np.dot(a, c).shape
+      (9, 5, 7, 9, 5, 3)
+      >>> np.matmul(a, c).shape  # n is 7, k is 4, m is 3
+      (9, 5, 7, 3)
+
+    The matmul function implements the semantics of the ``@`` operator
+    introduced in Python 3.5 following PEP 465.
+
+ Examples
+ --------
+ For 2-D arrays it is the matrix product:
+
+ >>> a = np.array([[1, 0],
+ ... [0, 1]])
+ >>> b = np.array([[4, 1],
+    ...               [2, 2]])
+ >>> np.matmul(a, b)
+ array([[4, 1],
+ [2, 2]])
+
+    For 2-D mixed with 1-D, the result is the usual matrix-vector product:
+
+ >>> a = np.array([[1, 0],
+    ...               [0, 1]])
+ >>> b = np.array([1, 2])
+ >>> np.matmul(a, b)
+ array([1, 2])
+ >>> np.matmul(b, a)
+ array([1, 2])
+
+
+    Broadcasting is conventional for stacks of arrays:
+
+ >>> a = np.arange(2 * 2 * 4).reshape((2, 2, 4))
+ >>> b = np.arange(2 * 2 * 4).reshape((2, 4, 2))
+ >>> np.matmul(a,b).shape
+ (2, 2, 2)
+ >>> np.matmul(a, b)[0, 1, 1]
+ 98
+    >>> sum(a[0, 1, :] * b[0, :, 1])
+ 98
+
+ Vector, vector returns the scalar inner product, but neither argument
+ is complex-conjugated:
+
+ >>> np.matmul([2j, 3j], [2j, 3j])
+ (-13+0j)
+
+    Scalar multiplication raises an error:
+
+ >>> np.matmul([1,2], 3)
+ Traceback (most recent call last):
+ ...
+ ValueError: matmul: Input operand 1 does not have enough dimensions ...
+
+ .. versionadded:: 1.10.0
+ """)
+
add_newdoc('numpy.core.umath', 'modf',
"""
Return the fractional and integral parts of an array, element-wise.
@@ -2592,8 +2717,7 @@ add_newdoc('numpy.core.umath', 'multiply',
Returns
-------
y : ndarray
- The product of `x1` and `x2`, element-wise. Returns a scalar if
- both `x1` and `x2` are scalars.
+ The product of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
Notes
@@ -3578,7 +3702,7 @@ add_newdoc('numpy.core.umath', 'tanh',
http://www.math.sfu.ca/~cbm/aands/
.. [2] Wikipedia, "Hyperbolic function",
- http://en.wikipedia.org/wiki/Hyperbolic_function
+ https://en.wikipedia.org/wiki/Hyperbolic_function
Examples
--------
diff --git a/numpy/core/defchararray.py b/numpy/core/defchararray.py
index 6d0a0add5..12ba3f02e 100644
--- a/numpy/core/defchararray.py
+++ b/numpy/core/defchararray.py
@@ -17,11 +17,14 @@ The preferred alias for `defchararray` is `numpy.char`.
"""
from __future__ import division, absolute_import, print_function
+import functools
import sys
from .numerictypes import string_, unicode_, integer, object_, bool_, character
from .numeric import ndarray, compare_chararrays
from .numeric import array as narray
from numpy.core.multiarray import _vec_string
+from numpy.core.overrides import set_module
+from numpy.core import overrides
from numpy.compat import asbytes, long
import numpy
@@ -47,6 +50,10 @@ else:
_bytes = str
_len = len
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy.char')
+
+
def _use_unicode(*args):
"""
Helper function for determining the output type of some string
@@ -95,6 +102,11 @@ def _get_num_chars(a):
return a.itemsize
+def _binary_op_dispatcher(x1, x2):
+ return (x1, x2)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
def equal(x1, x2):
"""
Return (x1 == x2) element-wise.
@@ -119,6 +131,8 @@ def equal(x1, x2):
"""
return compare_chararrays(x1, x2, '==', True)
+
+@array_function_dispatch(_binary_op_dispatcher)
def not_equal(x1, x2):
"""
Return (x1 != x2) element-wise.
@@ -143,6 +157,8 @@ def not_equal(x1, x2):
"""
return compare_chararrays(x1, x2, '!=', True)
+
+@array_function_dispatch(_binary_op_dispatcher)
def greater_equal(x1, x2):
"""
Return (x1 >= x2) element-wise.
@@ -168,6 +184,8 @@ def greater_equal(x1, x2):
"""
return compare_chararrays(x1, x2, '>=', True)
+
+@array_function_dispatch(_binary_op_dispatcher)
def less_equal(x1, x2):
"""
Return (x1 <= x2) element-wise.
@@ -192,6 +210,8 @@ def less_equal(x1, x2):
"""
return compare_chararrays(x1, x2, '<=', True)
+
+@array_function_dispatch(_binary_op_dispatcher)
def greater(x1, x2):
"""
Return (x1 > x2) element-wise.
@@ -216,6 +236,8 @@ def greater(x1, x2):
"""
return compare_chararrays(x1, x2, '>', True)
+
+@array_function_dispatch(_binary_op_dispatcher)
def less(x1, x2):
"""
Return (x1 < x2) element-wise.
@@ -240,6 +262,12 @@ def less(x1, x2):
"""
return compare_chararrays(x1, x2, '<', True)
+
+def _unary_op_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_unary_op_dispatcher)
def str_len(a):
"""
Return len(a) element-wise.
@@ -259,6 +287,8 @@ def str_len(a):
"""
return _vec_string(a, integer, '__len__')
+
+@array_function_dispatch(_binary_op_dispatcher)
def add(x1, x2):
"""
Return element-wise string concatenation for two arrays of str or unicode.
@@ -285,6 +315,12 @@ def add(x1, x2):
dtype = _use_unicode(arr1, arr2)
return _vec_string(arr1, (dtype, out_size), '__add__', (arr2,))
+
+def _multiply_dispatcher(a, i):
+ return (a,)
+
+
+@array_function_dispatch(_multiply_dispatcher)
def multiply(a, i):
"""
Return (a * i), that is string multiple concatenation,
@@ -313,6 +349,12 @@ def multiply(a, i):
return _vec_string(
a_arr, (a_arr.dtype.type, out_size), '__mul__', (i_arr,))
+
+def _mod_dispatcher(a, values):
+ return (a, values)
+
+
+@array_function_dispatch(_mod_dispatcher)
def mod(a, values):
"""
Return (a % i), that is pre-Python 2.6 string formatting
@@ -339,6 +381,8 @@ def mod(a, values):
return _to_string_or_unicode_array(
_vec_string(a, object_, '__mod__', (values,)))
+
+@array_function_dispatch(_unary_op_dispatcher)
def capitalize(a):
"""
Return a copy of `a` with only the first character of each element
@@ -377,6 +421,11 @@ def capitalize(a):
return _vec_string(a_arr, a_arr.dtype, 'capitalize')
+def _center_dispatcher(a, width, fillchar=None):
+ return (a,)
+
+
+@array_function_dispatch(_center_dispatcher)
def center(a, width, fillchar=' '):
"""
Return a copy of `a` with its elements centered in a string of
@@ -413,6 +462,11 @@ def center(a, width, fillchar=' '):
a_arr, (a_arr.dtype.type, size), 'center', (width_arr, fillchar))
+def _count_dispatcher(a, sub, start=None, end=None):
+ return (a,)
+
+
+@array_function_dispatch(_count_dispatcher)
def count(a, sub, start=0, end=None):
"""
Returns an array with the number of non-overlapping occurrences of
@@ -459,6 +513,11 @@ def count(a, sub, start=0, end=None):
return _vec_string(a, integer, 'count', [sub, start] + _clean_args(end))
+def _code_dispatcher(a, encoding=None, errors=None):
+ return (a,)
+
+
+@array_function_dispatch(_code_dispatcher)
def decode(a, encoding=None, errors=None):
"""
Calls `str.decode` element-wise.
@@ -505,6 +564,7 @@ def decode(a, encoding=None, errors=None):
_vec_string(a, object_, 'decode', _clean_args(encoding, errors)))
+@array_function_dispatch(_code_dispatcher)
def encode(a, encoding=None, errors=None):
"""
Calls `str.encode` element-wise.
@@ -540,6 +600,11 @@ def encode(a, encoding=None, errors=None):
_vec_string(a, object_, 'encode', _clean_args(encoding, errors)))
+def _endswith_dispatcher(a, suffix, start=None, end=None):
+ return (a,)
+
+
+@array_function_dispatch(_endswith_dispatcher)
def endswith(a, suffix, start=0, end=None):
"""
Returns a boolean array which is `True` where the string element
@@ -584,6 +649,11 @@ def endswith(a, suffix, start=0, end=None):
a, bool_, 'endswith', [suffix, start] + _clean_args(end))
+def _expandtabs_dispatcher(a, tabsize=None):
+ return (a,)
+
+
+@array_function_dispatch(_expandtabs_dispatcher)
def expandtabs(a, tabsize=8):
"""
Return a copy of each string element where all tab characters are
@@ -619,6 +689,7 @@ def expandtabs(a, tabsize=8):
_vec_string(a, object_, 'expandtabs', (tabsize,)))
+@array_function_dispatch(_count_dispatcher)
def find(a, sub, start=0, end=None):
"""
For each element, return the lowest index in the string where
@@ -654,6 +725,7 @@ def find(a, sub, start=0, end=None):
a, integer, 'find', [sub, start] + _clean_args(end))
+@array_function_dispatch(_count_dispatcher)
def index(a, sub, start=0, end=None):
"""
Like `find`, but raises `ValueError` when the substring is not found.
@@ -681,6 +753,8 @@ def index(a, sub, start=0, end=None):
return _vec_string(
a, integer, 'index', [sub, start] + _clean_args(end))
+
+@array_function_dispatch(_unary_op_dispatcher)
def isalnum(a):
"""
Returns true for each element if all characters in the string are
@@ -705,6 +779,8 @@ def isalnum(a):
"""
return _vec_string(a, bool_, 'isalnum')
+
+@array_function_dispatch(_unary_op_dispatcher)
def isalpha(a):
"""
Returns true for each element if all characters in the string are
@@ -729,6 +805,8 @@ def isalpha(a):
"""
return _vec_string(a, bool_, 'isalpha')
+
+@array_function_dispatch(_unary_op_dispatcher)
def isdigit(a):
"""
Returns true for each element if all characters in the string are
@@ -753,6 +831,8 @@ def isdigit(a):
"""
return _vec_string(a, bool_, 'isdigit')
+
+@array_function_dispatch(_unary_op_dispatcher)
def islower(a):
"""
Returns true for each element if all cased characters in the
@@ -778,6 +858,8 @@ def islower(a):
"""
return _vec_string(a, bool_, 'islower')
+
+@array_function_dispatch(_unary_op_dispatcher)
def isspace(a):
"""
Returns true for each element if there are only whitespace
@@ -803,6 +885,8 @@ def isspace(a):
"""
return _vec_string(a, bool_, 'isspace')
+
+@array_function_dispatch(_unary_op_dispatcher)
def istitle(a):
"""
Returns true for each element if the element is a titlecased
@@ -827,6 +911,8 @@ def istitle(a):
"""
return _vec_string(a, bool_, 'istitle')
+
+@array_function_dispatch(_unary_op_dispatcher)
def isupper(a):
"""
Returns true for each element if all cased characters in the
@@ -852,6 +938,12 @@ def isupper(a):
"""
return _vec_string(a, bool_, 'isupper')
+
+def _join_dispatcher(sep, seq):
+ return (sep, seq)
+
+
+@array_function_dispatch(_join_dispatcher)
def join(sep, seq):
"""
Return a string which is the concatenation of the strings in the
@@ -877,6 +969,12 @@ def join(sep, seq):
_vec_string(sep, object_, 'join', (seq,)))
+
+def _just_dispatcher(a, width, fillchar=None):
+ return (a,)
+
+
+@array_function_dispatch(_just_dispatcher)
def ljust(a, width, fillchar=' '):
"""
Return an array with the elements of `a` left-justified in a
@@ -912,6 +1010,7 @@ def ljust(a, width, fillchar=' '):
a_arr, (a_arr.dtype.type, size), 'ljust', (width_arr, fillchar))
+@array_function_dispatch(_unary_op_dispatcher)
def lower(a):
"""
Return an array with the elements converted to lowercase.
@@ -948,6 +1047,11 @@ def lower(a):
return _vec_string(a_arr, a_arr.dtype, 'lower')
+def _strip_dispatcher(a, chars=None):
+ return (a,)
+
+
+@array_function_dispatch(_strip_dispatcher)
def lstrip(a, chars=None):
"""
For each element in `a`, return a copy with the leading characters
@@ -1005,6 +1109,11 @@ def lstrip(a, chars=None):
return _vec_string(a_arr, a_arr.dtype, 'lstrip', (chars,))
+def _partition_dispatcher(a, sep):
+ return (a,)
+
+
+@array_function_dispatch(_partition_dispatcher)
def partition(a, sep):
"""
Partition each element in `a` around `sep`.
@@ -1040,6 +1149,11 @@ def partition(a, sep):
_vec_string(a, object_, 'partition', (sep,)))
+def _replace_dispatcher(a, old, new, count=None):
+ return (a,)
+
+
+@array_function_dispatch(_replace_dispatcher)
def replace(a, old, new, count=None):
"""
For each element in `a`, return a copy of the string with all
@@ -1072,6 +1186,7 @@ def replace(a, old, new, count=None):
a, object_, 'replace', [old, new] + _clean_args(count)))
+@array_function_dispatch(_count_dispatcher)
def rfind(a, sub, start=0, end=None):
"""
For each element in `a`, return the highest index in the string
@@ -1104,6 +1219,7 @@ def rfind(a, sub, start=0, end=None):
a, integer, 'rfind', [sub, start] + _clean_args(end))
+@array_function_dispatch(_count_dispatcher)
def rindex(a, sub, start=0, end=None):
"""
Like `rfind`, but raises `ValueError` when the substring `sub` is
@@ -1133,6 +1249,7 @@ def rindex(a, sub, start=0, end=None):
a, integer, 'rindex', [sub, start] + _clean_args(end))
+@array_function_dispatch(_just_dispatcher)
def rjust(a, width, fillchar=' '):
"""
Return an array with the elements of `a` right-justified in a
@@ -1168,6 +1285,7 @@ def rjust(a, width, fillchar=' '):
a_arr, (a_arr.dtype.type, size), 'rjust', (width_arr, fillchar))
+@array_function_dispatch(_partition_dispatcher)
def rpartition(a, sep):
"""
Partition (split) each element around the right-most separator.
@@ -1203,6 +1321,11 @@ def rpartition(a, sep):
_vec_string(a, object_, 'rpartition', (sep,)))
+def _split_dispatcher(a, sep=None, maxsplit=None):
+ return (a,)
+
+
+@array_function_dispatch(_split_dispatcher)
def rsplit(a, sep=None, maxsplit=None):
"""
For each element in `a`, return a list of the words in the
@@ -1240,6 +1363,11 @@ def rsplit(a, sep=None, maxsplit=None):
a, object_, 'rsplit', [sep] + _clean_args(maxsplit))
+def _strip_dispatcher(a, chars=None):
+ return (a,)
+
+
+@array_function_dispatch(_strip_dispatcher)
def rstrip(a, chars=None):
"""
For each element in `a`, return a copy with the trailing
@@ -1284,6 +1412,7 @@ def rstrip(a, chars=None):
return _vec_string(a_arr, a_arr.dtype, 'rstrip', (chars,))
+@array_function_dispatch(_split_dispatcher)
def split(a, sep=None, maxsplit=None):
"""
For each element in `a`, return a list of the words in the
@@ -1318,6 +1447,11 @@ def split(a, sep=None, maxsplit=None):
a, object_, 'split', [sep] + _clean_args(maxsplit))
+def _splitlines_dispatcher(a, keepends=None):
+ return (a,)
+
+
+@array_function_dispatch(_splitlines_dispatcher)
def splitlines(a, keepends=None):
"""
For each element in `a`, return a list of the lines in the
@@ -1347,6 +1481,11 @@ def splitlines(a, keepends=None):
a, object_, 'splitlines', _clean_args(keepends))
+def _startswith_dispatcher(a, prefix, start=None, end=None):
+ return (a,)
+
+
+@array_function_dispatch(_startswith_dispatcher)
def startswith(a, prefix, start=0, end=None):
"""
Returns a boolean array which is `True` where the string element
@@ -1378,6 +1517,7 @@ def startswith(a, prefix, start=0, end=None):
a, bool_, 'startswith', [prefix, start] + _clean_args(end))
+@array_function_dispatch(_strip_dispatcher)
def strip(a, chars=None):
"""
For each element in `a`, return a copy with the leading and
@@ -1426,6 +1566,7 @@ def strip(a, chars=None):
return _vec_string(a_arr, a_arr.dtype, 'strip', _clean_args(chars))
+@array_function_dispatch(_unary_op_dispatcher)
def swapcase(a):
"""
Return element-wise a copy of the string with
@@ -1463,6 +1604,7 @@ def swapcase(a):
return _vec_string(a_arr, a_arr.dtype, 'swapcase')
+@array_function_dispatch(_unary_op_dispatcher)
def title(a):
"""
Return element-wise title cased version of string or unicode.
@@ -1502,6 +1644,11 @@ def title(a):
return _vec_string(a_arr, a_arr.dtype, 'title')
+def _translate_dispatcher(a, table, deletechars=None):
+ return (a,)
+
+
+@array_function_dispatch(_translate_dispatcher)
def translate(a, table, deletechars=None):
"""
For each element in `a`, return a copy of the string where all
@@ -1538,6 +1685,7 @@ def translate(a, table, deletechars=None):
a_arr, a_arr.dtype, 'translate', [table] + _clean_args(deletechars))
+@array_function_dispatch(_unary_op_dispatcher)
def upper(a):
"""
Return an array with the elements converted to uppercase.
@@ -1574,6 +1722,11 @@ def upper(a):
return _vec_string(a_arr, a_arr.dtype, 'upper')
+def _zfill_dispatcher(a, width):
+ return (a,)
+
+
+@array_function_dispatch(_zfill_dispatcher)
def zfill(a, width):
"""
Return the numeric string left-filled with zeros
@@ -1604,6 +1757,7 @@ def zfill(a, width):
a_arr, (a_arr.dtype.type, size), 'zfill', (width_arr,))
+@array_function_dispatch(_unary_op_dispatcher)
def isnumeric(a):
"""
For each element, return True if there are only numeric
@@ -1635,6 +1789,7 @@ def isnumeric(a):
return _vec_string(a, bool_, 'isnumeric')
+@array_function_dispatch(_unary_op_dispatcher)
def isdecimal(a):
"""
For each element, return True if there are only decimal
@@ -1666,6 +1821,7 @@ def isdecimal(a):
return _vec_string(a, bool_, 'isdecimal')
+@set_module('numpy')
class chararray(ndarray):
"""
chararray(shape, itemsize=1, unicode=False, buffer=None, offset=0,
diff --git a/numpy/core/einsumfunc.py b/numpy/core/einsumfunc.py
index c22f2da42..c4fc77e9e 100644
--- a/numpy/core/einsumfunc.py
+++ b/numpy/core/einsumfunc.py
@@ -8,7 +8,8 @@ import itertools
from numpy.compat import basestring
from numpy.core.multiarray import c_einsum
-from numpy.core.numeric import asarray, asanyarray, result_type, tensordot, dot
+from numpy.core.numeric import asanyarray, tensordot
+from numpy.core.overrides import array_function_dispatch
__all__ = ['einsum', 'einsum_path']
@@ -168,7 +169,7 @@ def _optimal_path(input_sets, output_set, idx_dict, memory_limit):
Examples
--------
>>> isets = [set('abd'), set('ac'), set('bdc')]
- >>> oset = set('')
+ >>> oset = set()
>>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
>>> _path__optimal_path(isets, oset, idx_sizes, 5000)
[(0, 2), (0, 1)]
@@ -339,7 +340,7 @@ def _greedy_path(input_sets, output_set, idx_dict, memory_limit):
Examples
--------
>>> isets = [set('abd'), set('ac'), set('bdc')]
- >>> oset = set('')
+ >>> oset = set()
>>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
>>> _path__greedy_path(isets, oset, idx_sizes, 5000)
[(0, 2), (0, 1)]
@@ -520,6 +521,7 @@ def _can_dot(inputs, result, idx_removed):
# We are a matrix-matrix product, but we need to copy data
return True
+
def _parse_einsum_input(operands):
"""
A reproduction of einsum c side einsum parsing in python.
@@ -688,6 +690,17 @@ def _parse_einsum_input(operands):
return (input_subscripts, output_subscript, operands)
+def _einsum_path_dispatcher(*operands, **kwargs):
+ # NOTE: technically, we should only dispatch on array-like arguments, not
+ # subscripts (given as strings). But separating operands into
+ # arrays/subscripts is a little tricky/slow (given einsum's two supported
+ # signatures), so as a practical shortcut we dispatch on everything.
+ # Strings will be ignored for dispatching since they don't define
+ # __array_function__.
+ return operands
+
+
+@array_function_dispatch(_einsum_path_dispatcher, module='numpy')
def einsum_path(*operands, **kwargs):
"""
einsum_path(subscripts, *operands, optimize='greedy')
@@ -836,7 +849,6 @@ def einsum_path(*operands, **kwargs):
# Python side parsing
input_subscripts, output_subscript, operands = _parse_einsum_input(operands)
- subscripts = input_subscripts + '->' + output_subscript
# Build a few useful list and sets
input_list = input_subscripts.split(',')
@@ -875,9 +887,8 @@ def einsum_path(*operands, **kwargs):
broadcast_indices = [set(x) for x in broadcast_indices]
# Compute size of each input array plus the output array
- size_list = []
- for term in input_list + [output_subscript]:
- size_list.append(_compute_size_by_dict(term, dimension_dict))
+ size_list = [_compute_size_by_dict(term, dimension_dict)
+ for term in input_list + [output_subscript]]
max_size = max(size_list)
if memory_limit is None:
@@ -979,7 +990,16 @@ def einsum_path(*operands, **kwargs):
return (path, path_print)
+def _einsum_dispatcher(*operands, **kwargs):
+ # Arguably we dispatch on more arguments than we really should; see note in
+ # _einsum_path_dispatcher for why.
+ for op in operands:
+ yield op
+ yield kwargs.get('out')
+
+
# Rewrite einsum to handle different cases
+@array_function_dispatch(_einsum_dispatcher, module='numpy')
def einsum(*operands, **kwargs):
"""
einsum(subscripts, *operands, out=None, dtype=None, order='K',
@@ -987,19 +1007,27 @@ def einsum(*operands, **kwargs):
Evaluates the Einstein summation convention on the operands.
- Using the Einstein summation convention, many common multi-dimensional
- array operations can be represented in a simple fashion. This function
- provides a way to compute such summations. The best way to understand this
- function is to try the examples below, which show how many common NumPy
- functions can be implemented as calls to `einsum`.
+ Using the Einstein summation convention, many common multi-dimensional,
+ linear algebraic array operations can be represented in a simple fashion.
+ In *implicit* mode `einsum` computes these values.
+
+ In *explicit* mode, `einsum` provides further flexibility to compute
+ other array operations that might not be considered classical Einstein
+ summation operations, by disabling, or forcing summation over specified
+ subscript labels.
+
+ See the notes and examples for clarification.
Parameters
----------
subscripts : str
- Specifies the subscripts for summation.
+ Specifies the subscripts for summation as comma separated list of
+ subscript labels. An implicit (classical Einstein summation)
+ calculation is performed unless the explicit indicator '->' is
+ included as well as subscript labels of the precise output form.
operands : list of array_like
These are the arrays for the operation.
- out : {ndarray, None}, optional
+ out : ndarray, optional
If provided, the calculation is done into this array.
dtype : {data-type, None}, optional
If provided, forces the calculation to use the data type specified.
@@ -1043,50 +1071,80 @@ def einsum(*operands, **kwargs):
-----
.. versionadded:: 1.6.0
- The subscripts string is a comma-separated list of subscript labels,
- where each label refers to a dimension of the corresponding operand.
- Repeated subscripts labels in one operand take the diagonal. For example,
- ``np.einsum('ii', a)`` is equivalent to ``np.trace(a)``.
+ The Einstein summation convention can be used to compute
+ many multi-dimensional, linear algebraic array operations. `einsum`
+ provides a succinct way of representing these.
- Whenever a label is repeated, it is summed, so ``np.einsum('i,i', a, b)``
- is equivalent to ``np.inner(a,b)``. If a label appears only once,
- it is not summed, so ``np.einsum('i', a)`` produces a view of ``a``
- with no changes.
+ A non-exhaustive list of these operations,
+ which can be computed by `einsum`, is shown below along with examples:
- The order of labels in the output is by default alphabetical. This
- means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
- ``np.einsum('ji', a)`` takes its transpose.
+ * Trace of an array, :py:func:`numpy.trace`.
+ * Return a diagonal, :py:func:`numpy.diag`.
+ * Array axis summations, :py:func:`numpy.sum`.
+ * Transpositions and permutations, :py:func:`numpy.transpose`.
+ * Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`.
+ * Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`.
+ * Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`.
+ * Tensor contractions, :py:func:`numpy.tensordot`.
+ * Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`.
- The output can be controlled by specifying output subscript labels
- as well. This specifies the label order, and allows summing to
- be disallowed or forced when desired. The call ``np.einsum('i->', a)``
- is like ``np.sum(a, axis=-1)``, and ``np.einsum('ii->i', a)``
- is like ``np.diag(a)``. The difference is that `einsum` does not
- allow broadcasting by default.
+ The subscripts string is a comma-separated list of subscript labels,
+ where each label refers to a dimension of the corresponding operand.
+ Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
+ is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
+ appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
+ view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
+ describes traditional matrix multiplication and is equivalent to
+ :py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one
+ operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
+ to :py:func:`np.trace(a) <numpy.trace>`.
+
+ In *implicit mode*, the chosen subscripts are important
+ since the axes of the output are reordered alphabetically. This
+ means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
+ ``np.einsum('ji', a)`` takes its transpose. Additionally,
+ ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
+ ``np.einsum('ij,jh', a, b)`` returns the transpose of the
+ multiplication since subscript 'h' precedes subscript 'i'.
+
+ In *explicit mode* the output can be directly controlled by
+ specifying output subscript labels. This requires the
+ identifier '->' as well as the list of output subscript labels.
+ This feature increases the flexibility of the function since
+ summing can be disabled or forced when required. The call
+ ``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`,
+ and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`.
+ The difference is that `einsum` does not allow broadcasting by default.
+ Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
+ order of the output subscript labels and therefore returns matrix
+ multiplication, unlike the example above in implicit mode.
To enable and control broadcasting, use an ellipsis. Default
NumPy-style broadcasting is done by adding an ellipsis
to the left of each term, like ``np.einsum('...ii->...i', a)``.
To take the trace along the first and last axes,
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
- product with the left-most indices instead of rightmost, you can do
+ product with the left-most indices instead of rightmost, one can do
``np.einsum('ij...,jk...->ik...', a, b)``.
When there is only one operand, no axes are summed, and no output
parameter is provided, a view into the operand is returned instead
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
- produces a view.
+ produces a view (changed in version 1.10.0).
- An alternative way to provide the subscripts and operands is as
- ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. The examples
- below have corresponding `einsum` calls with the two parameter methods.
+ `einsum` also accepts the subscripts and operands in the alternative
+ form ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
+ If the output shape is not provided in this format, `einsum` is
+ calculated in implicit mode; otherwise it is performed explicitly.
+ The examples below have corresponding `einsum` calls with the two
+ parameter methods.
.. versionadded:: 1.10.0
Views returned from einsum are now writeable whenever the input array
is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
- have the same effect as ``np.swapaxes(a, 0, 2)`` and
- ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
+ have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
+ and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
of a 2D array.
.. versionadded:: 1.12.0
@@ -1096,7 +1154,14 @@ def einsum(*operands, **kwargs):
can greatly increase the computational efficiency at the cost of a larger
memory footprint during computation.
- See ``np.einsum_path`` for more details.
+ Typically a 'greedy' algorithm is applied, which empirical tests have shown
+ returns the optimal path in the majority of cases. In some cases 'optimal'
+ will find the best path through a more expensive, exhaustive search.
+ For iterative calculations it may be advisable to calculate the optimal path
+ once and reuse that path by supplying it as an argument. An example is given
+ below.
+
+ See :py:func:`numpy.einsum_path` for more details.
Examples
--------
@@ -1104,6 +1169,8 @@ def einsum(*operands, **kwargs):
>>> b = np.arange(5)
>>> c = np.arange(6).reshape(2,3)
+ Trace of a matrix:
+
>>> np.einsum('ii', a)
60
>>> np.einsum(a, [0,0])
@@ -1111,6 +1178,8 @@ def einsum(*operands, **kwargs):
>>> np.trace(a)
60
+ Extract the diagonal (requires explicit form):
+
>>> np.einsum('ii->i', a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum(a, [0,0], [0])
@@ -1118,32 +1187,67 @@ def einsum(*operands, **kwargs):
>>> np.diag(a)
array([ 0, 6, 12, 18, 24])
- >>> np.einsum('ij,j', a, b)
- array([ 30, 80, 130, 180, 230])
- >>> np.einsum(a, [0,1], b, [1])
- array([ 30, 80, 130, 180, 230])
- >>> np.dot(a, b)
- array([ 30, 80, 130, 180, 230])
- >>> np.einsum('...j,j', a, b)
- array([ 30, 80, 130, 180, 230])
+ Sum over an axis (requires explicit form):
+
+ >>> np.einsum('ij->i', a)
+ array([ 10, 35, 60, 85, 110])
+ >>> np.einsum(a, [0,1], [0])
+ array([ 10, 35, 60, 85, 110])
+ >>> np.sum(a, axis=1)
+ array([ 10, 35, 60, 85, 110])
+
+ For higher dimensional arrays summing a single axis can be done with ellipsis:
+
+ >>> np.einsum('...j->...', a)
+ array([ 10, 35, 60, 85, 110])
+ >>> np.einsum(a, [Ellipsis,1], [Ellipsis])
+ array([ 10, 35, 60, 85, 110])
+
+ Compute a matrix transpose, or reorder any number of axes:
>>> np.einsum('ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
+ >>> np.einsum('ij->ji', c)
+ array([[0, 3],
+ [1, 4],
+ [2, 5]])
>>> np.einsum(c, [1,0])
array([[0, 3],
[1, 4],
[2, 5]])
- >>> c.T
+ >>> np.transpose(c)
array([[0, 3],
[1, 4],
[2, 5]])
+ Vector inner products:
+
+ >>> np.einsum('i,i', b, b)
+ 30
+ >>> np.einsum(b, [0], b, [0])
+ 30
+ >>> np.inner(b,b)
+ 30
+
+ Matrix vector multiplication:
+
+ >>> np.einsum('ij,j', a, b)
+ array([ 30, 80, 130, 180, 230])
+ >>> np.einsum(a, [0,1], b, [1])
+ array([ 30, 80, 130, 180, 230])
+ >>> np.dot(a, b)
+ array([ 30, 80, 130, 180, 230])
+ >>> np.einsum('...j,j', a, b)
+ array([ 30, 80, 130, 180, 230])
+
+ Broadcasting and scalar multiplication:
+
>>> np.einsum('..., ...', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
- >>> np.einsum(',ij', 3, C)
+ >>> np.einsum(',ij', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(3, [Ellipsis], c, [Ellipsis])
@@ -1153,12 +1257,7 @@ def einsum(*operands, **kwargs):
array([[ 0, 3, 6],
[ 9, 12, 15]])
- >>> np.einsum('i,i', b, b)
- 30
- >>> np.einsum(b, [0], b, [0])
- 30
- >>> np.inner(b,b)
- 30
+ Vector outer product:
>>> np.einsum('i,j', np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
@@ -1170,12 +1269,7 @@ def einsum(*operands, **kwargs):
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
- >>> np.einsum('i...->...', a)
- array([50, 55, 60, 65, 70])
- >>> np.einsum(a, [0,Ellipsis], [Ellipsis])
- array([50, 55, 60, 65, 70])
- >>> np.sum(a, axis=0)
- array([50, 55, 60, 65, 70])
+ Tensor contraction:
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
@@ -1198,6 +1292,17 @@ def einsum(*operands, **kwargs):
[ 4796., 5162.],
[ 4928., 5306.]])
+ Writeable returned arrays (since version 1.10.0):
+
+ >>> a = np.zeros((3, 3))
+ >>> np.einsum('ii->i', a)[:] = 1
+ >>> a
+ array([[ 1., 0., 0.],
+ [ 0., 1., 0.],
+ [ 0., 0., 1.]])
+
+ Example of ellipsis use:
+
>>> a = np.arange(6).reshape((3,2))
>>> b = np.arange(12).reshape((4,3))
>>> np.einsum('ki,jk->ij', a, b)
@@ -1210,13 +1315,26 @@ def einsum(*operands, **kwargs):
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
- >>> # since version 1.10.0
- >>> a = np.zeros((3, 3))
- >>> np.einsum('ii->i', a)[:] = 1
- >>> a
- array([[ 1., 0., 0.],
- [ 0., 1., 0.],
- [ 0., 0., 1.]])
+ Chained array operations. For more complicated contractions, speed-ups
+ might be achieved by repeatedly computing a 'greedy' path, or by
+ pre-computing the 'optimal' path with `einsum_path` and reusing it via
+ the ``optimize`` argument (since version 1.12.0). Performance improvements
+ can be particularly significant with larger arrays:
+
+ >>> a = np.ones(64).reshape(2,4,8)
+ # Basic `einsum`: ~1520ms (benchmarked on 3.1GHz Intel i5.)
+ >>> for iteration in range(500):
+ ... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a)
+ # Sub-optimal `einsum` (due to repeated path calculation time): ~330ms
+ >>> for iteration in range(500):
+ ... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')
+ # Greedy `einsum` (faster optimal path approximation): ~160ms
+ >>> for iteration in range(500):
+ ... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy')
+ # Optimal `einsum` (best usage pattern in some use cases): ~110ms
+ >>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')[0]
+ >>> for iteration in range(500):
+ ... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path)
"""
@@ -1255,9 +1373,7 @@ def einsum(*operands, **kwargs):
# Start contraction loop
for num, contraction in enumerate(contraction_list):
inds, idx_rm, einsum_str, remaining, blas = contraction
- tmp_operands = []
- for x in inds:
- tmp_operands.append(operands.pop(x))
+ tmp_operands = [operands.pop(x) for x in inds]
# Do we need to deal with the output?
handle_out = specified_out and ((num + 1) == len(contraction_list))
@@ -1274,7 +1390,7 @@ def einsum(*operands, **kwargs):
# Find indices to contract over
left_pos, right_pos = [], []
- for s in idx_rm:
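+ # sort to keep the contraction order deterministic (idx_rm is a
+ # set, whose iteration order is not guaranteed)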
+ for s in sorted(idx_rm):
left_pos.append(input_left.find(s))
right_pos.append(input_right.find(s))
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index d1aae0aa0..59a820d53 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -3,12 +3,14 @@
"""
from __future__ import division, absolute_import, print_function
+import functools
import types
import warnings
import numpy as np
from .. import VisibleDeprecationWarning
from . import multiarray as mu
+from . import overrides
from . import umath as um
from . import numerictypes as nt
from .numeric import asarray, array, asanyarray, concatenate
@@ -31,6 +33,9 @@ _gentype = types.GeneratorType
# save away Python sum
_sum_ = sum
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
# functions that are now methods
def _wrapit(obj, method, *args, **kwds):
@@ -62,10 +67,8 @@ def _wrapfunc(obj, method, *args, **kwds):
def _wrapreduction(obj, ufunc, method, axis, dtype, out, **kwargs):
- passkwargs = {}
- for k, v in kwargs.items():
- if v is not np._NoValue:
- passkwargs[k] = v
+ passkwargs = {k: v for k, v in kwargs.items()
+ if v is not np._NoValue}
if type(obj) is not mu.ndarray:
try:
@@ -83,6 +86,11 @@ def _wrapreduction(obj, ufunc, method, axis, dtype, out, **kwargs):
return ufunc.reduce(obj, axis, dtype, out, **passkwargs)
+def _take_dispatcher(a, indices, axis=None, out=None, mode=None):
+ return (a, out)
+
+
+@array_function_dispatch(_take_dispatcher)
def take(a, indices, axis=None, out=None, mode='raise'):
"""
Take elements from an array along an axis.
@@ -181,7 +189,12 @@ def take(a, indices, axis=None, out=None, mode='raise'):
return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode)
+def _reshape_dispatcher(a, newshape, order=None):
+ return (a,)
+
+
# not deprecated --- copy if necessary, view otherwise
+@array_function_dispatch(_reshape_dispatcher)
def reshape(a, newshape, order='C'):
"""
Gives a new shape to an array without changing its data.
@@ -279,6 +292,14 @@ def reshape(a, newshape, order='C'):
return _wrapfunc(a, 'reshape', newshape, order=order)
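+# This dispatcher is a generator because ``choices`` is a sequence of
+# arrays; candidate arguments are yielded one at a time, and any iterable
+# of arguments is acceptable to the dispatch machinery.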
+def _choose_dispatcher(a, choices, out=None, mode=None):
+ yield a
+ for c in choices:
+ yield c
+ yield out
+
+
+@array_function_dispatch(_choose_dispatcher)
def choose(a, choices, out=None, mode='raise'):
"""
Construct an array from an index array and a set of arrays to choose from.
@@ -401,6 +422,11 @@ def choose(a, choices, out=None, mode='raise'):
return _wrapfunc(a, 'choose', choices, out=out, mode=mode)
+def _repeat_dispatcher(a, repeats, axis=None):
+ return (a,)
+
+
+@array_function_dispatch(_repeat_dispatcher)
def repeat(a, repeats, axis=None):
"""
Repeat elements of an array.
@@ -445,6 +471,11 @@ def repeat(a, repeats, axis=None):
return _wrapfunc(a, 'repeat', repeats, axis=axis)
+def _put_dispatcher(a, ind, v, mode=None):
+ return (a, ind, v)
+
+
+@array_function_dispatch(_put_dispatcher)
def put(a, ind, v, mode='raise'):
"""
Replaces specified elements of an array with given values.
@@ -503,6 +534,11 @@ def put(a, ind, v, mode='raise'):
return put(ind, v, mode=mode)
+def _swapaxes_dispatcher(a, axis1, axis2):
+ return (a,)
+
+
+@array_function_dispatch(_swapaxes_dispatcher)
def swapaxes(a, axis1, axis2):
"""
Interchange two axes of an array.
@@ -549,6 +585,11 @@ def swapaxes(a, axis1, axis2):
return _wrapfunc(a, 'swapaxes', axis1, axis2)
+def _transpose_dispatcher(a, axes=None):
+ return (a,)
+
+
+@array_function_dispatch(_transpose_dispatcher)
def transpose(a, axes=None):
"""
Permute the dimensions of an array.
@@ -598,6 +639,11 @@ def transpose(a, axes=None):
return _wrapfunc(a, 'transpose', axes)
+def _partition_dispatcher(a, kth, axis=None, kind=None, order=None):
+ return (a,)
+
+
+@array_function_dispatch(_partition_dispatcher)
def partition(a, kth, axis=-1, kind='introselect', order=None):
"""
Return a partitioned copy of an array.
@@ -689,6 +735,11 @@ def partition(a, kth, axis=-1, kind='introselect', order=None):
return a
+def _argpartition_dispatcher(a, kth, axis=None, kind=None, order=None):
+ return (a,)
+
+
+@array_function_dispatch(_argpartition_dispatcher)
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
"""
Perform an indirect partition along the given axis using the
@@ -757,6 +808,11 @@ def argpartition(a, kth, axis=-1, kind='introselect', order=None):
return _wrapfunc(a, 'argpartition', kth, axis=axis, kind=kind, order=order)
+def _sort_dispatcher(a, axis=None, kind=None, order=None):
+ return (a,)
+
+
+@array_function_dispatch(_sort_dispatcher)
def sort(a, axis=-1, kind='quicksort', order=None):
"""
Return a sorted copy of an array.
@@ -879,6 +935,11 @@ def sort(a, axis=-1, kind='quicksort', order=None):
return a
+def _argsort_dispatcher(a, axis=None, kind=None, order=None):
+ return (a,)
+
+
+@array_function_dispatch(_argsort_dispatcher)
def argsort(a, axis=-1, kind='quicksort', order=None):
"""
Returns the indices that would sort an array.
@@ -973,6 +1034,11 @@ def argsort(a, axis=-1, kind='quicksort', order=None):
return _wrapfunc(a, 'argsort', axis=axis, kind=kind, order=order)
+def _argmax_dispatcher(a, axis=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_argmax_dispatcher)
def argmax(a, axis=None, out=None):
"""
Returns the indices of the maximum values along an axis.
@@ -1007,10 +1073,10 @@ def argmax(a, axis=None, out=None):
Examples
--------
- >>> a = np.arange(6).reshape(2,3)
+ >>> a = np.arange(6).reshape(2,3) + 10
>>> a
- array([[0, 1, 2],
- [3, 4, 5]])
+ array([[10, 11, 12],
+ [13, 14, 15]])
>>> np.argmax(a)
5
>>> np.argmax(a, axis=0)
@@ -1024,7 +1090,7 @@ def argmax(a, axis=None, out=None):
>>> ind
(1, 2)
>>> a[ind]
- 5
+ 15
>>> b = np.arange(6)
>>> b[1] = 5
@@ -1037,6 +1103,11 @@ def argmax(a, axis=None, out=None):
return _wrapfunc(a, 'argmax', axis=axis, out=out)
+def _argmin_dispatcher(a, axis=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_argmin_dispatcher)
def argmin(a, axis=None, out=None):
"""
Returns the indices of the minimum values along an axis.
@@ -1071,10 +1142,10 @@ def argmin(a, axis=None, out=None):
Examples
--------
- >>> a = np.arange(6).reshape(2,3)
+ >>> a = np.arange(6).reshape(2,3) + 10
>>> a
- array([[0, 1, 2],
- [3, 4, 5]])
+ array([[10, 11, 12],
+ [13, 14, 15]])
>>> np.argmin(a)
0
>>> np.argmin(a, axis=0)
@@ -1088,12 +1159,12 @@ def argmin(a, axis=None, out=None):
>>> ind
(0, 0)
>>> a[ind]
- 0
+ 10
- >>> b = np.arange(6)
- >>> b[4] = 0
+ >>> b = np.arange(6) + 10
+ >>> b[4] = 10
>>> b
- array([0, 1, 2, 3, 0, 5])
+ array([10, 11, 12, 13, 10, 15])
>>> np.argmin(b) # Only the first occurrence is returned.
0
@@ -1101,6 +1172,11 @@ def argmin(a, axis=None, out=None):
return _wrapfunc(a, 'argmin', axis=axis, out=out)
+def _searchsorted_dispatcher(a, v, side=None, sorter=None):
+ return (a, v, sorter)
+
+
+@array_function_dispatch(_searchsorted_dispatcher)
def searchsorted(a, v, side='left', sorter=None):
"""
Find indices where elements should be inserted to maintain order.
@@ -1170,6 +1246,11 @@ def searchsorted(a, v, side='left', sorter=None):
return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter)
+def _resize_dispatcher(a, new_shape):
+ return (a,)
+
+
+@array_function_dispatch(_resize_dispatcher)
def resize(a, new_shape):
"""
Return a new array with the specified shape.
@@ -1198,6 +1279,16 @@ def resize(a, new_shape):
--------
ndarray.resize : resize an array in-place.
+ Notes
+ -----
+ Warning: This functionality does **not** consider axes separately,
+ i.e. it does not apply interpolation/extrapolation.
+ It fills the return array with the required number of elements, taken
+ from `a` as they are laid out in memory, disregarding strides and axes.
+ (This is in case the new shape is smaller. For larger, see above.)
+ This functionality is therefore not suitable to resize images,
+ or data where each axis represents a separate and distinct entity.
+
Examples
--------
>>> a=np.array([[0,1],[2,3]])
@@ -1233,6 +1324,11 @@ def resize(a, new_shape):
return reshape(a, new_shape)
+def _squeeze_dispatcher(a, axis=None):
+ return (a,)
+
+
+@array_function_dispatch(_squeeze_dispatcher)
def squeeze(a, axis=None):
"""
Remove single-dimensional entries from the shape of an array.
@@ -1291,6 +1387,12 @@ def squeeze(a, axis=None):
else:
return squeeze(axis=axis)
+
+def _diagonal_dispatcher(a, offset=None, axis1=None, axis2=None):
+ return (a,)
+
+
+@array_function_dispatch(_diagonal_dispatcher)
def diagonal(a, offset=0, axis1=0, axis2=1):
"""
Return specified diagonals.
@@ -1405,6 +1507,12 @@ def diagonal(a, offset=0, axis1=0, axis2=1):
return asanyarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)
+def _trace_dispatcher(
+ a, offset=None, axis1=None, axis2=None, dtype=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_trace_dispatcher)
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
"""
Return the sum along diagonals of the array.
@@ -1468,6 +1576,11 @@ def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
return asanyarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out)
+def _ravel_dispatcher(a, order=None):
+ return (a,)
+
+
+@array_function_dispatch(_ravel_dispatcher)
def ravel(a, order='C'):
"""Return a contiguous flattened array.
@@ -1574,6 +1687,11 @@ def ravel(a, order='C'):
return asanyarray(a).ravel(order=order)
+def _nonzero_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_nonzero_dispatcher)
def nonzero(a):
"""
Return the indices of the elements that are non-zero.
@@ -1615,16 +1733,16 @@ def nonzero(a):
Examples
--------
- >>> x = np.array([[1,0,0], [0,2,0], [1,1,0]])
+ >>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]])
>>> x
- array([[1, 0, 0],
- [0, 2, 0],
- [1, 1, 0]])
+ array([[3, 0, 0],
+ [0, 4, 0],
+ [5, 6, 0]])
>>> np.nonzero(x)
(array([0, 1, 2, 2]), array([0, 1, 0, 1]))
>>> x[np.nonzero(x)]
- array([1, 2, 1, 1])
+ array([3, 4, 5, 6])
>>> np.transpose(np.nonzero(x))
array([[0, 0],
[1, 1],
@@ -1636,7 +1754,7 @@ def nonzero(a):
boolean array and since False is interpreted as 0, np.nonzero(a > 3)
yields the indices of the `a` where the condition is true.
- >>> a = np.array([[1,2,3],[4,5,6],[7,8,9]])
+ >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> a > 3
array([[False, False, False],
[ True, True, True],
@@ -1644,7 +1762,14 @@ def nonzero(a):
>>> np.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
- The ``nonzero`` method of the boolean array can also be called.
+ Using this result to index `a` is equivalent to using the mask directly:
+
+ >>> a[np.nonzero(a > 3)]
+ array([4, 5, 6, 7, 8, 9])
+ >>> a[a > 3] # prefer this spelling
+ array([4, 5, 6, 7, 8, 9])
+
+ ``nonzero`` can also be called as a method of the array.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
@@ -1653,6 +1778,11 @@ def nonzero(a):
return _wrapfunc(a, 'nonzero')
+def _shape_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_shape_dispatcher)
def shape(a):
"""
Return the shape of an array.
@@ -1698,6 +1828,11 @@ def shape(a):
return result
+def _compress_dispatcher(condition, a, axis=None, out=None):
+ return (condition, a, out)
+
+
+@array_function_dispatch(_compress_dispatcher)
def compress(condition, a, axis=None, out=None):
"""
Return selected slices of an array along given axis.
@@ -1761,6 +1896,11 @@ def compress(condition, a, axis=None, out=None):
return _wrapfunc(a, 'compress', condition, axis=axis, out=out)
+def _clip_dispatcher(a, a_min, a_max, out=None):
+ return (a, a_min, a_max)
+
+
+@array_function_dispatch(_clip_dispatcher)
def clip(a, a_min, a_max, out=None):
"""
Clip (limit) the values in an array.
@@ -1818,6 +1958,12 @@ def clip(a, a_min, a_max, out=None):
return _wrapfunc(a, 'clip', a_min, a_max, out=out)
+def _sum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
+ initial=None):
+ return (a, out)
+
+
+@array_function_dispatch(_sum_dispatcher)
def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
"""
Sum of array elements over a given axis.
@@ -1917,7 +2063,7 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._No
# 2018-02-25, 1.15.0
warnings.warn(
"Calling np.sum(generator) is deprecated, and in the future will give a different result. "
- "Use np.sum(np.from_iter(generator)) or the python sum builtin instead.",
+ "Use np.sum(np.fromiter(generator)) or the python sum builtin instead.",
DeprecationWarning, stacklevel=2)
res = _sum_(a)
@@ -1930,6 +2076,11 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._No
initial=initial)
+def _any_dispatcher(a, axis=None, out=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_any_dispatcher)
def any(a, axis=None, out=None, keepdims=np._NoValue):
"""
Test whether any array element along a given axis evaluates to True.
@@ -2013,6 +2164,11 @@ def any(a, axis=None, out=None, keepdims=np._NoValue):
return _wrapreduction(a, np.logical_or, 'any', axis, None, out, keepdims=keepdims)
+def _all_dispatcher(a, axis=None, out=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_all_dispatcher)
def all(a, axis=None, out=None, keepdims=np._NoValue):
"""
Test whether all array elements along a given axis evaluate to True.
@@ -2089,6 +2245,11 @@ def all(a, axis=None, out=None, keepdims=np._NoValue):
return _wrapreduction(a, np.logical_and, 'all', axis, None, out, keepdims=keepdims)
+def _cumsum_dispatcher(a, axis=None, dtype=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_cumsum_dispatcher)
def cumsum(a, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of the elements along a given axis.
@@ -2156,6 +2317,11 @@ def cumsum(a, axis=None, dtype=None, out=None):
return _wrapfunc(a, 'cumsum', axis=axis, dtype=dtype, out=out)
+def _ptp_dispatcher(a, axis=None, out=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_ptp_dispatcher)
def ptp(a, axis=None, out=None, keepdims=np._NoValue):
"""
Range of values (maximum - minimum) along an axis.
@@ -2224,6 +2390,11 @@ def ptp(a, axis=None, out=None, keepdims=np._NoValue):
return _methods._ptp(a, axis=axis, out=out, **kwargs)
+def _amax_dispatcher(a, axis=None, out=None, keepdims=None, initial=None):
+ return (a, out)
+
+
+@array_function_dispatch(_amax_dispatcher)
def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
"""
Return the maximum of an array or maximum along an axis.
@@ -2334,6 +2505,11 @@ def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
initial=initial)
+def _amin_dispatcher(a, axis=None, out=None, keepdims=None, initial=None):
+ return (a, out)
+
+
+@array_function_dispatch(_amin_dispatcher)
def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
"""
Return the minimum of an array or minimum along an axis.
@@ -2442,6 +2618,11 @@ def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
initial=initial)
+def _alen_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_alen_dispatcher)
def alen(a):
"""
Return the length of the first dimension of the input array.
@@ -2475,6 +2656,12 @@ def alen(a):
return len(array(a, ndmin=1))
+def _prod_dispatcher(
+ a, axis=None, dtype=None, out=None, keepdims=None, initial=None):
+ return (a, out)
+
+
+@array_function_dispatch(_prod_dispatcher)
def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
"""
Return the product of array elements over a given axis.
@@ -2585,6 +2772,11 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._N
initial=initial)
+def _cumprod_dispatcher(a, axis=None, dtype=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_cumprod_dispatcher)
def cumprod(a, axis=None, dtype=None, out=None):
"""
Return the cumulative product of elements along a given axis.
@@ -2648,6 +2840,11 @@ def cumprod(a, axis=None, dtype=None, out=None):
return _wrapfunc(a, 'cumprod', axis=axis, dtype=dtype, out=out)
+def _ndim_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_ndim_dispatcher)
def ndim(a):
"""
Return the number of dimensions of an array.
@@ -2685,6 +2882,11 @@ def ndim(a):
return asarray(a).ndim
+def _size_dispatcher(a, axis=None):
+ return (a,)
+
+
+@array_function_dispatch(_size_dispatcher)
def size(a, axis=None):
"""
Return the number of elements along a given axis.
@@ -2731,6 +2933,11 @@ def size(a, axis=None):
return asarray(a).shape[axis]
+def _around_dispatcher(a, decimals=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_around_dispatcher)
def around(a, decimals=0, out=None):
"""
Evenly round to the given number of decimals.
@@ -2777,11 +2984,11 @@ def around(a, decimals=0, out=None):
References
----------
- .. [1] "Lecture Notes on the Status of IEEE 754", William Kahan,
- http://www.cs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF
+ .. [1] "Lecture Notes on the Status of IEEE 754", William Kahan,
+ https://people.eecs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF
.. [2] "How Futile are Mindless Assessments of
Roundoff in Floating-Point Computation?", William Kahan,
- http://www.cs.berkeley.edu/~wkahan/Mindless.pdf
+ https://people.eecs.berkeley.edu/~wkahan/Mindless.pdf
Examples
--------
@@ -2800,20 +3007,11 @@ def around(a, decimals=0, out=None):
return _wrapfunc(a, 'round', decimals=decimals, out=out)
-def round_(a, decimals=0, out=None):
- """
- Round an array to the given number of decimals.
-
- Refer to `around` for full documentation.
-
- See Also
- --------
- around : equivalent function
-
- """
- return around(a, decimals=decimals, out=out)
+def _mean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_mean_dispatcher)
def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Compute the arithmetic mean along the specified axis.
@@ -2920,6 +3118,12 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
out=out, **kwargs)
+def _std_dispatcher(
+ a, axis=None, dtype=None, out=None, ddof=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_std_dispatcher)
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
"""
Compute the standard deviation along the specified axis.
@@ -3038,6 +3242,12 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
**kwargs)
+def _var_dispatcher(
+ a, axis=None, dtype=None, out=None, ddof=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_var_dispatcher)
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
"""
Compute the variance along the specified axis.
@@ -3160,6 +3370,19 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
# Aliases of other functions. These have their own definitions only so that
# they can have unique docstrings.
+@array_function_dispatch(_around_dispatcher)
+def round_(a, decimals=0, out=None):
+ """
+ Round an array to the given number of decimals.
+
+ See Also
+ --------
+ around : equivalent function; see for details.
+ """
+ return around(a, decimals=decimals, out=out)
+
+
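+# verify=False since these aliases forward *args/**kwargs, so their
+# signatures cannot be checked against the dispatchers'.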
+@array_function_dispatch(_prod_dispatcher, verify=False)
def product(*args, **kwargs):
"""
Return the product of array elements over a given axis.
@@ -3171,6 +3394,7 @@ def product(*args, **kwargs):
return prod(*args, **kwargs)
+@array_function_dispatch(_cumprod_dispatcher, verify=False)
def cumproduct(*args, **kwargs):
"""
Return the cumulative product over the given axis.
@@ -3182,6 +3406,7 @@ def cumproduct(*args, **kwargs):
return cumprod(*args, **kwargs)
+@array_function_dispatch(_any_dispatcher, verify=False)
def sometrue(*args, **kwargs):
"""
Check whether some values are true.
@@ -3195,6 +3420,7 @@ def sometrue(*args, **kwargs):
return any(*args, **kwargs)
+@array_function_dispatch(_all_dispatcher, verify=False)
def alltrue(*args, **kwargs):
"""
Check if all elements of input array are true.
@@ -3206,6 +3432,7 @@ def alltrue(*args, **kwargs):
return all(*args, **kwargs)
+@array_function_dispatch(_ndim_dispatcher)
def rank(a):
"""
Return the number of dimensions of an array.
diff --git a/numpy/core/function_base.py b/numpy/core/function_base.py
index 82de1a36e..b68fd4068 100644
--- a/numpy/core/function_base.py
+++ b/numpy/core/function_base.py
@@ -1,15 +1,22 @@
from __future__ import division, absolute_import, print_function
+import functools
import warnings
import operator
from . import numeric as _nx
from .numeric import (result_type, NaN, shares_memory, MAY_SHARE_BOUNDS,
- TooHardError,asanyarray)
+ TooHardError, asanyarray)
+from numpy.core.multiarray import add_docstring
+from numpy.core import overrides
__all__ = ['logspace', 'linspace', 'geomspace']
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
def _index_deprecate(i, stacklevel=2):
try:
i = operator.index(i)
@@ -22,7 +29,14 @@ def _index_deprecate(i, stacklevel=2):
return i
-def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
+def _linspace_dispatcher(start, stop, num=None, endpoint=None, retstep=None,
+ dtype=None, axis=None):
+ return (start, stop)
+
+
+@array_function_dispatch(_linspace_dispatcher)
+def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,
+ axis=0):
"""
Return evenly spaced numbers over a specified interval.
@@ -31,11 +45,14 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
The endpoint of the interval can optionally be excluded.
+ .. versionchanged:: 1.16.0
+ Non-scalar `start` and `stop` are now supported.
+
Parameters
----------
- start : scalar
+ start : array_like
The starting value of the sequence.
- stop : scalar
+ stop : array_like
The end value of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced samples, so that `stop` is excluded. Note that the step
@@ -54,6 +71,13 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
.. versionadded:: 1.9.0
+ axis : int, optional
+ The axis in the result to store the samples. Relevant only if start
+ or stop are array-like. By default (0), the samples will be along a
+ new axis inserted at the beginning. Use -1 to get an axis at the end.
+
+ .. versionadded:: 1.16.0
+
Returns
-------
samples : ndarray
@@ -70,7 +94,10 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
--------
arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
- logspace : Samples uniformly distributed in log space.
+ geomspace : Similar to `linspace`, but with numbers spaced evenly on a log
+ scale (a geometric progression).
+ logspace : Similar to `geomspace`, but with the end points specified as
+ logarithms.
Examples
--------
@@ -112,16 +139,15 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
if dtype is None:
dtype = dt
- y = _nx.arange(0, num, dtype=dt)
-
delta = stop - start
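+ # reshape to (num, 1, ..., 1) so the sample axis broadcasts against an
+ # array-valued delta when start/stop are non-scalar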
+ y = _nx.arange(0, num, dtype=dt).reshape((-1,) + (1,) * delta.ndim)
# In-place multiplication y *= delta/div is faster, but prevents the multiplicant
# from overriding what class is produced, and thus prevents, e.g. use of Quantities,
# see gh-7142. Hence, we multiply in place only for standard scalar types.
- _mult_inplace = _nx.isscalar(delta)
+ _mult_inplace = _nx.isscalar(delta)
if num > 1:
step = delta / div
- if step == 0:
+ if _nx.any(step == 0):
# Special handling for denormal numbers, gh-5437
y /= div
if _mult_inplace:
@@ -144,13 +170,23 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
if endpoint and num > 1:
y[-1] = stop
+ if axis != 0:
+ y = _nx.moveaxis(y, 0, axis)
+
if retstep:
return y.astype(dtype, copy=False), step
else:
return y.astype(dtype, copy=False)
-def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
+def _logspace_dispatcher(start, stop, num=None, endpoint=None, base=None,
+ dtype=None, axis=None):
+ return (start, stop)
+
+
+@array_function_dispatch(_logspace_dispatcher)
+def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None,
+ axis=0):
"""
Return numbers spaced evenly on a log scale.
@@ -158,11 +194,14 @@ def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
(`base` to the power of `start`) and ends with ``base ** stop``
(see `endpoint` below).
+ .. versionchanged:: 1.16.0
+ Non-scalar `start` and `stop` are now supported.
+
Parameters
----------
- start : float
+ start : array_like
``base ** start`` is the starting value of the sequence.
- stop : float
+ stop : array_like
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
@@ -179,6 +218,13 @@ def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
+ axis : int, optional
+ The axis in the result to store the samples. Relevant only if start
+ or stop are array-like. By default (0), the samples will be along a
+ new axis inserted at the beginning. Use -1 to get an axis at the end.
+
+ .. versionadded:: 1.16.0
+
Returns
-------
@@ -228,24 +274,33 @@ def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
>>> plt.show()
"""
- y = linspace(start, stop, num=num, endpoint=endpoint)
+ y = linspace(start, stop, num=num, endpoint=endpoint, axis=axis)
if dtype is None:
return _nx.power(base, y)
- return _nx.power(base, y).astype(dtype)
+ return _nx.power(base, y).astype(dtype, copy=False)
+
+def _geomspace_dispatcher(start, stop, num=None, endpoint=None, dtype=None,
+ axis=None):
+ return (start, stop)
-def geomspace(start, stop, num=50, endpoint=True, dtype=None):
+
+@array_function_dispatch(_geomspace_dispatcher)
+def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0):
"""
Return numbers spaced evenly on a log scale (a geometric progression).
This is similar to `logspace`, but with endpoints specified directly.
Each output sample is a constant multiple of the previous.
+ .. versionchanged:: 1.16.0
+ Non-scalar `start` and `stop` are now supported.
+
Parameters
----------
- start : scalar
+ start : array_like
The starting value of the sequence.
- stop : scalar
+ stop : array_like
The final value of the sequence, unless `endpoint` is False.
In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
@@ -258,6 +313,12 @@ def geomspace(start, stop, num=50, endpoint=True, dtype=None):
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
+ axis : int, optional
+ The axis in the result to store the samples. Relevant only if start
+ or stop are array-like. By default (0), the samples will be along a
+ new axis inserted at the beginning. Use -1 to get an axis at the end.
+
+ .. versionadded:: 1.16.0
Returns
-------
@@ -322,37 +383,80 @@ def geomspace(start, stop, num=50, endpoint=True, dtype=None):
>>> plt.show()
"""
- if start == 0 or stop == 0:
+ start = asanyarray(start)
+ stop = asanyarray(stop)
+ if _nx.any(start == 0) or _nx.any(stop == 0):
raise ValueError('Geometric sequence cannot include zero')
- dt = result_type(start, stop, float(num))
+ dt = result_type(start, stop, float(num), _nx.zeros((), dtype))
if dtype is None:
dtype = dt
else:
# complex to dtype('complex128'), for instance
dtype = _nx.dtype(dtype)
+ # Promote both arguments to the same dtype in case, for instance, one is
+ # complex and another is negative and log would produce NaN otherwise.
+ # Copy since we may change things in-place further down.
+ start = start.astype(dt, copy=True)
+ stop = stop.astype(dt, copy=True)
+
+ out_sign = _nx.ones(_nx.broadcast(start, stop).shape, dt)
# Avoid negligible real or imaginary parts in output by rotating to
# positive real, calculating, then undoing rotation
- out_sign = 1
- if start.real == stop.real == 0:
- start, stop = start.imag, stop.imag
- out_sign = 1j * out_sign
- if _nx.sign(start) == _nx.sign(stop) == -1:
- start, stop = -start, -stop
- out_sign = -out_sign
-
- # Promote both arguments to the same dtype in case, for instance, one is
- # complex and another is negative and log would produce NaN otherwise
- start = start + (stop - stop)
- stop = stop + (start - start)
- if _nx.issubdtype(dtype, _nx.complexfloating):
- start = start + 0j
- stop = stop + 0j
+ if _nx.issubdtype(dt, _nx.complexfloating):
+ all_imag = (start.real == 0.) & (stop.real == 0.)
+ if _nx.any(all_imag):
+ start[all_imag] = start[all_imag].imag
+ stop[all_imag] = stop[all_imag].imag
+ out_sign[all_imag] = 1j
+
+ both_negative = (_nx.sign(start) == -1) & (_nx.sign(stop) == -1)
+ if _nx.any(both_negative):
+ _nx.negative(start, out=start, where=both_negative)
+ _nx.negative(stop, out=stop, where=both_negative)
+ _nx.negative(out_sign, out=out_sign, where=both_negative)
log_start = _nx.log10(start)
log_stop = _nx.log10(stop)
result = out_sign * logspace(log_start, log_stop, num=num,
endpoint=endpoint, base=10.0, dtype=dtype)
+ if axis != 0:
+ result = _nx.moveaxis(result, 0, axis)
+
+ return result.astype(dtype, copy=False)
+
- return result.astype(dtype)
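
The rewritten sign handling above rotates negative or purely imaginary
endpoints onto the positive real axis, computes in log-space, then rotates
back; it now does so element-wise, so array endpoints work too. A minimal
sketch of the expected results, assuming 1.16 behaviour:

    >>> np.geomspace(-1000, -1, num=4)            # both endpoints negative
    array([-1000.,  -100.,   -10.,    -1.])
    >>> np.geomspace(1j, 1000j, num=4)            # purely imaginary endpoints
    array([0.   +1.j, 0.  +10.j, 0. +100.j, 0.+1000.j])
    >>> np.geomspace([1, 10], [1000, 10000], num=4).shape
    (4, 2)
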
+# always succeed
+def add_newdoc(place, obj, doc):
+ """
+ Add documentation to `obj`, which lives in module `place`.
+
+ If `doc` is a string, attach it to `obj` as its docstring.
+
+ If `doc` is a tuple, the first element is interpreted as an
+ attribute of `obj` and the second as the docstring:
+ ``(method, docstring)``.
+
+ If `doc` is a list, each element should be a sequence of length
+ two: ``[(method1, docstring1), (method2, docstring2), ...]``.
+
+ This routine never raises an error.
+
+ This routine cannot modify read-only docstrings, such as those
+ of new-style classes or built-in functions. Because it never
+ raises an error, the caller must check manually that the
+ docstrings were actually changed.
+ """
+ try:
+ new = getattr(__import__(place, globals(), {}, [obj]), obj)
+ if isinstance(doc, str):
+ add_docstring(new, doc.strip())
+ elif isinstance(doc, tuple):
+ add_docstring(getattr(new, doc[0]), doc[1].strip())
+ elif isinstance(doc, list):
+ for val in doc:
+ add_docstring(getattr(new, val[0]), val[1].strip())
+ except Exception:
+ pass
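
Typical calls to the helper above, shown purely for illustration (these mirror
how NumPy attaches docstrings elsewhere; the exact targets are incidental):

    # string form: the doc becomes obj's own docstring
    add_newdoc('numpy.core.multiarray', 'flatiter',
               "Flat iterator object to iterate over arrays.")

    # tuple form: the doc is attached to an attribute of obj
    add_newdoc('numpy.core.multiarray', 'flatiter',
               ('base', 'A reference to the array that is iterated over.'))
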
diff --git a/numpy/core/getlimits.py b/numpy/core/getlimits.py
index e450a660d..544b8b35f 100644
--- a/numpy/core/getlimits.py
+++ b/numpy/core/getlimits.py
@@ -8,6 +8,7 @@ __all__ = ['finfo', 'iinfo']
import warnings
from .machar import MachAr
+from .overrides import set_module
from . import numeric
from . import numerictypes as ntypes
from .numeric import array, inf
@@ -30,6 +31,32 @@ def _fr1(a):
a.shape = ()
return a
+class MachArLike(object):
+ """ Object to simulate MachAr instance """
+
+ def __init__(self,
+ ftype,
+ **kwargs):
+ params = _MACHAR_PARAMS[ftype]
+ float_conv = lambda v: array([v], ftype)
+ float_to_float = lambda v: _fr1(float_conv(v))
+ float_to_str = lambda v: (params['fmt'] % array(_fr0(v)[0], ftype))
+
+ self.title = params['title']
+ # Parameter types same as for discovered MachAr object.
+ self.epsilon = self.eps = float_to_float(kwargs.pop('eps'))
+ self.epsneg = float_to_float(kwargs.pop('epsneg'))
+ self.xmax = self.huge = float_to_float(kwargs.pop('huge'))
+ self.xmin = self.tiny = float_to_float(kwargs.pop('tiny'))
+ self.ibeta = params['itype'](kwargs.pop('ibeta'))
+ self.__dict__.update(kwargs)
+ self.precision = int(-log10(self.eps))
+ self.resolution = float_to_float(float_conv(10) ** (-self.precision))
+ self._str_eps = float_to_str(self.eps)
+ self._str_epsneg = float_to_str(self.epsneg)
+ self._str_xmin = float_to_str(self.xmin)
+ self._str_xmax = float_to_str(self.xmax)
+ self._str_resolution = float_to_str(self.resolution)
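
For intuition, the derived fields computed at the end of `__init__` are plain
arithmetic on the known parameters; a float64 sketch using the values
registered further down:

    import math

    eps = 2.0 ** -52                     # float64 machine epsilon
    precision = int(-math.log10(eps))    # -> 15 decimal digits
    resolution = 10.0 ** -precision      # -> 1e-15
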
_convert_to_float = {
ntypes.csingle: ntypes.single,
@@ -37,7 +64,6 @@ _convert_to_float = {
ntypes.clongfloat: ntypes.longfloat
}
-
# Parameters for creating MachAr / MachAr-like objects
_title_fmt = 'numpy {} precision floating point number'
_MACHAR_PARAMS = {
@@ -58,194 +84,156 @@ _MACHAR_PARAMS = {
fmt = '%12.5e',
title = _title_fmt.format('half'))}
-
-class MachArLike(object):
- """ Object to simulate MachAr instance """
-
- def __init__(self,
- ftype,
- **kwargs):
- params = _MACHAR_PARAMS[ftype]
- float_conv = lambda v: array([v], ftype)
- float_to_float = lambda v : _fr1(float_conv(v))
- self._float_to_str = lambda v: (params['fmt'] %
- array(_fr0(v)[0], ftype))
- self.title = params['title']
- # Parameter types same as for discovered MachAr object.
- self.epsilon = self.eps = float_to_float(kwargs.pop('eps'))
- self.epsneg = float_to_float(kwargs.pop('epsneg'))
- self.xmax = self.huge = float_to_float(kwargs.pop('huge'))
- self.xmin = self.tiny = float_to_float(kwargs.pop('tiny'))
- self.ibeta = params['itype'](kwargs.pop('ibeta'))
- self.__dict__.update(kwargs)
- self.precision = int(-log10(self.eps))
- self.resolution = float_to_float(float_conv(10) ** (-self.precision))
-
- # Properties below to delay need for float_to_str, and thus avoid circular
- # imports during early numpy module loading.
- # See: https://github.com/numpy/numpy/pull/8983#discussion_r115838683
-
- @property
- def _str_eps(self):
- return self._float_to_str(self.eps)
-
- @property
- def _str_epsneg(self):
- return self._float_to_str(self.epsneg)
-
- @property
- def _str_xmin(self):
- return self._float_to_str(self.xmin)
-
- @property
- def _str_xmax(self):
- return self._float_to_str(self.xmax)
-
- @property
- def _str_resolution(self):
- return self._float_to_str(self.resolution)
-
-
-# Known parameters for float16
-# See docstring of MachAr class for description of parameters.
-_f16 = ntypes.float16
-_float16_ma = MachArLike(_f16,
- machep=-10,
- negep=-11,
- minexp=-14,
- maxexp=16,
- it=10,
- iexp=5,
- ibeta=2,
- irnd=5,
- ngrd=0,
- eps=exp2(_f16(-10)),
- epsneg=exp2(_f16(-11)),
- huge=_f16(65504),
- tiny=_f16(2 ** -14))
-
-# Known parameters for float32
-_f32 = ntypes.float32
-_float32_ma = MachArLike(_f32,
- machep=-23,
- negep=-24,
- minexp=-126,
- maxexp=128,
- it=23,
- iexp=8,
- ibeta=2,
- irnd=5,
- ngrd=0,
- eps=exp2(_f32(-23)),
- epsneg=exp2(_f32(-24)),
- huge=_f32((1 - 2 ** -24) * 2**128),
- tiny=exp2(_f32(-126)))
-
-# Known parameters for float64
-_f64 = ntypes.float64
-_epsneg_f64 = 2.0 ** -53.0
-_tiny_f64 = 2.0 ** -1022.0
-_float64_ma = MachArLike(_f64,
- machep=-52,
- negep=-53,
- minexp=-1022,
- maxexp=1024,
- it=52,
- iexp=11,
- ibeta=2,
- irnd=5,
- ngrd=0,
- eps=2.0 ** -52.0,
- epsneg=_epsneg_f64,
- huge=(1.0 - _epsneg_f64) / _tiny_f64 * _f64(4),
- tiny=_tiny_f64)
-
-# Known parameters for IEEE 754 128-bit binary float
-_ld = ntypes.longdouble
-_epsneg_f128 = exp2(_ld(-113))
-_tiny_f128 = exp2(_ld(-16382))
-# Ignore runtime error when this is not f128
-with numeric.errstate(all='ignore'):
- _huge_f128 = (_ld(1) - _epsneg_f128) / _tiny_f128 * _ld(4)
-_float128_ma = MachArLike(_ld,
- machep=-112,
- negep=-113,
- minexp=-16382,
- maxexp=16384,
- it=112,
- iexp=15,
- ibeta=2,
- irnd=5,
- ngrd=0,
- eps=exp2(_ld(-112)),
- epsneg=_epsneg_f128,
- huge=_huge_f128,
- tiny=_tiny_f128)
-
-# Known parameters for float80 (Intel 80-bit extended precision)
-_epsneg_f80 = exp2(_ld(-64))
-_tiny_f80 = exp2(_ld(-16382))
-# Ignore runtime error when this is not f80
-with numeric.errstate(all='ignore'):
- _huge_f80 = (_ld(1) - _epsneg_f80) / _tiny_f80 * _ld(4)
-_float80_ma = MachArLike(_ld,
- machep=-63,
- negep=-64,
- minexp=-16382,
- maxexp=16384,
- it=63,
- iexp=15,
- ibeta=2,
- irnd=5,
- ngrd=0,
- eps=exp2(_ld(-63)),
- epsneg=_epsneg_f80,
- huge=_huge_f80,
- tiny=_tiny_f80)
-
-# Guessed / known parameters for double double; see:
-# https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format#Double-double_arithmetic
-# These numbers have the same exponent range as float64, but extended number of
-# digits in the significand.
-_huge_dd = (umath.nextafter(_ld(inf), _ld(0))
- if hasattr(umath, 'nextafter') # Missing on some platforms?
- else _float64_ma.huge)
-_float_dd_ma = MachArLike(_ld,
- machep=-105,
- negep=-106,
- minexp=-1022,
- maxexp=1024,
- it=105,
- iexp=11,
- ibeta=2,
- irnd=5,
- ngrd=0,
- eps=exp2(_ld(-105)),
- epsneg= exp2(_ld(-106)),
- huge=_huge_dd,
- tiny=exp2(_ld(-1022)))
-
-
# Key to identify the floating point type. Key is result of
# ftype('-0.1').newbyteorder('<').tobytes()
# See:
# https://perl5.git.perl.org/perl.git/blob/3118d7d684b56cbeb702af874f4326683c45f045:/Configure
-_KNOWN_TYPES = {
- b'\x9a\x99\x99\x99\x99\x99\xb9\xbf' : _float64_ma,
- b'\xcd\xcc\xcc\xbd' : _float32_ma,
- b'f\xae' : _float16_ma,
+_KNOWN_TYPES = {}
+def _register_type(machar, bytepat):
+ _KNOWN_TYPES[bytepat] = machar
+_float_ma = {}
+
+def _register_known_types():
+ # Known parameters for float16
+ # See docstring of MachAr class for description of parameters.
+ f16 = ntypes.float16
+ float16_ma = MachArLike(f16,
+ machep=-10,
+ negep=-11,
+ minexp=-14,
+ maxexp=16,
+ it=10,
+ iexp=5,
+ ibeta=2,
+ irnd=5,
+ ngrd=0,
+ eps=exp2(f16(-10)),
+ epsneg=exp2(f16(-11)),
+ huge=f16(65504),
+ tiny=f16(2 ** -14))
+ _register_type(float16_ma, b'f\xae')
+ _float_ma[16] = float16_ma
+
+ # Known parameters for float32
+ f32 = ntypes.float32
+ float32_ma = MachArLike(f32,
+ machep=-23,
+ negep=-24,
+ minexp=-126,
+ maxexp=128,
+ it=23,
+ iexp=8,
+ ibeta=2,
+ irnd=5,
+ ngrd=0,
+ eps=exp2(f32(-23)),
+ epsneg=exp2(f32(-24)),
+ huge=f32((1 - 2 ** -24) * 2**128),
+ tiny=exp2(f32(-126)))
+ _register_type(float32_ma, b'\xcd\xcc\xcc\xbd')
+ _float_ma[32] = float32_ma
+
+ # Known parameters for float64
+ f64 = ntypes.float64
+ epsneg_f64 = 2.0 ** -53.0
+ tiny_f64 = 2.0 ** -1022.0
+ float64_ma = MachArLike(f64,
+ machep=-52,
+ negep=-53,
+ minexp=-1022,
+ maxexp=1024,
+ it=52,
+ iexp=11,
+ ibeta=2,
+ irnd=5,
+ ngrd=0,
+ eps=2.0 ** -52.0,
+ epsneg=epsneg_f64,
+ huge=(1.0 - epsneg_f64) / tiny_f64 * f64(4),
+ tiny=tiny_f64)
+ _register_type(float64_ma, b'\x9a\x99\x99\x99\x99\x99\xb9\xbf')
+ _float_ma[64] = float64_ma
+
+ # Known parameters for IEEE 754 128-bit binary float
+ ld = ntypes.longdouble
+ epsneg_f128 = exp2(ld(-113))
+ tiny_f128 = exp2(ld(-16382))
+ # Ignore runtime error when this is not f128
+ with numeric.errstate(all='ignore'):
+ huge_f128 = (ld(1) - epsneg_f128) / tiny_f128 * ld(4)
+ float128_ma = MachArLike(ld,
+ machep=-112,
+ negep=-113,
+ minexp=-16382,
+ maxexp=16384,
+ it=112,
+ iexp=15,
+ ibeta=2,
+ irnd=5,
+ ngrd=0,
+ eps=exp2(ld(-112)),
+ epsneg=epsneg_f128,
+ huge=huge_f128,
+ tiny=tiny_f128)
+ # IEEE 754 128-bit binary float
+ _register_type(float128_ma,
+ b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf')
+ _float_ma[128] = float128_ma
+
+ # Known parameters for float80 (Intel 80-bit extended precision)
+ epsneg_f80 = exp2(ld(-64))
+ tiny_f80 = exp2(ld(-16382))
+ # Ignore runtime error when this is not f80
+ with numeric.errstate(all='ignore'):
+ huge_f80 = (ld(1) - epsneg_f80) / tiny_f80 * ld(4)
+ float80_ma = MachArLike(ld,
+ machep=-63,
+ negep=-64,
+ minexp=-16382,
+ maxexp=16384,
+ it=63,
+ iexp=15,
+ ibeta=2,
+ irnd=5,
+ ngrd=0,
+ eps=exp2(ld(-63)),
+ epsneg=epsneg_f80,
+ huge=huge_f80,
+ tiny=tiny_f80)
# float80, first 10 bytes containing actual storage
- b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf' : _float80_ma,
+ _register_type(float80_ma, b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf')
+ _float_ma[80] = float80_ma
+
+ # Guessed / known parameters for double double; see:
+ # https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format#Double-double_arithmetic
+ # These numbers have the same exponent range as float64, but an extended
+ # number of digits in the significand.
+ huge_dd = (umath.nextafter(ld(inf), ld(0))
+ if hasattr(umath, 'nextafter') # Missing on some platforms?
+ else float64_ma.huge)
+ float_dd_ma = MachArLike(ld,
+ machep=-105,
+ negep=-106,
+ minexp=-1022,
+ maxexp=1024,
+ it=105,
+ iexp=11,
+ ibeta=2,
+ irnd=5,
+ ngrd=0,
+ eps=exp2(ld(-105)),
+ epsneg=exp2(ld(-106)),
+ huge=huge_dd,
+ tiny=exp2(ld(-1022)))
# double double; low, high order (e.g. PPC 64)
- b'\x9a\x99\x99\x99\x99\x99Y<\x9a\x99\x99\x99\x99\x99\xb9\xbf' :
- _float_dd_ma,
+ _register_type(float_dd_ma,
+ b'\x9a\x99\x99\x99\x99\x99Y<\x9a\x99\x99\x99\x99\x99\xb9\xbf')
# double double; high, low order (e.g. PPC 64 le)
- b'\x9a\x99\x99\x99\x99\x99\xb9\xbf\x9a\x99\x99\x99\x99\x99Y<' :
- _float_dd_ma,
- # IEEE 754 128-bit binary float
- b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf' :
- _float128_ma,
-}
+ _register_type(float_dd_ma,
+ b'\x9a\x99\x99\x99\x99\x99\xb9\xbf\x9a\x99\x99\x99\x99\x99Y<')
+ _float_ma['dd'] = float_dd_ma
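
The byte-pattern keys passed to `_register_type` can be reproduced with the
recipe quoted in the comment above; for example, for float64 on a
little-endian build:

    >>> np.float64('-0.1').newbyteorder('<').tobytes()
    b'\x9a\x99\x99\x99\x99\x99\xb9\xbf'
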
def _get_machar(ftype):
@@ -302,6 +290,7 @@ def _discovered_machar(ftype):
params['title'])
+@set_module('numpy')
class finfo(object):
"""
finfo(dtype)
@@ -452,6 +441,7 @@ class finfo(object):
" max=%(_str_max)s, dtype=%(dtype)s)") % d)
+@set_module('numpy')
class iinfo(object):
"""
iinfo(type)
@@ -513,7 +503,7 @@ class iinfo(object):
self.bits = self.dtype.itemsize * 8
self.key = "%s%d" % (self.kind, self.bits)
if self.kind not in 'iu':
- raise ValueError("Invalid integer data type.")
+ raise ValueError("Invalid integer data type %r." % (self.kind,))
def min(self):
"""Minimum value of given dtype."""
diff --git a/numpy/core/include/numpy/ndarrayobject.h b/numpy/core/include/numpy/ndarrayobject.h
index 12fc7098c..45f008b1d 100644
--- a/numpy/core/include/numpy/ndarrayobject.h
+++ b/numpy/core/include/numpy/ndarrayobject.h
@@ -5,13 +5,7 @@
#ifndef NPY_NDARRAYOBJECT_H
#define NPY_NDARRAYOBJECT_H
#ifdef __cplusplus
-#define CONFUSE_EMACS {
-#define CONFUSE_EMACS2 }
-extern "C" CONFUSE_EMACS
-#undef CONFUSE_EMACS
-#undef CONFUSE_EMACS2
-/* ... otherwise a semi-smart identer (like emacs) tries to indent
- everything when you're typing */
+extern "C" {
#endif
#include <Python.h>
diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h
index cf73cecea..b0b749c80 100644
--- a/numpy/core/include/numpy/ndarraytypes.h
+++ b/numpy/core/include/numpy/ndarraytypes.h
@@ -505,7 +505,8 @@ typedef struct {
PyArray_NonzeroFunc *nonzero;
/*
- * Used for arange.
+ * Used for arange. Should return 0 on success
+ * and -1 on failure.
* Can be NULL.
*/
PyArray_FillFunc *fill;
@@ -1670,7 +1671,7 @@ PyArray_CLEARFLAGS(PyArrayObject *arr, int flags)
#define PyTypeNum_ISOBJECT(type) ((type) == NPY_OBJECT)
-#define PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(_PyADt(obj))
+#define PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(((PyArray_Descr*)(obj))->type_num)
#define PyDataType_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(((PyArray_Descr*)(obj))->type_num)
#define PyDataType_ISSIGNED(obj) PyTypeNum_ISSIGNED(((PyArray_Descr*)(obj))->type_num)
#define PyDataType_ISINTEGER(obj) PyTypeNum_ISINTEGER(((PyArray_Descr*)(obj))->type_num )
@@ -1759,7 +1760,7 @@ typedef struct {
/************************************************************
* This is the form of the struct that's returned pointed by the
* PyCObject attribute of an array __array_struct__. See
- * http://docs.scipy.org/doc/numpy/reference/arrays.interface.html for the full
+ * https://docs.scipy.org/doc/numpy/reference/arrays.interface.html for the full
* documentation.
************************************************************/
typedef struct {
diff --git a/numpy/core/include/numpy/npy_1_7_deprecated_api.h b/numpy/core/include/numpy/npy_1_7_deprecated_api.h
index 4c318bc47..a6ee21219 100644
--- a/numpy/core/include/numpy/npy_1_7_deprecated_api.h
+++ b/numpy/core/include/numpy/npy_1_7_deprecated_api.h
@@ -5,17 +5,20 @@
#error "Should never include npy_*_*_deprecated_api directly."
#endif
+/* Emit a warning if the user did not specifically request the old API */
+#ifndef NPY_NO_DEPRECATED_API
#if defined(_WIN32)
#define _WARN___STR2__(x) #x
#define _WARN___STR1__(x) _WARN___STR2__(x)
#define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: "
-#pragma message(_WARN___LOC__"Using deprecated NumPy API, disable it by " \
- "#defining NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION")
+#pragma message(_WARN___LOC__"Using deprecated NumPy API, disable it with " \
+ "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION")
#elif defined(__GNUC__)
-#warning "Using deprecated NumPy API, disable it by " \
- "#defining NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION"
+#warning "Using deprecated NumPy API, disable it with " \
+ "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION"
#endif
/* TODO: How to do this warning message for other compilers? */
+#endif
/*
* This header exists to collect all dangerous/deprecated NumPy API
diff --git a/numpy/core/include/numpy/npy_3kcompat.h b/numpy/core/include/numpy/npy_3kcompat.h
index 2d0ccd3b9..832bc0599 100644
--- a/numpy/core/include/numpy/npy_3kcompat.h
+++ b/numpy/core/include/numpy/npy_3kcompat.h
@@ -69,6 +69,16 @@ static NPY_INLINE int PyInt_Check(PyObject *op) {
#define Npy_EnterRecursiveCall(x) Py_EnterRecursiveCall(x)
#endif
+/* Py_SETREF was added in 3.5.2, and only if Py_LIMITED_API is absent */
+#if PY_VERSION_HEX < 0x03050200
+ #define Py_SETREF(op, op2) \
+ do { \
+ PyObject *_py_tmp = (PyObject *)(op); \
+ (op) = (op2); \
+ Py_DECREF(_py_tmp); \
+ } while (0)
+#endif
+
/*
* PyString -> PyBytes
*/
@@ -141,20 +151,14 @@ static NPY_INLINE int PyInt_Check(PyObject *op) {
static NPY_INLINE void
PyUnicode_ConcatAndDel(PyObject **left, PyObject *right)
{
- PyObject *newobj;
- newobj = PyUnicode_Concat(*left, right);
- Py_DECREF(*left);
+ Py_SETREF(*left, PyUnicode_Concat(*left, right));
Py_DECREF(right);
- *left = newobj;
}
static NPY_INLINE void
PyUnicode_Concat2(PyObject **left, PyObject *right)
{
- PyObject *newobj;
- newobj = PyUnicode_Concat(*left, right);
- Py_DECREF(*left);
- *left = newobj;
+ Py_SETREF(*left, PyUnicode_Concat(*left, right));
}
/*
@@ -215,6 +219,7 @@ npy_PyFile_Dup2(PyObject *file, char *mode, npy_off_t *orig_pos)
if (handle == NULL) {
PyErr_SetString(PyExc_IOError,
"Getting a FILE* from a Python file object failed");
+ return NULL;
}
/* Record the original raw file handle position */
@@ -378,6 +383,68 @@ npy_PyFile_CloseFile(PyObject *file)
return 0;
}
+
+/* This is a copy of _PyErr_ChainExceptions
+ */
+static NPY_INLINE void
+npy_PyErr_ChainExceptions(PyObject *exc, PyObject *val, PyObject *tb)
+{
+ if (exc == NULL)
+ return;
+
+ if (PyErr_Occurred()) {
+ /* only py3 supports this anyway */
+ #ifdef NPY_PY3K
+ PyObject *exc2, *val2, *tb2;
+ PyErr_Fetch(&exc2, &val2, &tb2);
+ PyErr_NormalizeException(&exc, &val, &tb);
+ if (tb != NULL) {
+ PyException_SetTraceback(val, tb);
+ Py_DECREF(tb);
+ }
+ Py_DECREF(exc);
+ PyErr_NormalizeException(&exc2, &val2, &tb2);
+ PyException_SetContext(val2, val);
+ PyErr_Restore(exc2, val2, tb2);
+ #endif
+ }
+ else {
+ PyErr_Restore(exc, val, tb);
+ }
+}
+
+
+/* This is a copy of _PyErr_ChainExceptions, with:
+ * - a minimal implementation for python 2
+ * - __cause__ used instead of __context__
+ */
+static NPY_INLINE void
+npy_PyErr_ChainExceptionsCause(PyObject *exc, PyObject *val, PyObject *tb)
+{
+ if (exc == NULL)
+ return;
+
+ if (PyErr_Occurred()) {
+ /* only py3 supports this anyway */
+ #ifdef NPY_PY3K
+ PyObject *exc2, *val2, *tb2;
+ PyErr_Fetch(&exc2, &val2, &tb2);
+ PyErr_NormalizeException(&exc, &val, &tb);
+ if (tb != NULL) {
+ PyException_SetTraceback(val, tb);
+ Py_DECREF(tb);
+ }
+ Py_DECREF(exc);
+ PyErr_NormalizeException(&exc2, &val2, &tb2);
+ PyException_SetCause(val2, val);
+ PyErr_Restore(exc2, val2, tb2);
+ #endif
+ }
+ else {
+ PyErr_Restore(exc, val, tb);
+ }
+}
+
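
At the Python level, the two helpers above correspond to the language's
implicit and explicit exception chaining (``__context__`` vs ``__cause__``);
a sketch of the observable behaviour the Cause variant reproduces:

    try:
        try:
            1 / 0
        except ZeroDivisionError as err:
            # explicit chaining, as npy_PyErr_ChainExceptionsCause does in C
            raise ValueError("bad input") from err
    except ValueError as err:
        assert isinstance(err.__cause__, ZeroDivisionError)

The first helper mirrors what Python itself does when a new exception is
raised inside an except block: the original lands on ``__context__``.
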
/*
* PyObject_Cmp
*/
diff --git a/numpy/core/include/numpy/npy_common.h b/numpy/core/include/numpy/npy_common.h
index 5faff4385..64aaaacff 100644
--- a/numpy/core/include/numpy/npy_common.h
+++ b/numpy/core/include/numpy/npy_common.h
@@ -14,7 +14,7 @@
* using static inline modifiers when defining npy_math functions
* allows the compiler to make optimizations when possible
*/
-#if NPY_INTERNAL_BUILD
+#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD
#ifndef NPY_INLINE_MATH
#define NPY_INLINE_MATH 1
#endif
diff --git a/numpy/core/include/numpy/npy_endian.h b/numpy/core/include/numpy/npy_endian.h
index 649bdb0a6..44cdffd14 100644
--- a/numpy/core/include/numpy/npy_endian.h
+++ b/numpy/core/include/numpy/npy_endian.h
@@ -37,28 +37,31 @@
#define NPY_LITTLE_ENDIAN 1234
#define NPY_BIG_ENDIAN 4321
- #if defined(NPY_CPU_X86) \
- || defined(NPY_CPU_AMD64) \
- || defined(NPY_CPU_IA64) \
- || defined(NPY_CPU_ALPHA) \
- || defined(NPY_CPU_ARMEL) \
- || defined(NPY_CPU_AARCH64) \
- || defined(NPY_CPU_SH_LE) \
- || defined(NPY_CPU_MIPSEL) \
- || defined(NPY_CPU_PPC64LE) \
- || defined(NPY_CPU_ARCEL) \
+ #if defined(NPY_CPU_X86) \
+ || defined(NPY_CPU_AMD64) \
+ || defined(NPY_CPU_IA64) \
+ || defined(NPY_CPU_ALPHA) \
+ || defined(NPY_CPU_ARMEL) \
+ || defined(NPY_CPU_ARMEL_AARCH32) \
+ || defined(NPY_CPU_ARMEL_AARCH64) \
+ || defined(NPY_CPU_SH_LE) \
+ || defined(NPY_CPU_MIPSEL) \
+ || defined(NPY_CPU_PPC64LE) \
+ || defined(NPY_CPU_ARCEL) \
|| defined(NPY_CPU_RISCV64)
#define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN
- #elif defined(NPY_CPU_PPC) \
- || defined(NPY_CPU_SPARC) \
- || defined(NPY_CPU_S390) \
- || defined(NPY_CPU_HPPA) \
- || defined(NPY_CPU_PPC64) \
- || defined(NPY_CPU_ARMEB) \
- || defined(NPY_CPU_SH_BE) \
- || defined(NPY_CPU_MIPSEB) \
- || defined(NPY_CPU_OR1K) \
- || defined(NPY_CPU_M68K) \
+ #elif defined(NPY_CPU_PPC) \
+ || defined(NPY_CPU_SPARC) \
+ || defined(NPY_CPU_S390) \
+ || defined(NPY_CPU_HPPA) \
+ || defined(NPY_CPU_PPC64) \
+ || defined(NPY_CPU_ARMEB) \
+ || defined(NPY_CPU_ARMEB_AARCH32) \
+ || defined(NPY_CPU_ARMEB_AARCH64) \
+ || defined(NPY_CPU_SH_BE) \
+ || defined(NPY_CPU_MIPSEB) \
+ || defined(NPY_CPU_OR1K) \
+ || defined(NPY_CPU_M68K) \
|| defined(NPY_CPU_ARCEB)
#define NPY_BYTE_ORDER NPY_BIG_ENDIAN
#else
diff --git a/numpy/core/include/numpy/ufuncobject.h b/numpy/core/include/numpy/ufuncobject.h
index 4b1b3d325..90d837a9b 100644
--- a/numpy/core/include/numpy/ufuncobject.h
+++ b/numpy/core/include/numpy/ufuncobject.h
@@ -209,9 +209,33 @@ typedef struct _tagPyUFuncObject {
* set by nditer object.
*/
npy_uint32 iter_flags;
+
+ /* New in NPY_API_VERSION 0x0000000D and above */
+
+ /*
+ * for each core_num_dim_ix distinct dimension names,
+ * the possible "frozen" size (-1 if not frozen).
+ */
+ npy_intp *core_dim_sizes;
+
+ /*
+ * for each distinct core dimension, a set of UFUNC_CORE_DIM* flags
+ */
+ npy_uint32 *core_dim_flags;
+
+ /* Identity for reduction, when identity == PyUFunc_IdentityValue */
+ PyObject *identity_value;
+
} PyUFuncObject;
#include "arrayobject.h"
+/* Generalized ufunc; 0x0001 reserved for possible use as CORE_ENABLED */
+/* the core dimension's size will be determined by the operands. */
+#define UFUNC_CORE_DIM_SIZE_INFERRED 0x0002
+/* the core dimension may be absent */
+#define UFUNC_CORE_DIM_CAN_IGNORE 0x0004
+/* flags inferred during execution */
+#define UFUNC_CORE_DIM_MISSING 0x00040000
#define UFUNC_ERR_IGNORE 0
#define UFUNC_ERR_WARN 1
@@ -276,6 +300,12 @@ typedef struct _tagPyUFuncObject {
* This case allows reduction with multiple axes at once.
*/
#define PyUFunc_ReorderableNone -2
+/*
+ * UFunc unit is stored in identity_value, and the order of operations
+ * can be reordered. This case allows reduction with multiple axes at once.
+ */
+#define PyUFunc_IdentityValue -3
+
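
These flags back the flexible core dimensions of NEP 20; in 1.16 they are
visible from Python through the gufunc-backed `matmul`, whose signature is
``(n?,k),(k,m?)->(n?,m?)``. A sketch of what the `?` (CAN_IGNORE) dimensions
permit:

    >>> np.matmul([1, 2], [[1, 2], [3, 4]])   # n? absent: vector @ matrix
    array([ 7, 10])
    >>> np.matmul([1, 2], [3, 4])             # n? and m? absent: scalar
    11
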
#define UFUNC_REDUCE 0
#define UFUNC_ACCUMULATE 1
@@ -314,22 +344,6 @@ typedef struct _loop1d_info {
&(arg)->first))) \
goto fail;} while (0)
-
-/* keep in sync with ieee754.c.src */
-#if defined(sun) || defined(__BSD__) || defined(__OpenBSD__) || \
- (defined(__FreeBSD__) && (__FreeBSD_version < 502114)) || \
- defined(__NetBSD__) || \
- defined(__GLIBC__) || defined(__APPLE__) || \
- defined(__CYGWIN__) || defined(__MINGW32__) || \
- (defined(__FreeBSD__) && (__FreeBSD_version >= 502114)) || \
- defined(_AIX) || \
- defined(_MSC_VER) || \
- defined(__osf__) && defined(__alpha)
-#else
-#define NO_FLOATING_POINT_SUPPORT
-#endif
-
-
/*
* THESE MACROS ARE DEPRECATED.
* Use npy_set_floatstatus_* in the npymath library.
diff --git a/numpy/core/machar.py b/numpy/core/machar.py
index 7578544fe..91fb4eda8 100644
--- a/numpy/core/machar.py
+++ b/numpy/core/machar.py
@@ -11,9 +11,11 @@ __all__ = ['MachAr']
from numpy.core.fromnumeric import any
from numpy.core.numeric import errstate
+from numpy.core.overrides import set_module
# Need to speed this up...especially for longfloat
+@set_module('numpy')
class MachAr(object):
"""
Diagnosing machine parameters.
diff --git a/numpy/core/memmap.py b/numpy/core/memmap.py
index 536fa6094..82bc4707c 100644
--- a/numpy/core/memmap.py
+++ b/numpy/core/memmap.py
@@ -2,7 +2,10 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from .numeric import uint8, ndarray, dtype
-from numpy.compat import long, basestring, is_pathlib_path
+from numpy.compat import (
+ long, basestring, os_fspath, contextlib_nullcontext, is_pathlib_path
+)
+from numpy.core.overrides import set_module
__all__ = ['memmap']
@@ -17,6 +20,8 @@ mode_equivalents = {
"write":"w+"
}
+
+@set_module('numpy')
class memmap(ndarray):
"""Create a memory-map to an array stored in a *binary* file on disk.
@@ -211,78 +216,69 @@ class memmap(ndarray):
raise ValueError("mode must be one of %s" %
(valid_filemodes + list(mode_equivalents.keys())))
- if hasattr(filename, 'read'):
- fid = filename
- own_file = False
- elif is_pathlib_path(filename):
- fid = filename.open((mode == 'c' and 'r' or mode)+'b')
- own_file = True
- else:
- fid = open(filename, (mode == 'c' and 'r' or mode)+'b')
- own_file = True
-
- if (mode == 'w+') and shape is None:
+ if mode == 'w+' and shape is None:
raise ValueError("shape must be given")
- fid.seek(0, 2)
- flen = fid.tell()
- descr = dtypedescr(dtype)
- _dbytes = descr.itemsize
-
- if shape is None:
- bytes = flen - offset
- if (bytes % _dbytes):
- fid.close()
- raise ValueError("Size of available data is not a "
- "multiple of the data-type size.")
- size = bytes // _dbytes
- shape = (size,)
- else:
- if not isinstance(shape, tuple):
- shape = (shape,)
- size = np.intp(1) # avoid default choice of np.int_, which might overflow
- for k in shape:
- size *= k
-
- bytes = long(offset + size*_dbytes)
-
- if mode == 'w+' or (mode == 'r+' and flen < bytes):
- fid.seek(bytes - 1, 0)
- fid.write(b'\0')
- fid.flush()
-
- if mode == 'c':
- acc = mmap.ACCESS_COPY
- elif mode == 'r':
- acc = mmap.ACCESS_READ
- else:
- acc = mmap.ACCESS_WRITE
-
- start = offset - offset % mmap.ALLOCATIONGRANULARITY
- bytes -= start
- array_offset = offset - start
- mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start)
-
- self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm,
- offset=array_offset, order=order)
- self._mmap = mm
- self.offset = offset
- self.mode = mode
-
- if isinstance(filename, basestring):
- self.filename = os.path.abspath(filename)
- elif is_pathlib_path(filename):
- self.filename = filename.resolve()
- # py3 returns int for TemporaryFile().name
- elif (hasattr(filename, "name") and
- isinstance(filename.name, basestring)):
- self.filename = os.path.abspath(filename.name)
- # same as memmap copies (e.g. memmap + 1)
+ if hasattr(filename, 'read'):
+ f_ctx = contextlib_nullcontext(filename)
else:
- self.filename = None
-
- if own_file:
- fid.close()
+ f_ctx = open(os_fspath(filename), ('r' if mode == 'c' else mode)+'b')
+
+ with f_ctx as fid:
+ fid.seek(0, 2)
+ flen = fid.tell()
+ descr = dtypedescr(dtype)
+ _dbytes = descr.itemsize
+
+ if shape is None:
+ bytes = flen - offset
+ if bytes % _dbytes:
+ raise ValueError("Size of available data is not a "
+ "multiple of the data-type size.")
+ size = bytes // _dbytes
+ shape = (size,)
+ else:
+ if not isinstance(shape, tuple):
+ shape = (shape,)
+ size = np.intp(1) # avoid default choice of np.int_, which might overflow
+ for k in shape:
+ size *= k
+
+ bytes = long(offset + size*_dbytes)
+
+ if mode == 'w+' or (mode == 'r+' and flen < bytes):
+ fid.seek(bytes - 1, 0)
+ fid.write(b'\0')
+ fid.flush()
+
+ if mode == 'c':
+ acc = mmap.ACCESS_COPY
+ elif mode == 'r':
+ acc = mmap.ACCESS_READ
+ else:
+ acc = mmap.ACCESS_WRITE
+
+ start = offset - offset % mmap.ALLOCATIONGRANULARITY
+ bytes -= start
+ array_offset = offset - start
+ mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start)
+
+ self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm,
+ offset=array_offset, order=order)
+ self._mmap = mm
+ self.offset = offset
+ self.mode = mode
+
+ if is_pathlib_path(filename):
+ # special case - if we were constructed with a pathlib.Path,
+ # then filename is a path object, not a string
+ self.filename = filename.resolve()
+ elif hasattr(fid, "name") and isinstance(fid.name, basestring):
+ # py3 returns int for TemporaryFile().name
+ self.filename = os.path.abspath(fid.name)
+ # same as memmap copies (e.g. memmap + 1)
+ else:
+ self.filename = None
return self
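
`contextlib_nullcontext` (imported above from `numpy.compat`) is, to a first
approximation, a backport of Python 3.7's `contextlib.nullcontext`; a minimal
sketch, assuming that behaviour:

    class nullcontext(object):
        # Wrap a value in a no-op context manager, so that caller-owned
        # file objects are not closed when the `with` block exits.
        def __init__(self, enter_result=None):
            self.enter_result = enter_result

        def __enter__(self):
            return self.enter_result

        def __exit__(self, *excinfo):
            pass

This is what lets the rewrite close the file only when `memmap` opened it
itself: a caller-supplied file object goes through the no-op wrapper, while a
path goes through a real `open()` that the `with` statement closes.
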
diff --git a/numpy/core/multiarray.py b/numpy/core/multiarray.py
new file mode 100644
index 000000000..df0ed2df4
--- /dev/null
+++ b/numpy/core/multiarray.py
@@ -0,0 +1,1561 @@
+"""
+Create the numpy.core.multiarray namespace for backward compatibility. In v1.16
+the multiarray and umath c-extension modules were merged into a single
+_multiarray_umath extension module. So we replicate the old namespace
+by importing from the extension module.
+
+"""
+
+import functools
+import warnings
+
+from . import overrides
+from . import _multiarray_umath
+import numpy as np
+from numpy.core._multiarray_umath import *
+from numpy.core._multiarray_umath import (
+ _fastCopyAndTranspose, _flagdict, _insert, _reconstruct, _vec_string,
+ _ARRAY_API, _monotonicity
+ )
+
+__all__ = [
+ '_ARRAY_API', 'ALLOW_THREADS', 'BUFSIZE', 'CLIP', 'DATETIMEUNITS',
+ 'ITEM_HASOBJECT', 'ITEM_IS_POINTER', 'LIST_PICKLE', 'MAXDIMS',
+ 'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'NEEDS_INIT', 'NEEDS_PYAPI',
+ 'RAISE', 'USE_GETITEM', 'USE_SETITEM', 'WRAP', '_fastCopyAndTranspose',
+ '_flagdict', '_insert', '_reconstruct', '_vec_string', '_monotonicity',
+ 'add_docstring', 'arange', 'array', 'bincount', 'broadcast',
+ 'busday_count', 'busday_offset', 'busdaycalendar', 'can_cast',
+ 'compare_chararrays', 'concatenate', 'copyto', 'correlate', 'correlate2',
+ 'count_nonzero', 'c_einsum', 'datetime_as_string', 'datetime_data',
+ 'digitize', 'dot', 'dragon4_positional', 'dragon4_scientific', 'dtype',
+ 'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat',
+ 'frombuffer', 'fromfile', 'fromiter', 'fromstring', 'getbuffer', 'inner',
+ 'int_asbuffer', 'interp', 'interp_complex', 'is_busday', 'lexsort',
+ 'matmul', 'may_share_memory', 'min_scalar_type', 'ndarray', 'nditer',
+ 'nested_iters', 'newbuffer', 'normalize_axis_index', 'packbits',
+ 'promote_types', 'putmask', 'ravel_multi_index', 'result_type', 'scalar',
+ 'set_datetimeparse_function', 'set_legacy_print_mode', 'set_numeric_ops',
+ 'set_string_function', 'set_typeDict', 'shares_memory', 'test_interrupt',
+ 'tracemalloc_domain', 'typeinfo', 'unpackbits', 'unravel_index', 'vdot',
+ 'where', 'zeros']
+
+
+arange.__module__ = 'numpy'
+array.__module__ = 'numpy'
+datetime_data.__module__ = 'numpy'
+empty.__module__ = 'numpy'
+frombuffer.__module__ = 'numpy'
+fromfile.__module__ = 'numpy'
+fromiter.__module__ = 'numpy'
+frompyfunc.__module__ = 'numpy'
+fromstring.__module__ = 'numpy'
+geterrobj.__module__ = 'numpy'
+may_share_memory.__module__ = 'numpy'
+nested_iters.__module__ = 'numpy'
+promote_types.__module__ = 'numpy'
+set_numeric_ops.__module__ = 'numpy'
+seterrobj.__module__ = 'numpy'
+zeros.__module__ = 'numpy'
+
+
+# We can't verify dispatcher signatures because NumPy's C functions don't
+# support introspection.
+array_function_from_c_func_and_dispatcher = functools.partial(
+ overrides.array_function_from_dispatcher,
+ module='numpy', docs_from_dispatcher=True, verify=False)
+
+
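
Under NEP 18, the dispatchers defined below only name which arguments to scan
for an ``__array_function__`` attribute; the override itself lives on the
argument's type. A toy sketch (in 1.16 the protocol is opt-in via the
``NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=1`` environment variable):

    class MyArray(object):
        # Minimal NEP 18 participant; plain lists back the storage.
        def __init__(self, data):
            self.data = data

        def __array_function__(self, func, types, args, kwargs):
            if func is np.concatenate:
                return MyArray(sum((a.data for a in args[0]), []))
            return NotImplemented

    # With the protocol enabled, np.concatenate([MyArray([1]), MyArray([2])])
    # returns MyArray([1, 2]) instead of coercing to ndarray.
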
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like)
+def empty_like(prototype, dtype=None, order=None, subok=None):
+ """
+ empty_like(prototype, dtype=None, order='K', subok=True)
+
+ Return a new array with the same shape and type as a given array.
+
+ Parameters
+ ----------
+ prototype : array_like
+ The shape and data-type of `prototype` define these same attributes
+ of the returned array.
+ dtype : data-type, optional
+ Overrides the data type of the result.
+
+ .. versionadded:: 1.6.0
+ order : {'C', 'F', 'A', or 'K'}, optional
+ Overrides the memory layout of the result. 'C' means C-order,
+ 'F' means F-order, 'A' means 'F' if ``prototype`` is Fortran
+ contiguous, 'C' otherwise. 'K' means match the layout of ``prototype``
+ as closely as possible.
+
+ .. versionadded:: 1.6.0
+ subok : bool, optional.
+ If True, then the newly created array will use the sub-class
+ type of 'a', otherwise it will be a base-class array. Defaults
+ to True.
+
+ Returns
+ -------
+ out : ndarray
+ Array of uninitialized (arbitrary) data with the same
+ shape and type as `prototype`.
+
+ See Also
+ --------
+ ones_like : Return an array of ones with shape and type of input.
+ zeros_like : Return an array of zeros with shape and type of input.
+ full_like : Return a new array with shape of input filled with value.
+ empty : Return a new uninitialized array.
+
+ Notes
+ -----
+ This function does *not* initialize the returned array; to do that use
+ `zeros_like` or `ones_like` instead. It may be marginally faster than
+ the functions that do set the array values.
+
+ Examples
+ --------
+ >>> a = ([1,2,3], [4,5,6]) # a is array-like
+ >>> np.empty_like(a)
+ array([[-1073741821, -1073741821, 3], #random
+ [ 0, 0, -1073741821]])
+ >>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
+ >>> np.empty_like(a)
+ array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000],#random
+ [ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]])
+
+ """
+ return (prototype,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate)
+def concatenate(arrays, axis=None, out=None):
+ """
+ concatenate((a1, a2, ...), axis=0, out=None)
+
+ Join a sequence of arrays along an existing axis.
+
+ Parameters
+ ----------
+ a1, a2, ... : sequence of array_like
+ The arrays must have the same shape, except in the dimension
+ corresponding to `axis` (the first, by default).
+ axis : int, optional
+ The axis along which the arrays will be joined. If axis is None,
+ arrays are flattened before use. Default is 0.
+ out : ndarray, optional
+ If provided, the destination to place the result. The shape must be
+ correct, matching that of what concatenate would have returned if no
+ out argument were specified.
+
+ Returns
+ -------
+ res : ndarray
+ The concatenated array.
+
+ See Also
+ --------
+ ma.concatenate : Concatenate function that preserves input masks.
+ array_split : Split an array into multiple sub-arrays of equal or
+ near-equal size.
+ split : Split array into a list of multiple sub-arrays of equal size.
+ hsplit : Split array into multiple sub-arrays horizontally (column wise)
+ vsplit : Split array into multiple sub-arrays vertically (row wise)
+ dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
+ stack : Stack a sequence of arrays along a new axis.
+ hstack : Stack arrays in sequence horizontally (column wise)
+ vstack : Stack arrays in sequence vertically (row wise)
+ dstack : Stack arrays in sequence depth wise (along third dimension)
+ block : Assemble arrays from blocks.
+
+ Notes
+ -----
+ When one or more of the arrays to be concatenated is a MaskedArray,
+ this function will return a MaskedArray object instead of an ndarray,
+ but the input masks are *not* preserved. In cases where a MaskedArray
+ is expected as input, use the ma.concatenate function from the masked
+ array module instead.
+
+ Examples
+ --------
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> b = np.array([[5, 6]])
+ >>> np.concatenate((a, b), axis=0)
+ array([[1, 2],
+ [3, 4],
+ [5, 6]])
+ >>> np.concatenate((a, b.T), axis=1)
+ array([[1, 2, 5],
+ [3, 4, 6]])
+ >>> np.concatenate((a, b), axis=None)
+ array([1, 2, 3, 4, 5, 6])
+
+ This function will not preserve masking of MaskedArray inputs.
+
+ >>> a = np.ma.arange(3)
+ >>> a[1] = np.ma.masked
+ >>> b = np.arange(2, 5)
+ >>> a
+ masked_array(data=[0, --, 2],
+ mask=[False, True, False],
+ fill_value=999999)
+ >>> b
+ array([2, 3, 4])
+ >>> np.concatenate([a, b])
+ masked_array(data=[0, 1, 2, 2, 3, 4],
+ mask=False,
+ fill_value=999999)
+ >>> np.ma.concatenate([a, b])
+ masked_array(data=[0, --, 2, 2, 3, 4],
+ mask=[False, True, False, False, False, False],
+ fill_value=999999)
+
+ """
+ for array in arrays:
+ yield array
+ yield out
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.inner)
+def inner(a, b):
+ """
+ inner(a, b)
+
+ Inner product of two arrays.
+
+ Ordinary inner product of vectors for 1-D arrays (without complex
+ conjugation), in higher dimensions a sum product over the last axes.
+
+ Parameters
+ ----------
+ a, b : array_like
+ If `a` and `b` are nonscalar, their last dimensions must match.
+
+ Returns
+ -------
+ out : ndarray
+ `out.shape = a.shape[:-1] + b.shape[:-1]`
+
+ Raises
+ ------
+ ValueError
+ If the last dimension of `a` and `b` has different size.
+
+ See Also
+ --------
+ tensordot : Sum products over arbitrary axes.
+ dot : Generalized matrix product, using the second-to-last dimension of `b`.
+ einsum : Einstein summation convention.
+
+ Notes
+ -----
+ For vectors (1-D arrays) it computes the ordinary inner-product::
+
+ np.inner(a, b) = sum(a[:]*b[:])
+
+ More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::
+
+ np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))
+
+ or explicitly::
+
+ np.inner(a, b)[i0,...,ir-1,j0,...,js-1]
+ = sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])
+
+ In addition `a` or `b` may be scalars, in which case::
+
+ np.inner(a,b) = a*b
+
+ Examples
+ --------
+ Ordinary inner product for vectors:
+
+ >>> a = np.array([1,2,3])
+ >>> b = np.array([0,1,0])
+ >>> np.inner(a, b)
+ 2
+
+ A multidimensional example:
+
+ >>> a = np.arange(24).reshape((2,3,4))
+ >>> b = np.arange(4)
+ >>> np.inner(a, b)
+ array([[ 14, 38, 62],
+ [ 86, 110, 134]])
+
+ An example where `b` is a scalar:
+
+ >>> np.inner(np.eye(2), 7)
+ array([[ 7., 0.],
+ [ 0., 7.]])
+
+ """
+ return (a, b)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.where)
+def where(condition, x=None, y=None):
+ """
+ where(condition, [x, y])
+
+ Return elements chosen from `x` or `y` depending on `condition`.
+
+ .. note::
+ When only `condition` is provided, this function is a shorthand for
+ ``np.asarray(condition).nonzero()``. Using `nonzero` directly should be
+ preferred, as it behaves correctly for subclasses. The rest of this
+ documentation covers only the case where all three arguments are
+ provided.
+
+ Parameters
+ ----------
+ condition : array_like, bool
+ Where True, yield `x`, otherwise yield `y`.
+ x, y : array_like
+ Values from which to choose. `x`, `y` and `condition` need to be
+ broadcastable to some shape.
+
+ Returns
+ -------
+ out : ndarray
+ An array with elements from `x` where `condition` is True, and elements
+ from `y` elsewhere.
+
+ See Also
+ --------
+ choose
+ nonzero : The function that is called when x and y are omitted
+
+ Notes
+ -----
+ If all the arrays are 1-D, `where` is equivalent to::
+
+ [xv if c else yv
+ for c, xv, yv in zip(condition, x, y)]
+
+ Examples
+ --------
+ >>> a = np.arange(10)
+ >>> a
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+ >>> np.where(a < 5, a, 10*a)
+ array([ 0, 1, 2, 3, 4, 50, 60, 70, 80, 90])
+
+ This can be used on multidimensional arrays too:
+
+ >>> np.where([[True, False], [True, True]],
+ ... [[1, 2], [3, 4]],
+ ... [[9, 8], [7, 6]])
+ array([[1, 8],
+ [3, 4]])
+
+ The shapes of x, y, and the condition are broadcast together:
+
+ >>> x, y = np.ogrid[:3, :4]
+ >>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast
+ array([[10, 0, 0, 0],
+ [10, 11, 1, 1],
+ [10, 11, 12, 2]])
+
+ >>> a = np.array([[0, 1, 2],
+ ... [0, 2, 4],
+ ... [0, 3, 6]])
+ >>> np.where(a < 4, a, -1) # -1 is broadcast
+ array([[ 0, 1, 2],
+ [ 0, 2, -1],
+ [ 0, 3, -1]])
+ """
+ return (condition, x, y)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort)
+def lexsort(keys, axis=None):
+ """
+ lexsort(keys, axis=-1)
+
+ Perform an indirect stable sort using a sequence of keys.
+
+ Given multiple sorting keys, which can be interpreted as columns in a
+ spreadsheet, lexsort returns an array of integer indices that describes
+ the sort order by multiple columns. The last key in the sequence is used
+ for the primary sort order, the second-to-last key for the secondary sort
+ order, and so on. The keys argument must be a sequence of objects that
+ can be converted to arrays of the same shape. If a 2D array is provided
+ for the keys argument, its rows are interpreted as the sorting keys, and
+ sorting is according to the last row, the second-to-last row, and so on.
+
+ Parameters
+ ----------
+ keys : (k, N) array or tuple containing k (N,)-shaped sequences
+ The `k` different "columns" to be sorted. The last column (or row if
+ `keys` is a 2D array) is the primary sort key.
+ axis : int, optional
+ Axis to be indirectly sorted. By default, sort over the last axis.
+
+ Returns
+ -------
+ indices : (N,) ndarray of ints
+ Array of indices that sort the keys along the specified axis.
+
+ See Also
+ --------
+ argsort : Indirect sort.
+ ndarray.sort : In-place sort.
+ sort : Return a sorted copy of an array.
+
+ Examples
+ --------
+ Sort names: first by surname, then by name.
+
+ >>> surnames = ('Hertz', 'Galilei', 'Hertz')
+ >>> first_names = ('Heinrich', 'Galileo', 'Gustav')
+ >>> ind = np.lexsort((first_names, surnames))
+ >>> ind
+ array([1, 2, 0])
+
+ >>> [surnames[i] + ", " + first_names[i] for i in ind]
+ ['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich']
+
+ Sort two columns of numbers:
+
+ >>> a = [1,5,1,4,3,4,4] # First column
+ >>> b = [9,4,0,4,0,2,1] # Second column
+ >>> ind = np.lexsort((b,a)) # Sort by a, then by b
+ >>> print(ind)
+ [2 0 4 6 5 3 1]
+
+ >>> [(a[i],b[i]) for i in ind]
+ [(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]
+
+ Note that sorting is first according to the elements of ``a``.
+ Secondary sorting is according to the elements of ``b``.
+
+ A normal ``argsort`` would have yielded:
+
+ >>> [(a[i],b[i]) for i in np.argsort(a)]
+ [(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)]
+
+ Structured arrays are sorted lexically by ``argsort``:
+
+ >>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)],
+ ... dtype=np.dtype([('x', int), ('y', int)]))
+
+ >>> np.argsort(x) # or np.argsort(x, order=('x', 'y'))
+ array([2, 0, 4, 6, 5, 3, 1])
+
+ """
+ if isinstance(keys, tuple):
+ return keys
+ else:
+ return (keys,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast)
+def can_cast(from_, to, casting=None):
+ """
+ can_cast(from_, to, casting='safe')
+
+ Returns True if cast between data types can occur according to the
+ casting rule. If from is a scalar or array scalar, also returns
+ True if the scalar value can be cast without overflow or truncation
+ to an integer.
+
+ Parameters
+ ----------
+ from_ : dtype, dtype specifier, scalar, or array
+ Data type, scalar, or array to cast from.
+ to : dtype or dtype specifier
+ Data type to cast to.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ Controls what kind of data casting may occur.
+
+ * 'no' means the data types should not be cast at all.
+ * 'equiv' means only byte-order changes are allowed.
+ * 'safe' means only casts which can preserve values are allowed.
+ * 'same_kind' means only safe casts or casts within a kind,
+ like float64 to float32, are allowed.
+ * 'unsafe' means any data conversions may be done.
+
+ Returns
+ -------
+ out : bool
+ True if cast can occur according to the casting rule.
+
+ Notes
+ -----
+ Starting in NumPy 1.9, can_cast returns False in 'safe' casting mode when
+ casting an integer/float dtype to a string dtype whose length is not long
+ enough to store the maximum integer/float value converted to a string.
+ Previously, can_cast in 'safe' mode returned True for an integer/float
+ dtype and a string dtype of any length.
+
+ See also
+ --------
+ dtype, result_type
+
+ Examples
+ --------
+ Basic examples
+
+ >>> np.can_cast(np.int32, np.int64)
+ True
+ >>> np.can_cast(np.float64, complex)
+ True
+ >>> np.can_cast(complex, float)
+ False
+
+ >>> np.can_cast('i8', 'f8')
+ True
+ >>> np.can_cast('i8', 'f4')
+ False
+ >>> np.can_cast('i4', 'S4')
+ False
+
+ Casting scalars
+
+ >>> np.can_cast(100, 'i1')
+ True
+ >>> np.can_cast(150, 'i1')
+ False
+ >>> np.can_cast(150, 'u1')
+ True
+
+ >>> np.can_cast(3.5e100, np.float32)
+ False
+ >>> np.can_cast(1000.0, np.float32)
+ True
+
+ Array scalar checks the value, array does not
+
+ >>> np.can_cast(np.array(1000.0), np.float32)
+ True
+ >>> np.can_cast(np.array([1000.0]), np.float32)
+ False
+
+ Using the casting rules
+
+ >>> np.can_cast('i8', 'i8', 'no')
+ True
+ >>> np.can_cast('<i8', '>i8', 'no')
+ False
+
+ >>> np.can_cast('<i8', '>i8', 'equiv')
+ True
+ >>> np.can_cast('<i4', '>i8', 'equiv')
+ False
+
+ >>> np.can_cast('<i4', '>i8', 'safe')
+ True
+ >>> np.can_cast('<i8', '>i4', 'safe')
+ False
+
+ >>> np.can_cast('<i8', '>i4', 'same_kind')
+ True
+ >>> np.can_cast('<i8', '>u4', 'same_kind')
+ False
+
+ >>> np.can_cast('<i8', '>u4', 'unsafe')
+ True
+
+ """
+ return (from_,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type)
+def min_scalar_type(a):
+ """
+ min_scalar_type(a)
+
+ For scalar ``a``, returns the data type with the smallest size
+ and smallest scalar kind which can hold its value. For a non-scalar
+ array ``a``, returns its dtype unmodified.
+
+ Floating point values are not demoted to integers,
+ and complex values are not demoted to floats.
+
+ Parameters
+ ----------
+ a : scalar or array_like
+ The value whose minimal data type is to be found.
+
+ Returns
+ -------
+ out : dtype
+ The minimal data type.
+
+ Notes
+ -----
+ .. versionadded:: 1.6.0
+
+ See Also
+ --------
+ result_type, promote_types, dtype, can_cast
+
+ Examples
+ --------
+ >>> np.min_scalar_type(10)
+ dtype('uint8')
+
+ >>> np.min_scalar_type(-260)
+ dtype('int16')
+
+ >>> np.min_scalar_type(3.1)
+ dtype('float16')
+
+ >>> np.min_scalar_type(1e50)
+ dtype('float64')
+
+ >>> np.min_scalar_type(np.arange(4,dtype='f8'))
+ dtype('float64')
+
+ """
+ return (a,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.result_type)
+def result_type(*arrays_and_dtypes):
+ """
+ result_type(*arrays_and_dtypes)
+
+ Returns the type that results from applying the NumPy
+ type promotion rules to the arguments.
+
+ Type promotion in NumPy works similarly to the rules in languages
+ like C++, with some slight differences. When both scalars and
+ arrays are used, the array's type takes precedence and the actual value
+ of the scalar is taken into account.
+
+ For example, calculating 3*a, where a is an array of 32-bit floats,
+ intuitively should result in a 32-bit float output. If the 3 is a
+ 32-bit integer, the NumPy rules indicate it can't convert losslessly
+ into a 32-bit float, so a 64-bit float should be the result type.
+ By examining the value of the constant, '3', we see that it fits in
+ an 8-bit integer, which can be cast losslessly into the 32-bit float.
+
+ Parameters
+ ----------
+ arrays_and_dtypes : list of arrays and dtypes
+ The operands of some operation whose result type is needed.
+
+ Returns
+ -------
+ out : dtype
+ The result type.
+
+ See also
+ --------
+ dtype, promote_types, min_scalar_type, can_cast
+
+ Notes
+ -----
+ .. versionadded:: 1.6.0
+
+ The specific algorithm used is as follows.
+
+ Categories are determined by first checking which of boolean,
+ integer (int/uint), or floating point (float/complex) the maximum
+ kind of all the arrays and the scalars are.
+
+ If there are only scalars or the maximum category of the scalars
+ is higher than the maximum category of the arrays,
+ the data types are combined with :func:`promote_types`
+ to produce the return value.
+
+ Otherwise, `min_scalar_type` is called on each array, and
+ the resulting data types are all combined with :func:`promote_types`
+ to produce the return value.
+
+ The set of int values is not a subset of the uint values for types
+ with the same number of bits, something not reflected in
+ :func:`min_scalar_type`, but handled as a special case in `result_type`.
+
+ Examples
+ --------
+ >>> np.result_type(3, np.arange(7, dtype='i1'))
+ dtype('int8')
+
+ >>> np.result_type('i4', 'c8')
+ dtype('complex128')
+
+ >>> np.result_type(3.0, -2)
+ dtype('float64')
+
+ """
+ return arrays_and_dtypes
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.dot)
+def dot(a, b, out=None):
+ """
+ dot(a, b, out=None)
+
+ Dot product of two arrays. Specifically,
+
+ - If both `a` and `b` are 1-D arrays, it is inner product of vectors
+ (without complex conjugation).
+
+ - If both `a` and `b` are 2-D arrays, it is matrix multiplication,
+ but using :func:`matmul` or ``a @ b`` is preferred.
+
+ - If either `a` or `b` is 0-D (scalar), it is equivalent to :func:`multiply`
+ and using ``numpy.multiply(a, b)`` or ``a * b`` is preferred.
+
+ - If `a` is an N-D array and `b` is a 1-D array, it is a sum product over
+ the last axis of `a` and `b`.
+
+ - If `a` is an N-D array and `b` is an M-D array (where ``M>=2``), it is a
+ sum product over the last axis of `a` and the second-to-last axis of `b`::
+
+ dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])
+
+ Parameters
+ ----------
+ a : array_like
+ First argument.
+ b : array_like
+ Second argument.
+ out : ndarray, optional
+ Output argument. This must have the exact kind that would be returned
+ if it was not used. In particular, it must have the right type, must be
+ C-contiguous, and its dtype must be the dtype that would be returned
+ for `dot(a,b)`. This is a performance feature. Therefore, if these
+ conditions are not met, an exception is raised, instead of attempting
+ to be flexible.
+
+ Returns
+ -------
+ output : ndarray
+ Returns the dot product of `a` and `b`. If `a` and `b` are both
+ scalars or both 1-D arrays then a scalar is returned; otherwise
+ an array is returned.
+ If `out` is given, then it is returned.
+
+ Raises
+ ------
+ ValueError
+ If the last dimension of `a` is not the same size as
+ the second-to-last dimension of `b`.
+
+ See Also
+ --------
+ vdot : Complex-conjugating dot product.
+ tensordot : Sum products over arbitrary axes.
+ einsum : Einstein summation convention.
+ matmul : '@' operator as method with out parameter.
+
+ Examples
+ --------
+ >>> np.dot(3, 4)
+ 12
+
+ Neither argument is complex-conjugated:
+
+ >>> np.dot([2j, 3j], [2j, 3j])
+ (-13+0j)
+
+ For 2-D arrays it is the matrix product:
+
+ >>> a = [[1, 0], [0, 1]]
+ >>> b = [[4, 1], [2, 2]]
+ >>> np.dot(a, b)
+ array([[4, 1],
+ [2, 2]])
+
+ >>> a = np.arange(3*4*5*6).reshape((3,4,5,6))
+ >>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3))
+ >>> np.dot(a, b)[2,3,2,1,2,2]
+ 499128
+ >>> sum(a[2,3,2,:] * b[1,2,:,2])
+ 499128
+
+ """
+ return (a, b, out)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot)
+def vdot(a, b):
+ """
+ vdot(a, b)
+
+ Return the dot product of two vectors.
+
+ The vdot(`a`, `b`) function handles complex numbers differently than
+ dot(`a`, `b`). If the first argument is complex the complex conjugate
+ of the first argument is used for the calculation of the dot product.
+
+ Note that `vdot` handles multidimensional arrays differently than `dot`:
+ it does *not* perform a matrix product, but flattens input arguments
+ to 1-D vectors first. Consequently, it should only be used for vectors.
+
+ Parameters
+ ----------
+ a : array_like
+ If `a` is complex the complex conjugate is taken before calculation
+ of the dot product.
+ b : array_like
+ Second argument to the dot product.
+
+ Returns
+ -------
+ output : ndarray
+ Dot product of `a` and `b`. Can be an int, float, or
+ complex depending on the types of `a` and `b`.
+
+ See Also
+ --------
+ dot : Return the dot product without using the complex conjugate of the
+ first argument.
+
+ Examples
+ --------
+ >>> a = np.array([1+2j,3+4j])
+ >>> b = np.array([5+6j,7+8j])
+ >>> np.vdot(a, b)
+ (70-8j)
+ >>> np.vdot(b, a)
+ (70+8j)
+
+ Note that higher-dimensional arrays are flattened!
+
+ >>> a = np.array([[1, 4], [5, 6]])
+ >>> b = np.array([[4, 1], [2, 2]])
+ >>> np.vdot(a, b)
+ 30
+ >>> np.vdot(b, a)
+ 30
+ >>> 1*4 + 4*1 + 5*2 + 6*2
+ 30
+
+ """
+ return (a, b)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount)
+def bincount(x, weights=None, minlength=None):
+ """
+ bincount(x, weights=None, minlength=0)
+
+ Count number of occurrences of each value in array of non-negative ints.
+
+ The number of bins (of size 1) is one larger than the largest value in
+ `x`. If `minlength` is specified, there will be at least this number
+ of bins in the output array (though it will be longer if necessary,
+ depending on the contents of `x`).
+ Each bin gives the number of occurrences of its index value in `x`.
+ If `weights` is specified the input array is weighted by it, i.e. if a
+ value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead
+ of ``out[n] += 1``.
+
+ Parameters
+ ----------
+ x : array_like, 1 dimension, nonnegative ints
+ Input array.
+ weights : array_like, optional
+ Weights, array of the same shape as `x`.
+ minlength : int, optional
+ A minimum number of bins for the output array.
+
+ .. versionadded:: 1.6.0
+
+ Returns
+ -------
+ out : ndarray of ints
+ The result of binning the input array.
+ The length of `out` is equal to ``np.amax(x)+1``.
+
+ Raises
+ ------
+ ValueError
+ If the input is not 1-dimensional, or contains elements with negative
+ values, or if `minlength` is negative.
+ TypeError
+ If the type of the input is float or complex.
+
+ See Also
+ --------
+ histogram, digitize, unique
+
+ Examples
+ --------
+ >>> np.bincount(np.arange(5))
+ array([1, 1, 1, 1, 1])
+ >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
+ array([1, 3, 1, 1, 0, 0, 0, 1])
+
+ >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
+ >>> np.bincount(x).size == np.amax(x)+1
+ True
+
+ The input array needs to be of integer dtype, otherwise a
+ TypeError is raised:
+
+ >>> np.bincount(np.arange(5, dtype=float))
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ TypeError: array cannot be safely cast to required type
+
+ A possible use of ``bincount`` is to perform sums over
+ variable-size chunks of an array, using the ``weights`` keyword.
+
+ >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
+ >>> x = np.array([0, 1, 1, 2, 2, 2])
+ >>> np.bincount(x, weights=w)
+ array([ 0.3, 0.7, 1.1])
+
+ """
+ return (x, weights)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index)
+def ravel_multi_index(multi_index, dims, mode=None, order=None):
+ """
+ ravel_multi_index(multi_index, dims, mode='raise', order='C')
+
+ Converts a tuple of index arrays into an array of flat
+ indices, applying boundary modes to the multi-index.
+
+ Parameters
+ ----------
+ multi_index : tuple of array_like
+ A tuple of integer arrays, one array for each dimension.
+ dims : tuple of ints
+ The shape of array into which the indices from ``multi_index`` apply.
+ mode : {'raise', 'wrap', 'clip'}, optional
+ Specifies how out-of-bounds indices are handled. Can specify
+ either one mode or a tuple of modes, one mode per index.
+
+ * 'raise' -- raise an error (default)
+ * 'wrap' -- wrap around
+ * 'clip' -- clip to the range
+
+ In 'clip' mode, a negative index which would normally
+ wrap will clip to 0 instead.
+ order : {'C', 'F'}, optional
+ Determines whether the multi-index should be viewed as
+ indexing in row-major (C-style) or column-major
+ (Fortran-style) order.
+
+ Returns
+ -------
+ raveled_indices : ndarray
+ An array of indices into the flattened version of an array
+ of dimensions ``dims``.
+
+ See Also
+ --------
+ unravel_index
+
+ Notes
+ -----
+ .. versionadded:: 1.6.0
+
+ Examples
+ --------
+ >>> arr = np.array([[3,6,6],[4,5,1]])
+ >>> np.ravel_multi_index(arr, (7,6))
+ array([22, 41, 37])
+ >>> np.ravel_multi_index(arr, (7,6), order='F')
+ array([31, 41, 13])
+ >>> np.ravel_multi_index(arr, (4,6), mode='clip')
+ array([22, 23, 19])
+ >>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap'))
+ array([12, 13, 13])
+
+ >>> np.ravel_multi_index((3,1,4,1), (6,7,8,9))
+ 1621
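+
+ A small sketch (outputs assumed) of the 'clip' behavior described above:
+ a negative index clips to 0 instead of wrapping:
+
+ >>> np.ravel_multi_index((-1,), (5,), mode='clip')
+ 0
+ >>> np.ravel_multi_index((-1,), (5,), mode='wrap')
+ 4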
+ """
+ return multi_index
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index)
+def unravel_index(indices, shape=None, order=None, dims=None):
+ """
+ unravel_index(indices, shape, order='C')
+
+ Converts a flat index or array of flat indices into a tuple
+ of coordinate arrays.
+
+ Parameters
+ ----------
+ indices : array_like
+ An integer array whose elements are indices into the flattened
+ version of an array of dimensions ``shape``. Before version 1.6.0,
+ this function accepted just one index value.
+ shape : tuple of ints
+ The shape of the array to use for unraveling ``indices``.
+
+ .. versionchanged:: 1.16.0
+ Renamed from ``dims`` to ``shape``.
+
+ order : {'C', 'F'}, optional
+ Determines whether the indices should be viewed as indexing in
+ row-major (C-style) or column-major (Fortran-style) order.
+
+ .. versionadded:: 1.6.0
+
+ Returns
+ -------
+ unraveled_coords : tuple of ndarray
+ Each array in the tuple has the same shape as the ``indices``
+ array.
+
+ See Also
+ --------
+ ravel_multi_index
+
+ Examples
+ --------
+ >>> np.unravel_index([22, 41, 37], (7,6))
+ (array([3, 6, 6]), array([4, 5, 1]))
+ >>> np.unravel_index([31, 41, 13], (7,6), order='F')
+ (array([3, 6, 6]), array([4, 5, 1]))
+
+ >>> np.unravel_index(1621, (6,7,8,9))
+ (3, 1, 4, 1)
+
+ """
+ if dims is not None:
+ warnings.warn("'shape' argument should be used instead of 'dims'",
+ DeprecationWarning, stacklevel=3)
+ return (indices,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto)
+def copyto(dst, src, casting=None, where=None):
+ """
+ copyto(dst, src, casting='same_kind', where=True)
+
+ Copies values from one array to another, broadcasting as necessary.
+
+ Raises a TypeError if the `casting` rule is violated. If `where` is
+ provided, it selects which elements to copy.
+
+ .. versionadded:: 1.7.0
+
+ Parameters
+ ----------
+ dst : ndarray
+ The array into which values are copied.
+ src : array_like
+ The array from which values are copied.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ Controls what kind of data casting may occur when copying.
+
+ * 'no' means the data types should not be cast at all.
+ * 'equiv' means only byte-order changes are allowed.
+ * 'safe' means only casts which can preserve values are allowed.
+ * 'same_kind' means only safe casts or casts within a kind,
+ like float64 to float32, are allowed.
+ * 'unsafe' means any data conversions may be done.
+ where : array_like of bool, optional
+ A boolean array which is broadcasted to match the dimensions
+ of `dst`, and selects elements to copy from `src` to `dst`
+ wherever it contains the value True.
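+
+ Examples
+ --------
+ A minimal sketch (outputs assumed) of a masked copy:
+
+ >>> dst = np.zeros(4)
+ >>> np.copyto(dst, [1, 2, 3, 4], where=[True, False, True, False])
+ >>> dst
+ array([ 1., 0., 3., 0.])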
+ """
+ return (dst, src, where)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask)
+def putmask(a, mask, values):
+ """
+ putmask(a, mask, values)
+
+ Changes elements of an array based on conditional and input values.
+
+ Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
+
+ If `values` is not the same size as `a` and `mask` then it will repeat.
+ This gives behavior different from ``a[mask] = values``.
+
+ Parameters
+ ----------
+ a : array_like
+ Target array.
+ mask : array_like
+ Boolean mask array. It has to be the same shape as `a`.
+ values : array_like
+ Values to put into `a` where `mask` is True. If `values` is smaller
+ than `a` it will be repeated.
+
+ See Also
+ --------
+ place, put, take, copyto
+
+ Examples
+ --------
+ >>> x = np.arange(6).reshape(2, 3)
+ >>> np.putmask(x, x>2, x**2)
+ >>> x
+ array([[ 0, 1, 2],
+ [ 9, 16, 25]])
+
+ If `values` is smaller than `a` it is repeated:
+
+ >>> x = np.arange(5)
+ >>> np.putmask(x, x>1, [-33, -44])
+ >>> x
+ array([ 0, 1, -33, -44, -33])
+
+ """
+ return (a, mask, values)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits)
+def packbits(myarray, axis=None):
+ """
+ packbits(myarray, axis=None)
+
+ Packs the elements of a binary-valued array into bits in a uint8 array.
+
+ The result is padded to full bytes by inserting zero bits at the end.
+
+ Parameters
+ ----------
+ myarray : array_like
+ An array of integers or booleans whose elements should be packed to
+ bits.
+ axis : int, optional
+ The dimension over which bit-packing is done.
+ ``None`` implies packing the flattened array.
+
+ Returns
+ -------
+ packed : ndarray
+ Array of type uint8 whose elements represent bits corresponding to the
+ logical (0 or nonzero) value of the input elements. The shape of
+ `packed` has the same number of dimensions as the input (unless `axis`
+ is None, in which case the output is 1-D).
+
+ See Also
+ --------
+ unpackbits: Unpacks elements of a uint8 array into a binary-valued output
+ array.
+
+ Examples
+ --------
+ >>> a = np.array([[[1,0,1],
+ ... [0,1,0]],
+ ... [[1,1,0],
+ ... [0,0,1]]])
+ >>> b = np.packbits(a, axis=-1)
+ >>> b
+ array([[[160],[64]],[[192],[32]]], dtype=uint8)
+
+ Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000,
+ and 32 = 0010 0000.
+
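+ With ``axis=None`` the input is flattened before packing, as noted above
+ (output assumed):
+
+ >>> np.packbits(np.array([1, 0, 1, 1]))
+ array([176], dtype=uint8)
+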
+ """
+ return (myarray,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits)
+def unpackbits(myarray, axis=None):
+ """
+ unpackbits(myarray, axis=None)
+
+ Unpacks elements of a uint8 array into a binary-valued output array.
+
+ Each element of `myarray` represents a bit-field that should be unpacked
+ into a binary-valued output array. The shape of the output array is either
+ 1-D (if `axis` is None) or the same shape as the input array with unpacking
+ done along the axis specified.
+
+ Parameters
+ ----------
+ myarray : ndarray, uint8 type
+ Input array.
+ axis : int, optional
+ The dimension over which bit-unpacking is done.
+ ``None`` implies unpacking the flattened array.
+
+ Returns
+ -------
+ unpacked : ndarray, uint8 type
+ The elements are binary-valued (0 or 1).
+
+ See Also
+ --------
+ packbits : Packs the elements of a binary-valued array into bits in a uint8
+ array.
+
+ Examples
+ --------
+ >>> a = np.array([[2], [7], [23]], dtype=np.uint8)
+ >>> a
+ array([[ 2],
+ [ 7],
+ [23]], dtype=uint8)
+ >>> b = np.unpackbits(a, axis=1)
+ >>> b
+ array([[0, 0, 0, 0, 0, 0, 1, 0],
+ [0, 0, 0, 0, 0, 1, 1, 1],
+ [0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8)
+
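+ Likewise, ``axis=None`` unpacks the flattened array into a 1-D result
+ (output assumed):
+
+ >>> np.unpackbits(np.array([2], dtype=np.uint8))
+ array([0, 0, 0, 0, 0, 0, 1, 0], dtype=uint8)
+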
+ """
+ return (myarray,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory)
+def shares_memory(a, b, max_work=None):
+ """
+ shares_memory(a, b, max_work=None)
+
+ Determine if two arrays share memory.
+
+ Parameters
+ ----------
+ a, b : ndarray
+ Input arrays
+ max_work : int, optional
+ Effort to spend on solving the overlap problem (maximum number
+ of candidate solutions to consider). The following special
+ values are recognized:
+
+ max_work=MAY_SHARE_EXACT (default)
+ The problem is solved exactly. In this case, the function returns
+ True only if there is an element shared between the arrays.
+ max_work=MAY_SHARE_BOUNDS
+ Only the memory bounds of a and b are checked.
+
+ Raises
+ ------
+ numpy.TooHardError
+ Exceeded max_work.
+
+ Returns
+ -------
+ out : bool
+
+ See Also
+ --------
+ may_share_memory
+
+ Examples
+ --------
+ >>> np.shares_memory(np.array([1,2]), np.array([5,8,9]))
+ False
+
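+ A view of an array shares memory with its base (output assumed):
+
+ >>> x = np.arange(4)
+ >>> np.shares_memory(x, x[1:3])
+ True
+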
+ """
+ return (a, b)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory)
+def may_share_memory(a, b, max_work=None):
+ """
+ may_share_memory(a, b, max_work=None)
+
+ Determine if two arrays might share memory.
+
+ A return of True does not necessarily mean that the two arrays
+ share any element. It just means that they *might*.
+
+ Only the memory bounds of a and b are checked by default.
+
+ Parameters
+ ----------
+ a, b : ndarray
+ Input arrays
+ max_work : int, optional
+ Effort to spend on solving the overlap problem. See
+ `shares_memory` for details. Default for ``may_share_memory``
+ is to do a bounds check.
+
+ Returns
+ -------
+ out : bool
+
+ See Also
+ --------
+ shares_memory
+
+ Examples
+ --------
+ >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
+ False
+ >>> x = np.zeros([3, 4])
+ >>> np.may_share_memory(x[:,0], x[:,1])
+ True
+
+ """
+ return (a, b)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday)
+def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None):
+ """
+ is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None)
+
+ Calculates which of the given dates are valid days, and which are not.
+
+ .. versionadded:: 1.7.0
+
+ Parameters
+ ----------
+ dates : array_like of datetime64[D]
+ The array of dates to process.
+ weekmask : str or array_like of bool, optional
+ A seven-element array indicating which of Monday through Sunday are
+ valid days. May be specified as a length-seven list or array, like
+ [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
+ like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
+ weekdays, optionally separated by white space. Valid abbreviations
+ are: Mon Tue Wed Thu Fri Sat Sun
+ holidays : array_like of datetime64[D], optional
+ An array of dates to consider as invalid dates. They may be
+ specified in any order, and NaT (not-a-time) dates are ignored.
+ This list is saved in a normalized form that is suited for
+ fast calculations of valid days.
+ busdaycal : busdaycalendar, optional
+ A `busdaycalendar` object which specifies the valid days. If this
+ parameter is provided, neither weekmask nor holidays may be
+ provided.
+ out : array of bool, optional
+ If provided, this array is filled with the result.
+
+ Returns
+ -------
+ out : array of bool
+ An array with the same shape as ``dates``, containing True for
+ each valid day, and False for each invalid day.
+
+ See Also
+ --------
+ busdaycalendar: An object that specifies a custom set of valid days.
+ busday_offset : Applies an offset counted in valid days.
+ busday_count : Counts how many valid days are in a half-open date range.
+
+ Examples
+ --------
+ >>> # The weekdays are Friday, Saturday, and Monday
+ ... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'],
+ ... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
+ array([False, False, True])
+ """
+ return (dates, weekmask, holidays, out)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset)
+def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None,
+ busdaycal=None, out=None):
+ """
+ busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None)
+
+ First adjusts the date to fall on a valid day according to
+ the ``roll`` rule, then applies offsets to the given dates
+ counted in valid days.
+
+ .. versionadded:: 1.7.0
+
+ Parameters
+ ----------
+ dates : array_like of datetime64[D]
+ The array of dates to process.
+ offsets : array_like of int
+ The array of offsets, which is broadcast with ``dates``.
+ roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', 'modifiedfollowing', 'modifiedpreceding'}, optional
+ How to treat dates that do not fall on a valid day. The default
+ is 'raise'.
+
+ * 'raise' means to raise an exception for an invalid day.
+ * 'nat' means to return a NaT (not-a-time) for an invalid day.
+ * 'forward' and 'following' mean to take the first valid day
+ later in time.
+ * 'backward' and 'preceding' mean to take the first valid day
+ earlier in time.
+ * 'modifiedfollowing' means to take the first valid day
+ later in time unless it is across a Month boundary, in which
+ case to take the first valid day earlier in time.
+ * 'modifiedpreceding' means to take the first valid day
+ earlier in time unless it is across a Month boundary, in which
+ case to take the first valid day later in time.
+ weekmask : str or array_like of bool, optional
+ A seven-element array indicating which of Monday through Sunday are
+ valid days. May be specified as a length-seven list or array, like
+ [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
+ like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
+ weekdays, optionally separated by white space. Valid abbreviations
+ are: Mon Tue Wed Thu Fri Sat Sun
+ holidays : array_like of datetime64[D], optional
+ An array of dates to consider as invalid dates. They may be
+ specified in any order, and NaT (not-a-time) dates are ignored.
+ This list is saved in a normalized form that is suited for
+ fast calculations of valid days.
+ busdaycal : busdaycalendar, optional
+ A `busdaycalendar` object which specifies the valid days. If this
+ parameter is provided, neither weekmask nor holidays may be
+ provided.
+ out : array of datetime64[D], optional
+ If provided, this array is filled with the result.
+
+ Returns
+ -------
+ out : array of datetime64[D]
+ An array with a shape from broadcasting ``dates`` and ``offsets``
+ together, containing the dates with offsets applied.
+
+ See Also
+ --------
+ busdaycalendar: An object that specifies a custom set of valid days.
+ is_busday : Returns a boolean array indicating valid days.
+ busday_count : Counts how many valid days are in a half-open date range.
+
+ Examples
+ --------
+ >>> # First business day in October 2011 (not accounting for holidays)
+ ... np.busday_offset('2011-10', 0, roll='forward')
+ numpy.datetime64('2011-10-03','D')
+ >>> # Last business day in February 2012 (not accounting for holidays)
+ ... np.busday_offset('2012-03', -1, roll='forward')
+ numpy.datetime64('2012-02-29','D')
+ >>> # Third Wednesday in January 2011
+ ... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed')
+ numpy.datetime64('2011-01-19','D')
+ >>> # 2012 Mother's Day in Canada and the U.S.
+ ... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun')
+ numpy.datetime64('2012-05-13','D')
+
+ >>> # First business day on or after a date
+ ... np.busday_offset('2011-03-20', 0, roll='forward')
+ numpy.datetime64('2011-03-21','D')
+ >>> np.busday_offset('2011-03-22', 0, roll='forward')
+ numpy.datetime64('2011-03-22','D')
+ >>> # First business day after a date
+ ... np.busday_offset('2011-03-20', 1, roll='backward')
+ numpy.datetime64('2011-03-21','D')
+ >>> np.busday_offset('2011-03-22', 1, roll='backward')
+ numpy.datetime64('2011-03-23','D')
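+
+ A sketch (dates assumed) of the 'modifiedfollowing' rule, which stays
+ within the month; 2011-04-30 is a Saturday, and rolling forward would
+ cross into May, so the offset rolls back to the preceding Friday:
+
+ >>> np.busday_offset('2011-04-30', 0, roll='modifiedfollowing')
+ numpy.datetime64('2011-04-29','D')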
+ """
+ return (dates, offsets, weekmask, holidays, out)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count)
+def busday_count(begindates, enddates, weekmask=None, holidays=None,
+ busdaycal=None, out=None):
+ """
+ busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None)
+
+ Counts the number of valid days between `begindates` and
+ `enddates`, not including the day of `enddates`.
+
+ If ``enddates`` specifies a date value that is earlier than the
+ corresponding ``begindates`` date value, the count will be negative.
+
+ .. versionadded:: 1.7.0
+
+ Parameters
+ ----------
+ begindates : array_like of datetime64[D]
+ The array of the first dates for counting.
+ enddates : array_like of datetime64[D]
+ The array of the end dates for counting, which are excluded
+ from the count themselves.
+ weekmask : str or array_like of bool, optional
+ A seven-element array indicating which of Monday through Sunday are
+ valid days. May be specified as a length-seven list or array, like
+ [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
+ like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
+ weekdays, optionally separated by white space. Valid abbreviations
+ are: Mon Tue Wed Thu Fri Sat Sun
+ holidays : array_like of datetime64[D], optional
+ An array of dates to consider as invalid dates. They may be
+ specified in any order, and NaT (not-a-time) dates are ignored.
+ This list is saved in a normalized form that is suited for
+ fast calculations of valid days.
+ busdaycal : busdaycalendar, optional
+ A `busdaycalendar` object which specifies the valid days. If this
+ parameter is provided, neither weekmask nor holidays may be
+ provided.
+ out : array of int, optional
+ If provided, this array is filled with the result.
+
+ Returns
+ -------
+ out : array of int
+ An array with a shape from broadcasting ``begindates`` and ``enddates``
+ together, containing the number of valid days between
+ the begin and end dates.
+
+ See Also
+ --------
+ busdaycalendar: An object that specifies a custom set of valid days.
+ is_busday : Returns a boolean array indicating valid days.
+ busday_offset : Applies an offset counted in valid days.
+
+ Examples
+ --------
+ >>> # Number of weekdays in January 2011
+ ... np.busday_count('2011-01', '2011-02')
+ 21
+ >>> # Number of weekdays in 2011
+ ... np.busday_count('2011', '2012')
+ 260
+ >>> # Number of Saturdays in 2011
+ ... np.busday_count('2011', '2012', weekmask='Sat')
+ 53
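+
+ A reversed range, as described above, yields a negative count
+ (output assumed):
+
+ >>> np.busday_count('2011-02', '2011-01')
+ -21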
+ """
+ return (begindates, enddates, weekmask, holidays, out)
+
+
+@array_function_from_c_func_and_dispatcher(
+ _multiarray_umath.datetime_as_string)
+def datetime_as_string(arr, unit=None, timezone=None, casting=None):
+ """
+ datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind')
+
+ Convert an array of datetimes into an array of strings.
+
+ Parameters
+ ----------
+ arr : array_like of datetime64
+ The array of UTC timestamps to format.
+ unit : str
+ One of None, 'auto', or a :ref:`datetime unit <arrays.dtypes.dateunits>`.
+ timezone : {'naive', 'UTC', 'local'} or tzinfo
+ Timezone information to use when displaying the datetime. If 'UTC', end
+ with a Z to indicate UTC time. If 'local', convert to the local timezone
+ first, and suffix with a +-#### timezone offset. If a tzinfo object,
+ then do as with 'local', but use the specified timezone.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}
+ Casting to allow when changing between datetime units.
+
+ Returns
+ -------
+ str_arr : ndarray
+ An array of strings the same shape as `arr`.
+
+ Examples
+ --------
+ >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]')
+ >>> d
+ array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30',
+ '2002-10-27T07:30'], dtype='datetime64[m]')
+
+ Setting the timezone to UTC shows the same information, but with a Z suffix
+
+ >>> np.datetime_as_string(d, timezone='UTC')
+ array(['2002-10-27T04:30Z', '2002-10-27T05:30Z', '2002-10-27T06:30Z',
+ '2002-10-27T07:30Z'], dtype='<U35')
+
+ Note that we picked datetimes that cross a DST boundary. Passing in a
+ ``pytz`` timezone object will print the appropriate offset
+
+ >>> import pytz
+ >>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern'))
+ array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400',
+ '2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='<U39')
+
+ Passing in a unit will change the precision
+
+ >>> np.datetime_as_string(d, unit='h')
+ array(['2002-10-27T04', '2002-10-27T05', '2002-10-27T06', '2002-10-27T07'],
+ dtype='<U32')
+ >>> np.datetime_as_string(d, unit='s')
+ array(['2002-10-27T04:30:00', '2002-10-27T05:30:00', '2002-10-27T06:30:00',
+ '2002-10-27T07:30:00'], dtype='<U38')
+
+ 'casting' can be used to specify whether precision can be changed
+
+ >>> np.datetime_as_string(d, unit='h', casting='safe')
+ Traceback (most recent call last):
+     ...
+ TypeError: Cannot create a datetime string as units 'h' from a NumPy
+ datetime with units 'm' according to the rule 'safe'
+ """
+ return (arr,)
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index 7ade3d224..8768cbe56 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -6,6 +6,7 @@ try:
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc
+import functools
import itertools
import operator
import sys
@@ -18,7 +19,7 @@ from .multiarray import (
_fastCopyAndTranspose as fastCopyAndTranspose, ALLOW_THREADS,
BUFSIZE, CLIP, MAXDIMS, MAY_SHARE_BOUNDS, MAY_SHARE_EXACT, RAISE,
WRAP, arange, array, broadcast, can_cast, compare_chararrays,
- concatenate, copyto, count_nonzero, dot, dtype, empty,
+ concatenate, copyto, dot, dtype, empty,
empty_like, flatiter, frombuffer, fromfile, fromiter, fromstring,
inner, int_asbuffer, lexsort, matmul, may_share_memory,
min_scalar_type, ndarray, nditer, nested_iters, promote_types,
@@ -27,7 +28,9 @@ from .multiarray import (
if sys.version_info[0] < 3:
from .multiarray import newbuffer, getbuffer
+from . import overrides
from . import umath
+from .overrides import set_module
from .umath import (multiply, invert, sin, UFUNC_BUFSIZE_DEFAULT,
ERR_IGNORE, ERR_WARN, ERR_RAISE, ERR_CALL, ERR_PRINT,
ERR_LOG, ERR_DEFAULT, PINF, NAN)
@@ -40,7 +43,13 @@ ufunc = type(sin)
newaxis = None
if sys.version_info[0] >= 3:
- import pickle
+ if sys.version_info[1] in (6, 7):
+ try:
+ import pickle5 as pickle
+ except ImportError:
+ import pickle
+ else:
+ import pickle
basestring = str
import builtins
else:
@@ -48,6 +57,10 @@ else:
import __builtin__ as builtins
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
def loads(*args, **kwargs):
# NumPy 1.15.0, 2017-12-10
warnings.warn(
@@ -80,6 +93,7 @@ if sys.version_info[0] < 3:
__all__.extend(['getbuffer', 'newbuffer'])
+@set_module('numpy')
class ComplexWarning(RuntimeWarning):
"""
The warning raised when casting a complex dtype to a real dtype.
@@ -91,6 +105,11 @@ class ComplexWarning(RuntimeWarning):
pass
+def _zeros_like_dispatcher(a, dtype=None, order=None, subok=None):
+ return (a,)
+
+
+@array_function_dispatch(_zeros_like_dispatcher)
def zeros_like(a, dtype=None, order='K', subok=True):
"""
Return an array of zeros with the same shape and type as a given array.
@@ -153,6 +172,7 @@ def zeros_like(a, dtype=None, order='K', subok=True):
return res
+@set_module('numpy')
def ones(shape, dtype=None, order='C'):
"""
Return a new array of given shape and type, filled with ones.
@@ -205,6 +225,11 @@ def ones(shape, dtype=None, order='C'):
return a
+def _ones_like_dispatcher(a, dtype=None, order=None, subok=None):
+ return (a,)
+
+
+@array_function_dispatch(_ones_like_dispatcher)
def ones_like(a, dtype=None, order='K', subok=True):
"""
Return an array of ones with the same shape and type as a given array.
@@ -265,6 +290,7 @@ def ones_like(a, dtype=None, order='K', subok=True):
return res
+@set_module('numpy')
def full(shape, fill_value, dtype=None, order='C'):
"""
Return a new array of given shape and type, filled with `fill_value`.
@@ -311,6 +337,11 @@ def full(shape, fill_value, dtype=None, order='C'):
return a
+def _full_like_dispatcher(a, fill_value, dtype=None, order=None, subok=None):
+ return (a,)
+
+
+@array_function_dispatch(_full_like_dispatcher)
def full_like(a, fill_value, dtype=None, order='K', subok=True):
"""
Return a full array with the same shape and type as a given array.
@@ -368,6 +399,11 @@ def full_like(a, fill_value, dtype=None, order='K', subok=True):
return res
+def _count_nonzero_dispatcher(a, axis=None):
+ return (a,)
+
+
+@array_function_dispatch(_count_nonzero_dispatcher)
def count_nonzero(a, axis=None):
"""
Counts the number of non-zero values in the array ``a``.
@@ -430,6 +466,7 @@ def count_nonzero(a, axis=None):
return a_bool.sum(axis=axis, dtype=np.intp)
+@set_module('numpy')
def asarray(a, dtype=None, order=None):
"""Convert the input to an array.
@@ -501,6 +538,7 @@ def asarray(a, dtype=None, order=None):
return array(a, dtype, copy=False, order=order)
+@set_module('numpy')
def asanyarray(a, dtype=None, order=None):
"""Convert the input to an ndarray, but pass ndarray subclasses through.
@@ -553,9 +591,10 @@ def asanyarray(a, dtype=None, order=None):
return array(a, dtype, copy=False, order=order, subok=True)
+@set_module('numpy')
def ascontiguousarray(a, dtype=None):
"""
- Return a contiguous array in memory (C order).
+ Return a contiguous array (ndim >= 1) in memory (C order).
Parameters
----------
@@ -586,13 +625,17 @@ def ascontiguousarray(a, dtype=None):
>>> x.flags['C_CONTIGUOUS']
True
+ Note: This function returns an array with at least one dimension (1-d),
+ so it will not preserve 0-d arrays.
+
"""
return array(a, dtype, copy=False, order='C', ndmin=1)
+@set_module('numpy')
def asfortranarray(a, dtype=None):
"""
- Return an array laid out in Fortran order in memory.
+ Return an array (ndim >= 1) laid out in Fortran order in memory.
Parameters
----------
@@ -623,10 +666,14 @@ def asfortranarray(a, dtype=None):
>>> y.flags['F_CONTIGUOUS']
True
+ Note: This function returns an array with at least one dimension (1-d),
+ so it will not preserve 0-d arrays.
+
"""
return array(a, dtype, copy=False, order='F', ndmin=1)
+@set_module('numpy')
def require(a, dtype=None, requirements=None):
"""
Return an ndarray of the provided type that satisfies requirements.
@@ -698,7 +745,7 @@ def require(a, dtype=None, requirements=None):
if not requirements:
return asanyarray(a, dtype=dtype)
else:
- requirements = set(possible_flags[x.upper()] for x in requirements)
+ requirements = {possible_flags[x.upper()] for x in requirements}
if 'E' in requirements:
requirements.remove('E')
@@ -707,7 +754,7 @@ def require(a, dtype=None, requirements=None):
subok = True
order = 'A'
- if requirements >= set(['C', 'F']):
+ if requirements >= {'C', 'F'}:
raise ValueError('Cannot specify both "C" and "F" order')
elif 'F' in requirements:
order = 'F'
@@ -725,6 +772,7 @@ def require(a, dtype=None, requirements=None):
return arr
+@set_module('numpy')
def isfortran(a):
"""
Returns True if the array is Fortran contiguous but *not* C contiguous.
@@ -787,6 +835,11 @@ def isfortran(a):
return a.flags.fnc
+def _argwhere_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_argwhere_dispatcher)
def argwhere(a):
"""
Find the indices of array elements that are non-zero, grouped by element.
@@ -828,6 +881,11 @@ def argwhere(a):
return transpose(nonzero(a))
+def _flatnonzero_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_flatnonzero_dispatcher)
def flatnonzero(a):
"""
Return indices that are non-zero in the flattened version of a.
@@ -879,6 +937,11 @@ def _mode_from_name(mode):
return mode
+def _correlate_dispatcher(a, v, mode=None):
+ return (a, v)
+
+
+@array_function_dispatch(_correlate_dispatcher)
def correlate(a, v, mode='valid'):
"""
Cross-correlation of two 1-dimensional sequences.
@@ -947,6 +1010,11 @@ def correlate(a, v, mode='valid'):
return multiarray.correlate2(a, v, mode)
+def _convolve_dispatcher(a, v, mode=None):
+ return (a, v)
+
+
+@array_function_dispatch(_convolve_dispatcher)
def convolve(a, v, mode='full'):
"""
Returns the discrete, linear convolution of two one-dimensional sequences.
@@ -1010,7 +1078,8 @@ def convolve(a, v, mode='full'):
References
----------
- .. [1] Wikipedia, "Convolution", http://en.wikipedia.org/wiki/Convolution.
+ .. [1] Wikipedia, "Convolution",
+ https://en.wikipedia.org/wiki/Convolution
Examples
--------
@@ -1045,6 +1114,11 @@ def convolve(a, v, mode='full'):
return multiarray.correlate(a, v[::-1], mode)
+def _outer_dispatcher(a, b, out=None):
+ return (a, b, out)
+
+
+@array_function_dispatch(_outer_dispatcher)
def outer(a, b, out=None):
"""
Compute the outer product of two vectors.
@@ -1129,6 +1203,11 @@ def outer(a, b, out=None):
return multiply(a.ravel()[:, newaxis], b.ravel()[newaxis, :], out)
+def _tensordot_dispatcher(a, b, axes=None):
+ return (a, b)
+
+
+@array_function_dispatch(_tensordot_dispatcher)
def tensordot(a, b, axes=2):
"""
Compute tensor dot product along specified axes for arrays >= 1-D.
@@ -1315,6 +1394,11 @@ def tensordot(a, b, axes=2):
return res.reshape(olda + oldb)
+def _roll_dispatcher(a, shift, axis=None):
+ return (a,)
+
+
+@array_function_dispatch(_roll_dispatcher)
def roll(a, shift, axis=None):
"""
Roll array elements along a given axis.
@@ -1404,6 +1488,11 @@ def roll(a, shift, axis=None):
return result
+def _rollaxis_dispatcher(a, axis, start=None):
+ return (a,)
+
+
+@array_function_dispatch(_rollaxis_dispatcher)
def rollaxis(a, axis, start=0):
"""
Roll the specified axis backwards, until it lies in a given position.
@@ -1508,11 +1597,14 @@ def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False):
--------
normalize_axis_index : normalizing a single scalar axis
"""
- try:
- axis = [operator.index(axis)]
- except TypeError:
- axis = tuple(axis)
- axis = tuple(normalize_axis_index(ax, ndim, argname) for ax in axis)
+ # Optimization to speed-up the most common cases.
+ if type(axis) not in (tuple, list):
+ try:
+ axis = [operator.index(axis)]
+ except TypeError:
+ pass
+ # Going via an iterator directly is slower than via list comprehension.
+ axis = tuple([normalize_axis_index(ax, ndim, argname) for ax in axis])
if not allow_duplicate and len(set(axis)) != len(axis):
if argname:
raise ValueError('repeated axis in `{}` argument'.format(argname))
@@ -1521,6 +1613,11 @@ def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False):
return axis
+def _moveaxis_dispatcher(a, source, destination):
+ return (a,)
+
+
+@array_function_dispatch(_moveaxis_dispatcher)
def moveaxis(a, source, destination):
"""
Move axes of an array to new positions.
@@ -1597,6 +1694,11 @@ def _move_axis_to_0(a, axis):
return moveaxis(a, axis, 0)
+def _cross_dispatcher(a, b, axisa=None, axisb=None, axisc=None, axis=None):
+ return (a, b)
+
+
+@array_function_dispatch(_cross_dispatcher)
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
"""
Return the cross product of two (arrays of) vectors.
@@ -1797,6 +1899,7 @@ def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
little_endian = (sys.byteorder == 'little')
+@set_module('numpy')
def indices(dimensions, dtype=int):
"""
Return an array representing the indices of a grid.
@@ -1868,6 +1971,7 @@ def indices(dimensions, dtype=int):
return res
+@set_module('numpy')
def fromfunction(function, shape, **kwargs):
"""
Construct an array by executing a function over each coordinate.
@@ -1896,7 +2000,7 @@ def fromfunction(function, shape, **kwargs):
The result of the call to `function` is passed back directly.
Therefore the shape of `fromfunction` is completely determined by
`function`. If `function` returns a scalar value, the shape of
- `fromfunction` would match the `shape` parameter.
+ `fromfunction` would not match the `shape` parameter.
See Also
--------
@@ -1924,6 +2028,11 @@ def fromfunction(function, shape, **kwargs):
return function(*args, **kwargs)
+def _frombuffer(buf, dtype, shape, order):
+ return frombuffer(buf, dtype=dtype).reshape(shape, order=order)
+
+
+@set_module('numpy')
def isscalar(num):
"""
Returns True if the type of `num` is a scalar type.
@@ -1938,10 +2047,46 @@ def isscalar(num):
val : bool
True if `num` is a scalar type, False if it is not.
+ See Also
+ --------
+ ndim : Get the number of dimensions of an array
+
+ Notes
+ -----
+ In almost all cases ``np.ndim(x) == 0`` should be used instead of this
+ function, as that will also return ``True`` for 0-d arrays. This is how
+ numpy overloads functions in the style of the ``dx`` arguments to `gradient`
+ and the ``bins`` argument to `histogram`. Some key differences:
+
+ +--------------------------------------+---------------+-------------------+
+ | x |``isscalar(x)``|``np.ndim(x) == 0``|
+ +======================================+===============+===================+
+ | PEP 3141 numeric objects (including | ``True`` | ``True`` |
+ | builtins) | | |
+ +--------------------------------------+---------------+-------------------+
+ | builtin string and buffer objects | ``True`` | ``True`` |
+ +--------------------------------------+---------------+-------------------+
+ | other builtin objects, like | ``False`` | ``True`` |
+ | `pathlib.Path`, `Exception`, | | |
+ | the result of `re.compile` | | |
+ +--------------------------------------+---------------+-------------------+
+ | third-party objects like | ``False`` | ``True`` |
+ | `matplotlib.figure.Figure` | | |
+ +--------------------------------------+---------------+-------------------+
+ | zero-dimensional numpy arrays | ``False`` | ``True`` |
+ +--------------------------------------+---------------+-------------------+
+ | other numpy arrays | ``False`` | ``False`` |
+ +--------------------------------------+---------------+-------------------+
+ | `list`, `tuple`, and other sequence | ``False`` | ``False`` |
+ | objects | | |
+ +--------------------------------------+---------------+-------------------+
+
Examples
--------
>>> np.isscalar(3.1)
True
+ >>> np.isscalar(np.array(3.1))
+ False
>>> np.isscalar([3.1])
False
>>> np.isscalar(False)
@@ -1964,6 +2109,7 @@ def isscalar(num):
or isinstance(num, numbers.Number))
+@set_module('numpy')
def binary_repr(num, width=None):
"""
Return the binary representation of the input number as a string.
@@ -2015,7 +2161,7 @@ def binary_repr(num, width=None):
References
----------
.. [1] Wikipedia, "Two's complement",
- http://en.wikipedia.org/wiki/Two's_complement
+ https://en.wikipedia.org/wiki/Two's_complement
Examples
--------
@@ -2074,6 +2220,7 @@ def binary_repr(num, width=None):
return '1' * (outwidth - binwidth) + binary
+@set_module('numpy')
def base_repr(number, base=2, padding=0):
"""
Return a string representation of a number in the given base system.
@@ -2168,6 +2315,7 @@ def _maketup(descr, val):
return tuple(res)
+@set_module('numpy')
def identity(n, dtype=None):
"""
Return the identity array.
@@ -2200,6 +2348,11 @@ def identity(n, dtype=None):
return eye(n, dtype=dtype)
+def _allclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None):
+ return (a, b)
+
+
+@array_function_dispatch(_allclose_dispatcher)
def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns True if two arrays are element-wise equal within a tolerance.
@@ -2271,6 +2424,11 @@ def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
return bool(res)
+def _isclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None):
+ return (a, b)
+
+
+@array_function_dispatch(_isclose_dispatcher)
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within a
@@ -2386,6 +2544,11 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
return cond[()] # Flatten 0d arrays to scalars
+def _array_equal_dispatcher(a1, a2):
+ return (a1, a2)
+
+
+@array_function_dispatch(_array_equal_dispatcher)
def array_equal(a1, a2):
"""
True if two arrays have the same shape and elements, False otherwise.
@@ -2428,6 +2591,11 @@ def array_equal(a1, a2):
return bool(asarray(a1 == a2).all())
+def _array_equiv_dispatcher(a1, a2):
+ return (a1, a2)
+
+
+@array_function_dispatch(_array_equiv_dispatcher)
def array_equiv(a1, a2):
"""
Returns True if input arrays are shape consistent and all elements equal.
@@ -2482,12 +2650,10 @@ _errdict = {"ignore": ERR_IGNORE,
"print": ERR_PRINT,
"log": ERR_LOG}
-_errdict_rev = {}
-for key in _errdict.keys():
- _errdict_rev[_errdict[key]] = key
-del key
+_errdict_rev = {value: key for key, value in _errdict.items()}
+@set_module('numpy')
def seterr(all=None, divide=None, over=None, under=None, invalid=None):
"""
Set how floating-point errors are handled.
@@ -2538,7 +2704,7 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None):
- Invalid operation: result is not an expressible number, typically
indicates that a NaN was produced.
- .. [1] http://en.wikipedia.org/wiki/IEEE_754
+ .. [1] https://en.wikipedia.org/wiki/IEEE_754
Examples
--------
@@ -2589,6 +2755,7 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None):
return old
+@set_module('numpy')
def geterr():
"""
Get the current way of handling floating-point errors.
@@ -2640,6 +2807,7 @@ def geterr():
return res
+@set_module('numpy')
def setbufsize(size):
"""
Set the size of the buffer used in ufuncs.
@@ -2664,6 +2832,7 @@ def setbufsize(size):
return old
+@set_module('numpy')
def getbufsize():
"""
Return the size of the buffer used in ufuncs.
@@ -2677,6 +2846,7 @@ def getbufsize():
return umath.geterrobj()[0]
+@set_module('numpy')
def seterrcall(func):
"""
Set the floating-point error callback function or log object.
@@ -2769,6 +2939,7 @@ def seterrcall(func):
return old
+@set_module('numpy')
def geterrcall():
"""
Return the current callback function used on floating-point errors.
@@ -2821,6 +2992,7 @@ class _unspecified(object):
_Unspecified = _unspecified()
+@set_module('numpy')
class errstate(object):
"""
errstate(**kwargs)
@@ -2846,16 +3018,11 @@ class errstate(object):
Notes
-----
- The ``with`` statement was introduced in Python 2.5, and can only be used
- there by importing it: ``from __future__ import with_statement``. In
- earlier Python versions the ``with`` statement is not available.
-
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
- >>> from __future__ import with_statement # use 'with' in Python 2.5
>>> olderr = np.seterr(all='ignore') # Set error handling to known state.
>>> np.arange(3) / 0.
@@ -2914,15 +3081,10 @@ True_ = bool_(True)
def extend_all(module):
- adict = {}
- for a in __all__:
- adict[a] = 1
- try:
- mall = getattr(module, '__all__')
- except AttributeError:
- mall = [k for k in module.__dict__.keys() if not k.startswith('_')]
+ existing = set(__all__)
+ mall = getattr(module, '__all__')
for a in mall:
- if a not in adict:
+ if a not in existing:
__all__.append(a)
diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py
index f7f25dd95..f00f92286 100644
--- a/numpy/core/numerictypes.py
+++ b/numpy/core/numerictypes.py
@@ -92,7 +92,7 @@ from numpy.core.multiarray import (
datetime_as_string, busday_offset, busday_count, is_busday,
busdaycalendar
)
-
+from numpy.core.overrides import set_module
# we add more at the bottom
__all__ = ['sctypeDict', 'sctypeNA', 'typeDict', 'typeNA', 'sctypes',
@@ -102,6 +102,23 @@ __all__ = ['sctypeDict', 'sctypeNA', 'typeDict', 'typeNA', 'sctypes',
'busday_offset', 'busday_count', 'is_busday', 'busdaycalendar',
]
+# we don't need all these imports, but we need to keep them for compatibility
+# for users using np.core.numerictypes.UPPER_TABLE
+from ._string_helpers import (
+ english_lower, english_upper, english_capitalize, LOWER_TABLE, UPPER_TABLE
+)
+
+from ._type_aliases import (
+ sctypeDict,
+ sctypeNA,
+ allTypes,
+ bitname,
+ sctypes,
+ _concrete_types,
+ _concrete_typeinfo,
+ _bits_of,
+)
+from ._dtype import _kind_name
# we don't export these for import *, but we do want them accessible
# as numerictypes.bool, etc.
@@ -112,354 +129,9 @@ else:
from __builtin__ import bool, int, float, complex, object, unicode, str
-# String-handling utilities to avoid locale-dependence.
-
-# "import string" is costly to import!
-# Construct the translation tables directly
-# "A" = chr(65), "a" = chr(97)
-_all_chars = [chr(_m) for _m in range(256)]
-_ascii_upper = _all_chars[65:65+26]
-_ascii_lower = _all_chars[97:97+26]
-LOWER_TABLE = "".join(_all_chars[:65] + _ascii_lower + _all_chars[65+26:])
-UPPER_TABLE = "".join(_all_chars[:97] + _ascii_upper + _all_chars[97+26:])
-
-
-def english_lower(s):
- """ Apply English case rules to convert ASCII strings to all lower case.
-
- This is an internal utility function to replace calls to str.lower() such
- that we can avoid changing behavior with changing locales. In particular,
- Turkish has distinct dotted and dotless variants of the Latin letter "I" in
- both lowercase and uppercase. Thus, "I".lower() != "i" in a "tr" locale.
-
- Parameters
- ----------
- s : str
-
- Returns
- -------
- lowered : str
-
- Examples
- --------
- >>> from numpy.core.numerictypes import english_lower
- >>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
- 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_'
- >>> english_lower('')
- ''
- """
- lowered = s.translate(LOWER_TABLE)
- return lowered
-
-def english_upper(s):
- """ Apply English case rules to convert ASCII strings to all upper case.
-
- This is an internal utility function to replace calls to str.upper() such
- that we can avoid changing behavior with changing locales. In particular,
- Turkish has distinct dotted and dotless variants of the Latin letter "I" in
- both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale.
-
- Parameters
- ----------
- s : str
-
- Returns
- -------
- uppered : str
-
- Examples
- --------
- >>> from numpy.core.numerictypes import english_upper
- >>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
- 'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
- >>> english_upper('')
- ''
- """
- uppered = s.translate(UPPER_TABLE)
- return uppered
-
-def english_capitalize(s):
- """ Apply English case rules to convert the first character of an ASCII
- string to upper case.
-
- This is an internal utility function to replace calls to str.capitalize()
- such that we can avoid changing behavior with changing locales.
-
- Parameters
- ----------
- s : str
-
- Returns
- -------
- capitalized : str
-
- Examples
- --------
- >>> from numpy.core.numerictypes import english_capitalize
- >>> english_capitalize('int8')
- 'Int8'
- >>> english_capitalize('Int8')
- 'Int8'
- >>> english_capitalize('')
- ''
- """
- if s:
- return english_upper(s[0]) + s[1:]
- else:
- return s
-
-
-sctypeDict = {} # Contains all leaf-node scalar types with aliases
-sctypeNA = {} # Contails all leaf-node types -> numarray type equivalences
-allTypes = {} # Collect the types we will add to the module here
-
-def _evalname(name):
- k = 0
- for ch in name:
- if ch in '0123456789':
- break
- k += 1
- try:
- bits = int(name[k:])
- except ValueError:
- bits = 0
- base = name[:k]
- return base, bits
-
-def bitname(obj):
- """Return a bit-width name for a given type object"""
- name = obj.__name__
- base = ''
- char = ''
- try:
- if name[-1] == '_':
- newname = name[:-1]
- else:
- newname = name
- info = typeinfo[english_upper(newname)]
- assert(info.type == obj) # sanity check
- bits = info.bits
-
- except KeyError: # bit-width name
- base, bits = _evalname(name)
- char = base[0]
-
- if name == 'bool_':
- char = 'b'
- base = 'bool'
- elif name == 'void':
- char = 'V'
- base = 'void'
- elif name == 'object_':
- char = 'O'
- base = 'object'
- bits = 0
- elif name == 'datetime64':
- char = 'M'
- elif name == 'timedelta64':
- char = 'm'
-
- if sys.version_info[0] >= 3:
- if name == 'bytes_':
- char = 'S'
- base = 'bytes'
- elif name == 'str_':
- char = 'U'
- base = 'str'
- else:
- if name == 'string_':
- char = 'S'
- base = 'string'
- elif name == 'unicode_':
- char = 'U'
- base = 'unicode'
-
- bytes = bits // 8
-
- if char != '' and bytes != 0:
- char = "%s%d" % (char, bytes)
-
- return base, bits, char
-
-
-def _add_types():
- for type_name, info in typeinfo.items():
- name = english_lower(type_name)
- if not isinstance(info, type):
- # define C-name and insert typenum and typechar references also
- allTypes[name] = info.type
- sctypeDict[name] = info.type
- sctypeDict[info.char] = info.type
- sctypeDict[info.num] = info.type
-
- else: # generic class
- allTypes[name] = info
-_add_types()
-
-def _add_aliases():
- for type_name, info in typeinfo.items():
- if isinstance(info, type):
- continue
- name = english_lower(type_name)
-
- # insert bit-width version for this class (if relevant)
- base, bit, char = bitname(info.type)
- if base[-3:] == 'int' or char[0] in 'ui':
- continue
- if base != '':
- myname = "%s%d" % (base, bit)
- if (name not in ('longdouble', 'clongdouble') or
- myname not in allTypes):
- base_capitalize = english_capitalize(base)
- if base == 'complex':
- na_name = '%s%d' % (base_capitalize, bit//2)
- elif base == 'bool':
- na_name = base_capitalize
- else:
- na_name = "%s%d" % (base_capitalize, bit)
-
- allTypes[myname] = info.type
-
- # add mapping for both the bit name and the numarray name
- sctypeDict[myname] = info.type
- sctypeDict[na_name] = info.type
-
- # add forward, reverse, and string mapping to numarray
- sctypeNA[na_name] = info.type
- sctypeNA[info.type] = na_name
- sctypeNA[info.char] = na_name
- if char != '':
- sctypeDict[char] = info.type
- sctypeNA[char] = na_name
-_add_aliases()
-
-# Integers are handled so that the int32 and int64 types should agree
-# exactly with NPY_INT32, NPY_INT64. We need to enforce the same checking
-# as is done in arrayobject.h where the order of getting a bit-width match
-# is long, longlong, int, short, char.
-def _add_integer_aliases():
- _ctypes = ['LONG', 'LONGLONG', 'INT', 'SHORT', 'BYTE']
- for ctype in _ctypes:
- i_info = typeinfo[ctype]
- u_info = typeinfo['U'+ctype]
- bits = i_info.bits # same for both
-
- for info, charname, intname, Intname in [
- (i_info,'i%d' % (bits//8,), 'int%d' % bits, 'Int%d' % bits),
- (u_info,'u%d' % (bits//8,), 'uint%d' % bits, 'UInt%d' % bits)]:
- if intname not in allTypes.keys():
- allTypes[intname] = info.type
- sctypeDict[intname] = info.type
- sctypeDict[Intname] = info.type
- sctypeDict[charname] = info.type
- sctypeNA[Intname] = info.type
- sctypeNA[charname] = info.type
- sctypeNA[info.type] = Intname
- sctypeNA[info.char] = Intname
-_add_integer_aliases()
-
-# We use these later
-void = allTypes['void']
+# We use this later
generic = allTypes['generic']
-#
-# Rework the Python names (so that float and complex and int are consistent
-# with Python usage)
-#
-def _set_up_aliases():
- type_pairs = [('complex_', 'cdouble'),
- ('int0', 'intp'),
- ('uint0', 'uintp'),
- ('single', 'float'),
- ('csingle', 'cfloat'),
- ('singlecomplex', 'cfloat'),
- ('float_', 'double'),
- ('intc', 'int'),
- ('uintc', 'uint'),
- ('int_', 'long'),
- ('uint', 'ulong'),
- ('cfloat', 'cdouble'),
- ('longfloat', 'longdouble'),
- ('clongfloat', 'clongdouble'),
- ('longcomplex', 'clongdouble'),
- ('bool_', 'bool'),
- ('unicode_', 'unicode'),
- ('object_', 'object')]
- if sys.version_info[0] >= 3:
- type_pairs.extend([('bytes_', 'string'),
- ('str_', 'unicode'),
- ('string_', 'string')])
- else:
- type_pairs.extend([('str_', 'string'),
- ('string_', 'string'),
- ('bytes_', 'string')])
- for alias, t in type_pairs:
- allTypes[alias] = allTypes[t]
- sctypeDict[alias] = sctypeDict[t]
- # Remove aliases overriding python types and modules
- to_remove = ['ulong', 'object', 'unicode', 'int', 'long', 'float',
- 'complex', 'bool', 'string', 'datetime', 'timedelta']
- if sys.version_info[0] >= 3:
- # Py3K
- to_remove.append('bytes')
- to_remove.append('str')
- to_remove.remove('unicode')
- to_remove.remove('long')
- for t in to_remove:
- try:
- del allTypes[t]
- del sctypeDict[t]
- except KeyError:
- pass
-_set_up_aliases()
-
-# Now, construct dictionary to lookup character codes from types
-_sctype2char_dict = {}
-def _construct_char_code_lookup():
- for name, info in typeinfo.items():
- if not isinstance(info, type):
- if info.char not in ['p', 'P']:
- _sctype2char_dict[info.type] = info.char
-_construct_char_code_lookup()
-
-
-sctypes = {'int': [],
- 'uint':[],
- 'float':[],
- 'complex':[],
- 'others':[bool, object, bytes, unicode, void]}
-
-def _add_array_type(typename, bits):
- try:
- t = allTypes['%s%d' % (typename, bits)]
- except KeyError:
- pass
- else:
- sctypes[typename].append(t)
-
-def _set_array_types():
- ibytes = [1, 2, 4, 8, 16, 32, 64]
- fbytes = [2, 4, 8, 10, 12, 16, 32, 64]
- for bytes in ibytes:
- bits = 8*bytes
- _add_array_type('int', bits)
- _add_array_type('uint', bits)
- for bytes in fbytes:
- bits = 8*bytes
- _add_array_type('float', bits)
- _add_array_type('complex', 2*bits)
- _gi = dtype('p')
- if _gi.type not in sctypes['int']:
- indx = 0
- sz = _gi.itemsize
- _lst = sctypes['int']
- while (indx < len(_lst) and sz >= _lst[indx](0).itemsize):
- indx += 1
- sctypes['int'].insert(indx, _gi.type)
- sctypes['uint'].insert(indx, dtype('P').type)
-_set_array_types()
-
-
genericTypeRank = ['bool', 'int8', 'uint8', 'int16', 'uint16',
'int32', 'uint32', 'int64', 'uint64', 'int128',
'uint128', 'float16',
@@ -510,14 +182,14 @@ def maximum_sctype(t):
if g is None:
return t
t = g
- name = t.__name__
- base, bits = _evalname(name)
- if bits == 0:
- return t
- else:
+ base = _kind_name(dtype(t))
+ if base in sctypes:
return sctypes[base][-1]
+ else:
+ return t
+@set_module('numpy')
def issctype(rep):
"""
Determines whether the given object represents a scalar data-type.
@@ -562,6 +234,8 @@ def issctype(rep):
except Exception:
return False
+
+@set_module('numpy')
def obj2sctype(rep, default=None):
"""
Return the scalar dtype or NumPy equivalent of Python type of an object.
@@ -616,6 +290,7 @@ def obj2sctype(rep, default=None):
return res.type
+@set_module('numpy')
def issubclass_(arg1, arg2):
"""
Determine if a class is a subclass of a second class.
@@ -654,6 +329,8 @@ def issubclass_(arg1, arg2):
except TypeError:
return False
+
+@set_module('numpy')
def issubsctype(arg1, arg2):
"""
Determine if the first argument is a subclass of the second argument.
@@ -684,6 +361,8 @@ def issubsctype(arg1, arg2):
"""
return issubclass(obj2sctype(arg1), obj2sctype(arg2))
+
+@set_module('numpy')
def issubdtype(arg1, arg2):
"""
Returns True if first argument is a typecode lower/equal in type hierarchy.
@@ -764,9 +443,7 @@ _alignment = _typedict()
_maxvals = _typedict()
_minvals = _typedict()
def _construct_lookups():
- for name, info in typeinfo.items():
- if isinstance(info, type):
- continue
+ for name, info in _concrete_typeinfo.items():
obj = info.type
nbytes[obj] = info.bits // 8
_alignment[obj] = info.alignment
@@ -779,6 +456,8 @@ def _construct_lookups():
_construct_lookups()
+
+@set_module('numpy')
def sctype2char(sctype):
"""
Return the string representation of a scalar dtype.
@@ -824,13 +503,17 @@ def sctype2char(sctype):
sctype = obj2sctype(sctype)
if sctype is None:
raise ValueError("unrecognized type")
- return _sctype2char_dict[sctype]
+ if sctype not in _concrete_types:
+ # for compatibility
+ raise KeyError(sctype)
+ return dtype(sctype).char
# Create dictionary of casting functions that wrap sequences
# indexed by type or type character
-
-
cast = _typedict()
+for key in _concrete_types:
+ cast[key] = lambda x, k=key: array(x, copy=False).astype(k)
+
try:
ScalarType = [_types.IntType, _types.FloatType, _types.ComplexType,
_types.LongType, _types.BooleanType,
@@ -839,41 +522,9 @@ except AttributeError:
# Py3K
ScalarType = [int, float, complex, int, bool, bytes, str, memoryview]
-ScalarType.extend(_sctype2char_dict.keys())
+ScalarType.extend(_concrete_types)
ScalarType = tuple(ScalarType)
-for key in _sctype2char_dict.keys():
- cast[key] = lambda x, k=key: array(x, copy=False).astype(k)
-# Create the typestring lookup dictionary
-_typestr = _typedict()
-for key in _sctype2char_dict.keys():
- if issubclass(key, allTypes['flexible']):
- _typestr[key] = _sctype2char_dict[key]
- else:
- _typestr[key] = empty((1,), key).dtype.str[1:]
-
-# Make sure all typestrings are in sctypeDict
-for key, val in _typestr.items():
- if val not in sctypeDict:
- sctypeDict[val] = key
-
-# Add additional strings to the sctypeDict
-
-if sys.version_info[0] >= 3:
- _toadd = ['int', 'float', 'complex', 'bool', 'object',
- 'str', 'bytes', 'object', ('a', allTypes['bytes_'])]
-else:
- _toadd = ['int', 'float', 'complex', 'bool', 'object', 'string',
- ('str', allTypes['string_']),
- 'unicode', 'object', ('a', allTypes['string_'])]
-
-for name in _toadd:
- if isinstance(name, tuple):
- sctypeDict[name[0]] = name[1]
- else:
- sctypeDict[name] = allTypes['%s_' % name]
-
-del _toadd, name
# Now add the types we've determined to this module
for key in allTypes:
@@ -947,6 +598,8 @@ def _register_types():
_register_types()
+
+@set_module('numpy')
def find_common_type(array_types, scalar_types):
"""
Determine common type following standard coercion rules.
diff --git a/numpy/core/overrides.py b/numpy/core/overrides.py
new file mode 100644
index 000000000..0979858a1
--- /dev/null
+++ b/numpy/core/overrides.py
@@ -0,0 +1,241 @@
+"""Preliminary implementation of NEP-18
+
+TODO: rewrite this in C for performance.
+"""
+import collections
+import functools
+import os
+
+from numpy.core._multiarray_umath import add_docstring, ndarray
+from numpy.compat._inspect import getargspec
+
+
+_NDARRAY_ARRAY_FUNCTION = ndarray.__array_function__
+_NDARRAY_ONLY = [ndarray]
+
+ENABLE_ARRAY_FUNCTION = bool(
+ int(os.environ.get('NUMPY_EXPERIMENTAL_ARRAY_FUNCTION', 0)))
+
+
+def get_overloaded_types_and_args(relevant_args):
+ """Returns a list of arguments on which to call __array_function__.
+
+ Parameters
+ ----------
+ relevant_args : iterable of array-like
+ Iterable of array-like arguments to check for __array_function__
+ methods.
+
+ Returns
+ -------
+ overloaded_types : collection of types
+ Types of arguments from relevant_args with __array_function__ methods.
+ overloaded_args : list
+ Arguments from relevant_args on which to call __array_function__
+ methods, in the order in which they should be called.
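+
+ Examples
+ --------
+ A rough sketch (output assumed); `ndarray` defines `__array_function__`,
+ while plain Python scalars do not:
+
+ >>> import numpy as np
+ >>> types, args = get_overloaded_types_and_args([np.arange(3), 1.0])
+ >>> types
+ [<class 'numpy.ndarray'>]
+ >>> args
+ [array([0, 1, 2])]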
+ """
+ # Runtime is O(num_arguments * num_unique_types)
+ overloaded_types = []
+ overloaded_args = []
+ for arg in relevant_args:
+ arg_type = type(arg)
+ # We only collect arguments if they have a unique type, which ensures
+ # reasonable performance even with a long list of possibly overloaded
+ # arguments.
+ if (arg_type not in overloaded_types and
+ hasattr(arg_type, '__array_function__')):
+
+ # Create lists explicitly for the first type (usually the only one
+ # found) to avoid setting up the iterator for overloaded_args.
+ if overloaded_types:
+ overloaded_types.append(arg_type)
+ # By default, insert argument at the end, but if it is
+ # subclass of another argument, insert it before that argument.
+ # This ensures "subclasses before superclasses".
+ index = len(overloaded_args)
+ for i, old_arg in enumerate(overloaded_args):
+ if issubclass(arg_type, type(old_arg)):
+ index = i
+ break
+ overloaded_args.insert(index, arg)
+ else:
+ overloaded_types = [arg_type]
+ overloaded_args = [arg]
+
+ return overloaded_types, overloaded_args
+
+
+def array_function_implementation_or_override(
+ implementation, public_api, relevant_args, args, kwargs):
+ """Implement a function with checks for __array_function__ overrides.
+
+ Parameters
+ ----------
+ implementation : function
+ Function that implements the operation on NumPy array without
+ overrides when called like ``implementation(*args, **kwargs)``.
+ public_api : function
+ Function exposed by NumPy's public API originally called like
+ ``public_api(*args, **kwargs)`` on which arguments are now being
+ checked.
+ relevant_args : iterable
+ Iterable of arguments to check for __array_function__ methods.
+ args : tuple
+ Arbitrary positional arguments originally passed into ``public_api``.
+ kwargs : dict
+ Arbitrary keyword arguments originally passed into ``public_api``.
+
+ Returns
+ -------
+ Result from calling `implementation()` or an `__array_function__`
+ method, as appropriate.
+
+ Raises
+ ------
+ TypeError : if no implementation is found.
+ """
+ # Check for __array_function__ methods.
+ types, overloaded_args = get_overloaded_types_and_args(relevant_args)
+ # Short-cut for common cases: no overload or only ndarray overload
+ # (directly or with subclasses that do not override __array_function__).
+ if (not overloaded_args or types == _NDARRAY_ONLY or
+ all(type(arg).__array_function__ is _NDARRAY_ARRAY_FUNCTION
+ for arg in overloaded_args)):
+ return implementation(*args, **kwargs)
+
+ # Call overrides
+ for overloaded_arg in overloaded_args:
+ # Use `public_api` instead of `implementation` so __array_function__
+ # implementations can do equality/identity comparisons.
+ result = overloaded_arg.__array_function__(
+ public_api, types, args, kwargs)
+
+ if result is not NotImplemented:
+ return result
+
+ func_name = '{}.{}'.format(public_api.__module__, public_api.__name__)
+ raise TypeError("no implementation found for '{}' on types that implement "
+ '__array_function__: {}'
+ .format(func_name, list(map(type, overloaded_args))))
+
+
+ArgSpec = collections.namedtuple('ArgSpec', 'args varargs keywords defaults')
+
+
+def verify_matching_signatures(implementation, dispatcher):
+ """Verify that a dispatcher function has the right signature."""
+ implementation_spec = ArgSpec(*getargspec(implementation))
+ dispatcher_spec = ArgSpec(*getargspec(dispatcher))
+
+ if (implementation_spec.args != dispatcher_spec.args or
+ implementation_spec.varargs != dispatcher_spec.varargs or
+ implementation_spec.keywords != dispatcher_spec.keywords or
+ (bool(implementation_spec.defaults) !=
+ bool(dispatcher_spec.defaults)) or
+ (implementation_spec.defaults is not None and
+ len(implementation_spec.defaults) !=
+ len(dispatcher_spec.defaults))):
+ raise RuntimeError('implementation and dispatcher for %s have '
+ 'different function signatures' % implementation)
+
+ if implementation_spec.defaults is not None:
+ if dispatcher_spec.defaults != (None,) * len(dispatcher_spec.defaults):
+ raise RuntimeError('dispatcher functions can only use None for '
+ 'default argument values')
+
+
+def set_module(module):
+ """Decorator for overriding __module__ on a function or class.
+
+ Example usage::
+
+ @set_module('numpy')
+ def example():
+ pass
+
+ assert example.__module__ == 'numpy'
+ """
+ def decorator(func):
+ if module is not None:
+ func.__module__ = module
+ return func
+ return decorator
+
+
+def array_function_dispatch(dispatcher, module=None, verify=True,
+ docs_from_dispatcher=False):
+ """Decorator for adding dispatch with the __array_function__ protocol.
+
+ See NEP-18 for example usage.
+
+ Parameters
+ ----------
+ dispatcher : callable
+ Function that when called like ``dispatcher(*args, **kwargs)`` with
+ arguments from the NumPy function call returns an iterable of
+ array-like arguments to check for ``__array_function__``.
+ module : str, optional
+ __module__ attribute to set on new function, e.g., ``module='numpy'``.
+ By default, module is copied from the decorated function.
+ verify : bool, optional
+ If True, verify that the signatures of the dispatcher and decorated
+ function match exactly: all required and optional arguments
+ should appear in order with the same names, but the default values for
+ all optional arguments should be ``None``. Only disable verification
+ if the dispatcher's signature needs to deviate for some particular
+ reason, e.g., because the function has a signature like
+ ``func(*args, **kwargs)``.
+ docs_from_dispatcher : bool, optional
+ If True, copy docs from the dispatcher function onto the dispatched
+ function, rather than from the implementation. This is useful for
+ functions defined in C, which otherwise don't have docstrings.
+
+ Returns
+ -------
+ Function suitable for decorating the implementation of a NumPy function.
+ """
+
+ if not ENABLE_ARRAY_FUNCTION:
+ # __array_function__ requires an explicit opt-in for now
+ def decorator(implementation):
+ if module is not None:
+ implementation.__module__ = module
+ if docs_from_dispatcher:
+ add_docstring(implementation, dispatcher.__doc__)
+ return implementation
+ return decorator
+
+ def decorator(implementation):
+ if verify:
+ verify_matching_signatures(implementation, dispatcher)
+
+ if docs_from_dispatcher:
+ add_docstring(implementation, dispatcher.__doc__)
+
+ @functools.wraps(implementation)
+ def public_api(*args, **kwargs):
+ relevant_args = dispatcher(*args, **kwargs)
+ return array_function_implementation_or_override(
+ implementation, public_api, relevant_args, args, kwargs)
+
+ if module is not None:
+ public_api.__module__ = module
+
+ # TODO: remove this when we drop Python 2 support (functools.wraps
+ # adds __wrapped__ automatically in later versions)
+ public_api.__wrapped__ = implementation
+
+ return public_api
+
+ return decorator
+
+
+def array_function_from_dispatcher(
+ implementation, module=None, verify=True, docs_from_dispatcher=True):
+ """Like array_function_dispatcher, but with function arguments flipped."""
+
+ def decorator(dispatcher):
+ return array_function_dispatch(
+ dispatcher, module, verify=verify,
+ docs_from_dispatcher=docs_from_dispatcher)(implementation)
+ return decorator
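
For readers tracing the dispatch machinery above, here is a minimal sketch of
how a decorated function and an overriding type interact. The names
(`_double_dispatcher`, `double`, `MyDuck`) are hypothetical and only
illustrate the protocol; note that the `ENABLE_ARRAY_FUNCTION` gate above
makes the decorator a pass-through unless the protocol is explicitly enabled.

    import numpy as np
    from numpy.core.overrides import array_function_dispatch

    def _double_dispatcher(a, out=None):
        # Dispatchers return the array-like arguments to scan for
        # __array_function__; per verify_matching_signatures, all of
        # their defaults must be None.
        return (a, out)

    @array_function_dispatch(_double_dispatcher)
    def double(a, out=None):
        """Double the input (the plain NumPy implementation)."""
        return np.multiply(a, 2, out=out)

    class MyDuck(object):
        def __array_function__(self, func, types, args, kwargs):
            # `func` is the public API object, so identity checks work.
            if func is double:
                return 'doubled duck'
            return NotImplemented

    double(np.arange(3))  # array([0, 2, 4]), via the ndarray shortcut
    double(MyDuck())      # 'doubled duck', via __array_function__
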
diff --git a/numpy/core/records.py b/numpy/core/records.py
index a483871ba..86a43306a 100644
--- a/numpy/core/records.py
+++ b/numpy/core/records.py
@@ -42,7 +42,8 @@ import warnings
from . import numeric as sb
from . import numerictypes as nt
-from numpy.compat import isfileobj, bytes, long, unicode
+from numpy.compat import isfileobj, bytes, long, unicode, os_fspath
+from numpy.core.overrides import set_module
from .arrayprint import get_printoptions
# All of the functions allow formats to be a dtype
@@ -82,6 +83,8 @@ def find_duplicate(list):
dup.append(list[i])
return dup
+
+@set_module('numpy')
class format_parser(object):
"""
Class to convert formats, names, titles description to a dtype.
@@ -287,10 +290,8 @@ class record(nt.void):
# pretty-print all fields
names = self.dtype.names
maxlen = max(len(name) for name in names)
- rows = []
fmt = '%% %ds: %%s' % maxlen
- for name in names:
- rows.append(fmt % (name, getattr(self, name)))
+ rows = [fmt % (name, getattr(self, name)) for name in names]
return "\n".join(rows)
# The recarray is almost identical to a standard array (which supports
@@ -737,9 +738,9 @@ def fromfile(fd, dtype=None, shape=None, offset=0, formats=None,
names=None, titles=None, aligned=False, byteorder=None):
"""Create an array from binary file data
- If file is a string then that file is opened, else it is assumed
- to be a file object. The file object must support random access
- (i.e. it must have tell and seek methods).
+ If file is a string or a path-like object then that file is opened,
+ else it is assumed to be a file object. The file object must
+ support random access (i.e. it must have tell and seek methods).
>>> from tempfile import TemporaryFile
>>> a = np.empty(10,dtype='f8,i4,a5')
@@ -763,10 +764,14 @@ def fromfile(fd, dtype=None, shape=None, offset=0, formats=None,
elif isinstance(shape, (int, long)):
shape = (shape,)
- name = 0
- if isinstance(fd, str):
+ if isfileobj(fd):
+ # file already opened
+ name = 0
+ else:
+ # open file
+ fd = open(os_fspath(fd), 'rb')
name = 1
- fd = open(fd, 'rb')
+
if (offset > 0):
fd.seek(offset, 1)
size = get_remaining_size(fd)
@@ -778,13 +783,13 @@ def fromfile(fd, dtype=None, shape=None, offset=0, formats=None,
itemsize = descr.itemsize
- shapeprod = sb.array(shape).prod()
+ shapeprod = sb.array(shape).prod(dtype=nt.intp)
shapesize = shapeprod * itemsize
if shapesize < 0:
shape = list(shape)
- shape[shape.index(-1)] = size / -shapesize
+ shape[shape.index(-1)] = size // -shapesize
shape = tuple(shape)
- shapeprod = sb.array(shape).prod()
+ shapeprod = sb.array(shape).prod(dtype=nt.intp)
nbytes = shapeprod * itemsize
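
To make the os_fspath change above concrete: fromfile now accepts any
path-like object, not only str or an open file. A small usage sketch (the
file name and record layout here are hypothetical):

    import pathlib
    import numpy as np

    path = pathlib.Path('records.bin')
    np.arange(30, dtype='f8').tofile(str(path))   # 10 records of 'f8,f8,f8'

    # Previously `fd` had to be a str or a file object; a Path now works too.
    r = np.core.records.fromfile(path, formats='f8,f8,f8', shape=10)
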
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index f826b278f..467b590ac 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -4,7 +4,6 @@ import os
import sys
import pickle
import copy
-import sysconfig
import warnings
import platform
from os.path import join
@@ -153,7 +152,8 @@ def check_math_capabilities(config, moredefs, mathlibs):
for h in OPTIONAL_HEADERS:
if config.check_func("", decl=False, call=False, headers=[h]):
- moredefs.append((fname2def(h).replace(".", "_"), 1))
+ h = h.replace(".", "_").replace(os.path.sep, "_")
+ moredefs.append((fname2def(h), 1))
for tup in OPTIONAL_INTRINSICS:
headers = None
@@ -379,8 +379,9 @@ def check_mathlib(config_cmd):
def visibility_define(config):
"""Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty
string)."""
- if config.check_compiler_gcc4():
- return '__attribute__((visibility("hidden")))'
+ hide = '__attribute__((visibility("hidden")))'
+ if config.check_gcc_function_attribute(hide, 'hideme'):
+ return hide
else:
return ''
@@ -514,9 +515,9 @@ def configuration(parent_package='',top_path=None):
def generate_numpyconfig_h(ext, build_dir):
"""Depends on config.h: generate_config_h has to be called before !"""
- # put private include directory in build_dir on search path
+ # put common include directory in build_dir on search path
# allows using code generation in headers
- config.add_include_dirs(join(build_dir, "src", "private"))
+ config.add_include_dirs(join(build_dir, "src", "common"))
config.add_include_dirs(join(build_dir, "src", "npymath"))
target = join(build_dir, header_dir, '_numpyconfig.h')
@@ -603,7 +604,7 @@ def configuration(parent_package='',top_path=None):
generate_numpy_api = generate_api_func('generate_numpy_api')
generate_ufunc_api = generate_api_func('generate_ufunc_api')
- config.add_include_dirs(join(local_dir, "src", "private"))
+ config.add_include_dirs(join(local_dir, "src", "common"))
config.add_include_dirs(join(local_dir, "src"))
config.add_include_dirs(join(local_dir))
@@ -677,7 +678,7 @@ def configuration(parent_package='',top_path=None):
join('src', 'npymath', 'npy_math_complex.c.src'),
join('src', 'npymath', 'halffloat.c')
]
-
+
# Must be true for CRT compilers but not MinGW/cygwin. See gh-9977.
is_msvc = platform.system() == 'Windows'
config.add_installed_library('npymath',
@@ -697,12 +698,13 @@ def configuration(parent_package='',top_path=None):
#######################################################################
# This library is created for the build but it is not installed
- npysort_sources = [join('src', 'npysort', 'quicksort.c.src'),
+ npysort_sources = [join('src', 'common', 'npy_sort.h.src'),
+ join('src', 'npysort', 'quicksort.c.src'),
join('src', 'npysort', 'mergesort.c.src'),
join('src', 'npysort', 'heapsort.c.src'),
- join('src', 'private', 'npy_partition.h.src'),
+ join('src', 'common', 'npy_partition.h.src'),
join('src', 'npysort', 'selection.c.src'),
- join('src', 'private', 'npy_binsearch.h.src'),
+ join('src', 'common', 'npy_binsearch.h.src'),
join('src', 'npysort', 'binsearch.c.src'),
]
config.add_library('npysort',
@@ -710,16 +712,71 @@ def configuration(parent_package='',top_path=None):
include_dirs=[])
#######################################################################
- # multiarray module #
+ # multiarray_tests module #
+ #######################################################################
+
+ config.add_extension('_multiarray_tests',
+ sources=[join('src', 'multiarray', '_multiarray_tests.c.src'),
+ join('src', 'common', 'mem_overlap.c')],
+ depends=[join('src', 'common', 'mem_overlap.h'),
+ join('src', 'common', 'npy_extint128.h')],
+ libraries=['npymath'])
+
+ #######################################################################
+ # _multiarray_umath module - common part #
+ #######################################################################
+
+ common_deps = [
+ join('src', 'common', 'array_assign.h'),
+ join('src', 'common', 'binop_override.h'),
+ join('src', 'common', 'cblasfuncs.h'),
+ join('src', 'common', 'lowlevel_strided_loops.h'),
+ join('src', 'common', 'mem_overlap.h'),
+ join('src', 'common', 'npy_cblas.h'),
+ join('src', 'common', 'npy_config.h'),
+ join('src', 'common', 'npy_ctypes.h'),
+ join('src', 'common', 'npy_extint128.h'),
+ join('src', 'common', 'npy_import.h'),
+ join('src', 'common', 'npy_longdouble.h'),
+ join('src', 'common', 'templ_common.h.src'),
+ join('src', 'common', 'ucsnarrow.h'),
+ join('src', 'common', 'ufunc_override.h'),
+ join('src', 'common', 'umathmodule.h'),
+ join('src', 'common', 'numpyos.h'),
+ ]
+
+ common_src = [
+ join('src', 'common', 'array_assign.c'),
+ join('src', 'common', 'mem_overlap.c'),
+ join('src', 'common', 'npy_longdouble.c'),
+ join('src', 'common', 'templ_common.h.src'),
+ join('src', 'common', 'ucsnarrow.c'),
+ join('src', 'common', 'ufunc_override.c'),
+ join('src', 'common', 'numpyos.c'),
+ ]
+
+ blas_info = get_info('blas_opt', 0)
+ if blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []):
+ extra_info = blas_info
+ # These files are also in MANIFEST.in so that they are always in
+ # the source distribution independently of HAVE_CBLAS.
+ common_src.extend([join('src', 'common', 'cblasfuncs.c'),
+ join('src', 'common', 'python_xerbla.c'),
+ ])
+ if uses_accelerate_framework(blas_info):
+ common_src.extend(get_sgemv_fix())
+ else:
+ extra_info = {}
+
+ #######################################################################
+ # _multiarray_umath module - multiarray part #
#######################################################################
multiarray_deps = [
join('src', 'multiarray', 'arrayobject.h'),
join('src', 'multiarray', 'arraytypes.h'),
- join('src', 'multiarray', 'array_assign.h'),
join('src', 'multiarray', 'buffer.h'),
join('src', 'multiarray', 'calculation.h'),
- join('src', 'multiarray', 'cblasfuncs.h'),
join('src', 'multiarray', 'common.h'),
join('src', 'multiarray', 'convert_datatype.h'),
join('src', 'multiarray', 'convert.h'),
@@ -735,24 +792,14 @@ def configuration(parent_package='',top_path=None):
join('src', 'multiarray', 'multiarraymodule.h'),
join('src', 'multiarray', 'nditer_impl.h'),
join('src', 'multiarray', 'number.h'),
- join('src', 'multiarray', 'numpyos.h'),
join('src', 'multiarray', 'refcount.h'),
join('src', 'multiarray', 'scalartypes.h'),
join('src', 'multiarray', 'sequence.h'),
join('src', 'multiarray', 'shape.h'),
join('src', 'multiarray', 'strfuncs.h'),
join('src', 'multiarray', 'typeinfo.h'),
- join('src', 'multiarray', 'ucsnarrow.h'),
join('src', 'multiarray', 'usertypes.h'),
join('src', 'multiarray', 'vdot.h'),
- join('src', 'private', 'npy_config.h'),
- join('src', 'private', 'templ_common.h.src'),
- join('src', 'private', 'lowlevel_strided_loops.h'),
- join('src', 'private', 'mem_overlap.h'),
- join('src', 'private', 'npy_longdouble.h'),
- join('src', 'private', 'ufunc_override.h'),
- join('src', 'private', 'binop_override.h'),
- join('src', 'private', 'npy_extint128.h'),
join('include', 'numpy', 'arrayobject.h'),
join('include', 'numpy', '_neighborhood_iterator_imp.h'),
join('include', 'numpy', 'npy_endian.h'),
@@ -778,7 +825,6 @@ def configuration(parent_package='',top_path=None):
join('src', 'multiarray', 'alloc.c'),
join('src', 'multiarray', 'arrayobject.c'),
join('src', 'multiarray', 'arraytypes.c.src'),
- join('src', 'multiarray', 'array_assign.c'),
join('src', 'multiarray', 'array_assign_scalar.c'),
join('src', 'multiarray', 'array_assign_array.c'),
join('src', 'multiarray', 'buffer.c'),
@@ -811,7 +857,6 @@ def configuration(parent_package='',top_path=None):
join('src', 'multiarray', 'nditer_constr.c'),
join('src', 'multiarray', 'nditer_pywrap.c'),
join('src', 'multiarray', 'number.c'),
- join('src', 'multiarray', 'numpyos.c'),
join('src', 'multiarray', 'refcount.c'),
join('src', 'multiarray', 'sequence.c'),
join('src', 'multiarray', 'shape.c'),
@@ -821,40 +866,11 @@ def configuration(parent_package='',top_path=None):
join('src', 'multiarray', 'temp_elide.c'),
join('src', 'multiarray', 'typeinfo.c'),
join('src', 'multiarray', 'usertypes.c'),
- join('src', 'multiarray', 'ucsnarrow.c'),
join('src', 'multiarray', 'vdot.c'),
- join('src', 'private', 'templ_common.h.src'),
- join('src', 'private', 'mem_overlap.c'),
- join('src', 'private', 'npy_longdouble.c'),
- join('src', 'private', 'ufunc_override.c'),
]
- blas_info = get_info('blas_opt', 0)
- if blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []):
- extra_info = blas_info
- # These files are also in MANIFEST.in so that they are always in
- # the source distribution independently of HAVE_CBLAS.
- multiarray_src.extend([join('src', 'multiarray', 'cblasfuncs.c'),
- join('src', 'multiarray', 'python_xerbla.c'),
- ])
- if uses_accelerate_framework(blas_info):
- multiarray_src.extend(get_sgemv_fix())
- else:
- extra_info = {}
-
- config.add_extension('multiarray',
- sources=multiarray_src +
- [generate_config_h,
- generate_numpyconfig_h,
- generate_numpy_api,
- join(codegen_dir, 'generate_numpy_api.py'),
- join('*.py')],
- depends=deps + multiarray_deps,
- libraries=['npymath', 'npysort'],
- extra_info=extra_info)
-
#######################################################################
- # umath module #
+ # _multiarray_umath module - umath part #
#######################################################################
def generate_umath_c(ext, build_dir):
@@ -877,40 +893,43 @@ def configuration(parent_package='',top_path=None):
join('src', 'umath', 'simd.inc.src'),
join('src', 'umath', 'loops.h.src'),
join('src', 'umath', 'loops.c.src'),
+ join('src', 'umath', 'matmul.h.src'),
+ join('src', 'umath', 'matmul.c.src'),
join('src', 'umath', 'ufunc_object.c'),
join('src', 'umath', 'extobj.c'),
join('src', 'umath', 'cpuid.c'),
join('src', 'umath', 'scalarmath.c.src'),
join('src', 'umath', 'ufunc_type_resolution.c'),
join('src', 'umath', 'override.c'),
- join('src', 'private', 'mem_overlap.c'),
- join('src', 'private', 'npy_longdouble.c'),
- join('src', 'private', 'ufunc_override.c')]
+ ]
umath_deps = [
generate_umath_py,
join('include', 'numpy', 'npy_math.h'),
join('include', 'numpy', 'halffloat.h'),
join('src', 'multiarray', 'common.h'),
- join('src', 'private', 'templ_common.h.src'),
+ join('src', 'multiarray', 'number.h'),
+ join('src', 'common', 'templ_common.h.src'),
join('src', 'umath', 'simd.inc.src'),
join('src', 'umath', 'override.h'),
join(codegen_dir, 'generate_ufunc_api.py'),
- join('src', 'private', 'lowlevel_strided_loops.h'),
- join('src', 'private', 'mem_overlap.h'),
- join('src', 'private', 'npy_longdouble.h'),
- join('src', 'private', 'ufunc_override.h'),
- join('src', 'private', 'binop_override.h')] + npymath_sources
-
- config.add_extension('umath',
- sources=umath_src +
+ ]
+
+ config.add_extension('_multiarray_umath',
+ sources=multiarray_src + umath_src +
+ npymath_sources + common_src +
[generate_config_h,
- generate_numpyconfig_h,
- generate_umath_c,
- generate_ufunc_api],
- depends=deps + umath_deps,
- libraries=['npymath'],
- )
+ generate_numpyconfig_h,
+ generate_numpy_api,
+ join(codegen_dir, 'generate_numpy_api.py'),
+ join('*.py'),
+ generate_umath_c,
+ generate_ufunc_api,
+ ],
+ depends=deps + multiarray_deps + umath_deps +
+ common_deps,
+ libraries=['npymath', 'npysort'],
+ extra_info=extra_info)
#######################################################################
# umath_tests module #
@@ -933,16 +952,6 @@ def configuration(parent_package='',top_path=None):
config.add_extension('_struct_ufunc_tests',
sources=[join('src', 'umath', '_struct_ufunc_tests.c.src')])
- #######################################################################
- # multiarray_tests module #
- #######################################################################
-
- config.add_extension('_multiarray_tests',
- sources=[join('src', 'multiarray', '_multiarray_tests.c.src'),
- join('src', 'private', 'mem_overlap.c')],
- depends=[join('src', 'private', 'mem_overlap.h'),
- join('src', 'private', 'npy_extint128.h')],
- libraries=['npymath'])
#######################################################################
# operand_flag_tests module #
diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py
index 356482b07..f837df112 100644
--- a/numpy/core/setup_common.py
+++ b/numpy/core/setup_common.py
@@ -41,7 +41,8 @@ C_ABI_VERSION = 0x01000009
# 0x0000000b - 1.13.x
# 0x0000000c - 1.14.x
# 0x0000000c - 1.15.x
-C_API_VERSION = 0x0000000c
+# 0x0000000d - 1.16.x
+C_API_VERSION = 0x0000000d
class MismatchCAPIWarning(Warning):
pass
@@ -110,7 +111,7 @@ OPTIONAL_STDFUNCS = ["expm1", "log1p", "acosh", "asinh", "atanh",
"rint", "trunc", "exp2", "log2", "hypot", "atan2", "pow",
"copysign", "nextafter", "ftello", "fseeko",
"strtoll", "strtoull", "cbrt", "strtold_l", "fallocate",
- "backtrace"]
+ "backtrace", "madvise"]
OPTIONAL_HEADERS = [
@@ -120,6 +121,7 @@ OPTIONAL_HEADERS = [
"features.h", # for glibc version linux
"xlocale.h", # see GH#8367
"dlfcn.h", # dladdr
+ "sys/mman.h", #madvise
]
# optional gcc compiler builtins and their call arguments and optional a
diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py
index 319c25088..a529d2ad7 100644
--- a/numpy/core/shape_base.py
+++ b/numpy/core/shape_base.py
@@ -3,11 +3,26 @@ from __future__ import division, absolute_import, print_function
__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack',
'stack', 'vstack']
+import functools
+import operator
+import types
+import warnings
from . import numeric as _nx
+from . import overrides
from .numeric import array, asanyarray, newaxis
from .multiarray import normalize_axis_index
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+def _atleast_1d_dispatcher(*arys):
+ return arys
+
+
+@array_function_dispatch(_atleast_1d_dispatcher)
def atleast_1d(*arys):
"""
Convert inputs to arrays with at least one dimension.
@@ -60,6 +75,12 @@ def atleast_1d(*arys):
else:
return res
+
+def _atleast_2d_dispatcher(*arys):
+ return arys
+
+
+@array_function_dispatch(_atleast_2d_dispatcher)
def atleast_2d(*arys):
"""
View inputs as arrays with at least two dimensions.
@@ -112,6 +133,12 @@ def atleast_2d(*arys):
else:
return res
+
+def _atleast_3d_dispatcher(*arys):
+ return arys
+
+
+@array_function_dispatch(_atleast_3d_dispatcher)
def atleast_3d(*arys):
"""
View inputs as arrays with at least three dimensions.
@@ -179,6 +206,27 @@ def atleast_3d(*arys):
return res
+def _arrays_for_stack_dispatcher(arrays, stacklevel=4):
+ if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):
+ warnings.warn('arrays to stack must be passed as a "sequence" type '
+ 'such as list or tuple. Support for non-sequence '
+ 'iterables such as generators is deprecated as of '
+ 'NumPy 1.16 and will raise an error in the future.',
+ FutureWarning, stacklevel=stacklevel)
+ return ()
+ return arrays
+
+
+def _warn_for_nonsequence(arrays):
+ if not overrides.ENABLE_ARRAY_FUNCTION:
+ _arrays_for_stack_dispatcher(arrays, stacklevel=4)
+
+
+def _vhstack_dispatcher(tup):
+ return _arrays_for_stack_dispatcher(tup)
+
+
+@array_function_dispatch(_vhstack_dispatcher)
def vstack(tup):
"""
Stack arrays in sequence vertically (row wise).
@@ -231,8 +279,11 @@ def vstack(tup):
[4]])
"""
+ _warn_for_nonsequence(tup)
return _nx.concatenate([atleast_2d(_m) for _m in tup], 0)
+
+@array_function_dispatch(_vhstack_dispatcher)
def hstack(tup):
"""
Stack arrays in sequence horizontally (column wise).
@@ -280,6 +331,7 @@ def hstack(tup):
[3, 4]])
"""
+ _warn_for_nonsequence(tup)
arrs = [atleast_1d(_m) for _m in tup]
# As a special case, dimension 0 of 1-dimensional arrays is "horizontal"
if arrs and arrs[0].ndim == 1:
@@ -288,6 +340,15 @@ def hstack(tup):
return _nx.concatenate(arrs, 1)
+def _stack_dispatcher(arrays, axis=None, out=None):
+ arrays = _arrays_for_stack_dispatcher(arrays, stacklevel=6)
+ for a in arrays:
+ yield a
+ if out is not None:
+ yield out
+
+
+@array_function_dispatch(_stack_dispatcher)
def stack(arrays, axis=0, out=None):
"""
Join a sequence of arrays along a new axis.
@@ -344,11 +405,12 @@ def stack(arrays, axis=0, out=None):
[3, 4]])
"""
+ _warn_for_nonsequence(arrays)
arrays = [asanyarray(arr) for arr in arrays]
if not arrays:
raise ValueError('need at least one array to stack')
- shapes = set(arr.shape for arr in arrays)
+ shapes = {arr.shape for arr in arrays}
if len(shapes) != 1:
raise ValueError('all input arrays must have the same shape')
@@ -360,6 +422,14 @@ def stack(arrays, axis=0, out=None):
return _nx.concatenate(expanded_arrays, axis=axis, out=out)
+def _block_format_index(index):
+ """
+ Convert a list of indices ``[0, 1, 2]`` into ``"arrays[0][1][2]"``.
+ """
+ idx_str = ''.join('[{}]'.format(i) for i in index if i is not None)
+ return 'arrays' + idx_str
+
+
def _block_check_depths_match(arrays, parent_index=[]):
"""
Recursive function checking that the depths of nested lists in `arrays`
@@ -370,19 +440,27 @@ def _block_check_depths_match(arrays, parent_index=[]):
for each innermost list, in case an error needs to be raised, so that
the index of the offending list can be printed as part of the error.
- The parameter `parent_index` is the full index of `arrays` within the
- nested lists passed to _block_check_depths_match at the top of the
- recursion.
- The return value is a pair. The first item returned is the full index
- of an element (specifically the first element) from the bottom of the
- nesting in `arrays`. An empty list at the bottom of the nesting is
- represented by a `None` index.
- The second item is the maximum of the ndims of the arrays nested in
- `arrays`.
+ Parameters
+ ----------
+ arrays : nested list of arrays
+ The arrays to check
+ parent_index : list of int
+ The full index of `arrays` within the nested lists passed to
+ `_block_check_depths_match` at the top of the recursion.
+
+ Returns
+ -------
+ first_index : list of int
+ The full index of an element from the bottom of the nesting in
+ `arrays`. If any element at the bottom is an empty list, this will
+ refer to it, and the last index along the empty axis will be `None`.
+ max_arr_ndim : int
+ The maximum of the ndims of the arrays nested in `arrays`.
+ final_size : int
+ The number of elements in the final array. This is used to motivate
+ the choice of algorithm, based on benchmarking wisdom.
+
"""
- def format_index(index):
- idx_str = ''.join('[{}]'.format(i) for i in index if i is not None)
- return 'arrays' + idx_str
if type(arrays) is tuple:
# not strictly necessary, but saves us from:
# - more than one way to do things - no point treating tuples like
@@ -393,15 +471,16 @@ def _block_check_depths_match(arrays, parent_index=[]):
'{} is a tuple. '
'Only lists can be used to arrange blocks, and np.block does '
'not allow implicit conversion from tuple to ndarray.'.format(
- format_index(parent_index)
+ _block_format_index(parent_index)
)
)
elif type(arrays) is list and len(arrays) > 0:
idxs_ndims = (_block_check_depths_match(arr, parent_index + [i])
for i, arr in enumerate(arrays))
- first_index, max_arr_ndim = next(idxs_ndims)
- for index, ndim in idxs_ndims:
+ first_index, max_arr_ndim, final_size = next(idxs_ndims)
+ for index, ndim, size in idxs_ndims:
+ final_size += size
if ndim > max_arr_ndim:
max_arr_ndim = ndim
if len(index) != len(first_index):
@@ -410,51 +489,183 @@ def _block_check_depths_match(arrays, parent_index=[]):
"{}, but there is an element at depth {} ({})".format(
len(first_index),
len(index),
- format_index(index)
+ _block_format_index(index)
)
)
- return first_index, max_arr_ndim
+ # propagate our flag that indicates an empty list at the bottom
+ if index[-1] is None:
+ first_index = index
+
+ return first_index, max_arr_ndim, final_size
elif type(arrays) is list and len(arrays) == 0:
# We've 'bottomed out' on an empty list
- return parent_index + [None], 0
+ return parent_index + [None], 0, 0
+ else:
+ # We've 'bottomed out' - arrays is either a scalar or an array
+ size = _nx.size(arrays)
+ return parent_index, _nx.ndim(arrays), size
+
+
+def _atleast_nd(a, ndim):
+ # Ensures `a` has at least `ndim` dimensions by prepending
+ # ones to `a.shape` as necessary
+ return array(a, ndmin=ndim, copy=False, subok=True)
+
+
+def _accumulate(values):
+ # Helper function because Python 2.7 doesn't have
+ # itertools.accumulate
+ value = 0
+ accumulated = []
+ for v in values:
+ value += v
+ accumulated.append(value)
+ return accumulated
+
+
+def _concatenate_shapes(shapes, axis):
+ """Given array shapes, return the resulting shape and slices prefixes.
+
+ These help in nested concatation.
+ Returns
+ -------
+ shape: tuple of int
+ This tuple satisfies:
+ ```
+ shape, _ = _concatenate_shapes([arr.shape for arr in arrs], axis)
+ shape == concatenate(arrs, axis).shape
+ ```
+
+ slice_prefixes: tuple of (slice(start, end), )
+ For a list of arrays being concatenated, this returns the slice
+ in the larger array at axis that needs to be sliced into.
+
+ For example, the following holds:
+ ```
+ ret = concatenate([a, b, c], axis)
+ _, (sl_a, sl_b, sl_c) = _concatenate_shapes([a.shape, b.shape, c.shape], axis)
+
+ ret[(slice(None),) * axis + sl_a] == a
+ ret[(slice(None),) * axis + sl_b] == b
+ ret[(slice(None),) * axis + sl_c] == c
+ ```
+
+ These are called slice prefixes since they are used in the recursive
+ blocking algorithm to compute the left-most slices during the
+ recursion. Therefore, they must be prepended to the rest of the slice
+ that was computed deeper in the recursion.
+
+ These are returned as tuples to ensure that they can quickly be added
+ to an existing slice tuple without creating a new tuple every time.
+
+ """
+ # Cache a result that will be reused.
+ shape_at_axis = [shape[axis] for shape in shapes]
+
+ # Take a shape, any shape
+ first_shape = shapes[0]
+ first_shape_pre = first_shape[:axis]
+ first_shape_post = first_shape[axis+1:]
+
+ if any(shape[:axis] != first_shape_pre or
+ shape[axis+1:] != first_shape_post for shape in shapes):
+ raise ValueError(
+ 'Mismatched array shapes in block along axis {}.'.format(axis))
+
+ shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape[axis+1:])
+
+ offsets_at_axis = _accumulate(shape_at_axis)
+ slice_prefixes = [(slice(start, end),)
+ for start, end in zip([0] + offsets_at_axis,
+ offsets_at_axis)]
+ return shape, slice_prefixes
+
+
+def _block_info_recursion(arrays, max_depth, result_ndim, depth=0):
+ """
+ Returns the shape of the final array, along with a list
+ of slices and a list of arrays that can be used for assignment inside the
+ new array
+
+ Parameters
+ ----------
+ arrays : nested list of arrays
+ The arrays to check
+ max_depth : int
+ The depth of the nested lists
+ result_ndim : int
+ The number of dimensions in the final array.
+
+ Returns
+ -------
+ shape : tuple of int
+ The shape that the final array will take on.
+ slices : list of tuple of slices
+ The slices into the full array required for assignment. These are
+ required to be prepended with ``(Ellipsis, )`` to obtain the correct
+ final index.
+ arrays : list of ndarray
+ The data to assign to each slice of the full array
+
+ """
+ if depth < max_depth:
+ shapes, slices, arrays = zip(
+ *[_block_info_recursion(arr, max_depth, result_ndim, depth+1)
+ for arr in arrays])
+
+ axis = result_ndim - max_depth + depth
+ shape, slice_prefixes = _concatenate_shapes(shapes, axis)
+
+ # Prepend the slice prefix and flatten the slices
+ slices = [slice_prefix + the_slice
+ for slice_prefix, inner_slices in zip(slice_prefixes, slices)
+ for the_slice in inner_slices]
+
+ # Flatten the array list
+ arrays = functools.reduce(operator.add, arrays)
+
+ return shape, slices, arrays
else:
# We've 'bottomed out' - arrays is either a scalar or an array
- return parent_index, _nx.ndim(arrays)
+ # type(arrays) is not list
+ # Return the slice and the array inside a list to be consistent with
+ # the recursive case.
+ arr = _atleast_nd(arrays, result_ndim)
+ return arr.shape, [()], [arr]
-def _block(arrays, max_depth, result_ndim):
+def _block(arrays, max_depth, result_ndim, depth=0):
"""
- Internal implementation of block. `arrays` is the argument passed to
+ Internal implementation of block based on repeated concatenation.
+ `arrays` is the argument passed to
block. `max_depth` is the depth of nested lists within `arrays` and
`result_ndim` is the greatest of the dimensions of the arrays in
`arrays` and the depth of the lists in `arrays` (see block docstring
for details).
"""
- def atleast_nd(a, ndim):
- # Ensures `a` has at least `ndim` dimensions by prepending
- # ones to `a.shape` as necessary
- return array(a, ndmin=ndim, copy=False, subok=True)
-
- def block_recursion(arrays, depth=0):
- if depth < max_depth:
- if len(arrays) == 0:
- raise ValueError('Lists cannot be empty')
- arrs = [block_recursion(arr, depth+1) for arr in arrays]
- return _nx.concatenate(arrs, axis=-(max_depth-depth))
- else:
- # We've 'bottomed out' - arrays is either a scalar or an array
- # type(arrays) is not list
- return atleast_nd(arrays, result_ndim)
-
- try:
- return block_recursion(arrays)
- finally:
- # recursive closures have a cyclic reference to themselves, which
- # requires gc to collect (gh-10620). To avoid this problem, for
- # performance and PyPy friendliness, we break the cycle:
- block_recursion = None
+ if depth < max_depth:
+ arrs = [_block(arr, max_depth, result_ndim, depth+1)
+ for arr in arrays]
+ return _nx.concatenate(arrs, axis=-(max_depth-depth))
+ else:
+ # We've 'bottomed out' - arrays is either a scalar or an array
+ # type(arrays) is not list
+ return _atleast_nd(arrays, result_ndim)
+
+
+def _block_dispatcher(arrays):
+ # Use type(...) is list to match the behavior of np.block(), which special
+ # cases list specifically rather than allowing generic iterables or
+ # tuples. Also, we know that list.__array_function__ will never exist.
+ if type(arrays) is list:
+ for subarrays in arrays:
+ for subarray in _block_dispatcher(subarrays):
+ yield subarray
+ else:
+ yield arrays
+@array_function_dispatch(_block_dispatcher)
def block(arrays):
"""
Assemble an nd-array from nested lists of blocks.
@@ -603,6 +814,74 @@ def block(arrays):
"""
- bottom_index, arr_ndim = _block_check_depths_match(arrays)
+ arrays, list_ndim, result_ndim, final_size = _block_setup(arrays)
+
+ # It was found through benchmarking that making an array of final size
+ # around 256x256 was faster by straight concatenation on an
+ # i7-7700HQ processor with dual-channel 2400 MHz RAM.
+ # The choice of dtype did not seem to matter much.
+ #
+ # A 2D array using repeated concatenation requires 2 copies of the array.
+ #
+ # The fastest algorithm will depend on the ratio of CPU power to memory
+ # speed.
+ # One can monitor the results of the benchmark
+ # https://pv.github.io/numpy-bench/#bench_shape_base.Block2D.time_block2d
+ # to tune this parameter until a C version of the `_block_info_recursion`
+ # algorithm is implemented which would likely be faster than the python
+ # version.
+ if list_ndim * final_size > (2 * 512 * 512):
+ return _block_slicing(arrays, list_ndim, result_ndim)
+ else:
+ return _block_concatenate(arrays, list_ndim, result_ndim)
+
+
+# These helper functions are mostly used for testing.
+# They allow us to write tests that directly call `_block_slicing`
+# or `_block_concatenate` without blocking large arrays to force the
+# benchmarking wisdom to trigger the desired path.
+def _block_setup(arrays):
+ """
+ Returns
+ (`arrays`, list_ndim, result_ndim, final_size)
+ """
+ bottom_index, arr_ndim, final_size = _block_check_depths_match(arrays)
list_ndim = len(bottom_index)
- return _block(arrays, list_ndim, max(arr_ndim, list_ndim))
+ if bottom_index and bottom_index[-1] is None:
+ raise ValueError(
+ 'List at {} cannot be empty'.format(
+ _block_format_index(bottom_index)
+ )
+ )
+ result_ndim = max(arr_ndim, list_ndim)
+ return arrays, list_ndim, result_ndim, final_size
+
+
+def _block_slicing(arrays, list_ndim, result_ndim):
+ shape, slices, arrays = _block_info_recursion(
+ arrays, list_ndim, result_ndim)
+ dtype = _nx.result_type(*[arr.dtype for arr in arrays])
+
+ # Test preferring F only in the case that all input arrays are F
+ F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays)
+ C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays)
+ order = 'F' if F_order and not C_order else 'C'
+ result = _nx.empty(shape=shape, dtype=dtype, order=order)
+ # Note: In a c implementation, the function
+ # PyArray_CreateMultiSortedStridePerm could be used for more advanced
+ # guessing of the desired order.
+
+ for the_slice, arr in zip(slices, arrays):
+ result[(Ellipsis,) + the_slice] = arr
+ return result
+
+
+def _block_concatenate(arrays, list_ndim, result_ndim):
+ result = _block(arrays, list_ndim, result_ndim)
+ if list_ndim == 0:
+ # Catch an edge case where _block returns a view because
+ # `arrays` is a single numpy array and not a list of numpy arrays.
+ # This might copy scalars or lists twice, but this isn't a likely
+ # use case for those interested in performance.
+ result = result.copy()
+ return result
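
A compact illustration (public NumPy only) of the slice-prefix idea that
`_block_info_recursion` exploits: compute where each leaf block lands in the
output and assign through slices, instead of concatenating level by level.
This is a hand-rolled model of the two code paths, not the implementation
itself:

    import numpy as np

    a = np.ones((2, 2))
    b = 2 * np.ones((2, 3))

    # The _block_concatenate path: repeated concatenation.
    out1 = np.concatenate([a, b], axis=1)

    # The _block_slicing idea: one allocation plus sliced assignment.
    out2 = np.empty((2, 5))
    offsets = [0, a.shape[1], a.shape[1] + b.shape[1]]  # accumulated widths
    for arr, start, end in zip([a, b], offsets, offsets[1:]):
        out2[(Ellipsis,) + (slice(start, end),)] = arr

    assert (out1 == out2).all()
    assert (np.block([a, b]) == out1).all()
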
diff --git a/numpy/core/src/multiarray/array_assign.c b/numpy/core/src/common/array_assign.c
index a48e245d8..02a423e3a 100644
--- a/numpy/core/src/multiarray/array_assign.c
+++ b/numpy/core/src/common/array_assign.c
@@ -84,23 +84,73 @@ broadcast_error: {
/* See array_assign.h for parameter documentation */
NPY_NO_EXPORT int
-raw_array_is_aligned(int ndim, char *data, npy_intp *strides, int alignment)
+raw_array_is_aligned(int ndim, npy_intp *shape,
+ char *data, npy_intp *strides, int alignment)
{
- if (alignment > 1) {
- npy_intp align_check = (npy_intp)data;
- int idim;
- for (idim = 0; idim < ndim; ++idim) {
- align_check |= strides[idim];
+ /*
+ * The code below expects the following:
+ * * that alignment is a power of two, as required by the C standard.
+ * * that casting from pointer to uintp gives a sensible representation
+ * we can use bitwise operations on (perhaps *not* req. by C std,
+ * but assumed by glibc so it should be fine)
+ * * that casting stride from intp to uintp (to avoid dependence on the
+ * signed int representation) preserves remainder wrt alignment, so
+ * stride%a is the same as ((unsigned intp)stride)%a. Req. by C std.
+ *
+ * The code checks whether the lowest log2(alignment) bits of `data`
+ * and all `strides` are 0, as this implies that
+ * (data + n*stride)%alignment == 0 for all integers n.
+ */
+
+ if (alignment > 1) {
+ npy_uintp align_check = (npy_uintp)data;
+ int i;
+
+ for (i = 0; i < ndim; i++) {
+#if NPY_RELAXED_STRIDES_CHECKING
+ /* skip dim == 1 as it is not required to have stride 0 */
+ if (shape[i] > 1) {
+ /* if shape[i] == 1, the stride is never used */
+ align_check |= (npy_uintp)strides[i];
+ }
+ else if (shape[i] == 0) {
+ /* an array with zero elements is always aligned */
+ return 1;
+ }
+#else /* not NPY_RELAXED_STRIDES_CHECKING */
+ align_check |= (npy_uintp)strides[i];
+#endif /* not NPY_RELAXED_STRIDES_CHECKING */
}
return npy_is_aligned((void *)align_check, alignment);
}
- else {
+ else if (alignment == 1) {
return 1;
}
+ else {
+ /* always return false for alignment == 0, which means cannot-be-aligned */
+ return 0;
+ }
}
+NPY_NO_EXPORT int
+IsAligned(PyArrayObject *ap)
+{
+ return raw_array_is_aligned(PyArray_NDIM(ap), PyArray_DIMS(ap),
+ PyArray_DATA(ap), PyArray_STRIDES(ap),
+ PyArray_DESCR(ap)->alignment);
+}
+
+NPY_NO_EXPORT int
+IsUintAligned(PyArrayObject *ap)
+{
+ return raw_array_is_aligned(PyArray_NDIM(ap), PyArray_DIMS(ap),
+ PyArray_DATA(ap), PyArray_STRIDES(ap),
+ npy_uint_alignment(PyArray_DESCR(ap)->elsize));
+}
+
+
/* Returns 1 if the arrays have overlapping data, 0 otherwise */
NPY_NO_EXPORT int
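
The rewritten raw_array_is_aligned above relies on a standard bit trick: when
alignment is a power of two, (data + n*stride) % alignment == 0 for every n
exactly when the low log2(alignment) bits of data and of every stride that is
actually used are zero. A rough Python model of that logic (illustrative
only; the authoritative version is the C code above):

    def is_aligned(data_ptr, shape, strides, alignment):
        # alignment == 0 is the sentinel for "cannot be aligned"
        if alignment == 0:
            return False
        if alignment == 1:
            return True
        check = data_ptr
        # the dim checks mirror the NPY_RELAXED_STRIDES_CHECKING branch
        for dim, stride in zip(shape, strides):
            if dim == 0:
                return True        # arrays with no elements are aligned
            if dim > 1:            # a length-1 dim's stride is never used
                check |= stride
        # for a power-of-two modulus, x % alignment == x & (alignment - 1)
        return check & (alignment - 1) == 0
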
diff --git a/numpy/core/src/multiarray/array_assign.h b/numpy/core/src/common/array_assign.h
index 3fecff007..69ef56bb4 100644
--- a/numpy/core/src/multiarray/array_assign.h
+++ b/numpy/core/src/common/array_assign.h
@@ -87,10 +87,28 @@ broadcast_strides(int ndim, npy_intp *shape,
/*
* Checks whether a data pointer + set of strides refers to a raw
- * array which is fully aligned data.
+ * array whose elements are all aligned to a given alignment. Returns
+ * 1 if data is aligned to alignment or 0 if not.
+ * alignment should be a power of two, or may be the sentinel value 0 to mean
+ * cannot-be-aligned, in which case 0 (false) is always returned.
*/
NPY_NO_EXPORT int
-raw_array_is_aligned(int ndim, char *data, npy_intp *strides, int alignment);
+raw_array_is_aligned(int ndim, npy_intp *shape,
+ char *data, npy_intp *strides, int alignment);
+
+/*
+ * Checks if an array is aligned to its "true alignment"
+ * given by dtype->alignment.
+ */
+NPY_NO_EXPORT int
+IsAligned(PyArrayObject *ap);
+
+/*
+ * Checks if an array is aligned to its "uint alignment"
+ * given by npy_uint_alignment(dtype->elsize).
+ */
+NPY_NO_EXPORT int
+IsUintAligned(PyArrayObject *ap);
/* Returns 1 if the arrays have overlapping data, 0 otherwise */
NPY_NO_EXPORT int
diff --git a/numpy/core/src/private/binop_override.h b/numpy/core/src/common/binop_override.h
index 47df63e38..47df63e38 100644
--- a/numpy/core/src/private/binop_override.h
+++ b/numpy/core/src/common/binop_override.h
diff --git a/numpy/core/src/multiarray/cblasfuncs.c b/numpy/core/src/common/cblasfuncs.c
index c941bb29b..39572fed4 100644
--- a/numpy/core/src/multiarray/cblasfuncs.c
+++ b/numpy/core/src/common/cblasfuncs.c
@@ -12,32 +12,6 @@
#include "npy_cblas.h"
#include "arraytypes.h"
#include "common.h"
-#include "mem_overlap.h"
-
-
-/*
- * Helper: call appropriate BLAS dot function for typenum.
- * Strides are NumPy strides.
- */
-static void
-blas_dot(int typenum, npy_intp n,
- void *a, npy_intp stridea, void *b, npy_intp strideb, void *res)
-{
- switch (typenum) {
- case NPY_DOUBLE:
- DOUBLE_dot(a, stridea, b, strideb, res, n, NULL);
- break;
- case NPY_FLOAT:
- FLOAT_dot(a, stridea, b, strideb, res, n, NULL);
- break;
- case NPY_CDOUBLE:
- CDOUBLE_dot(a, stridea, b, strideb, res, n, NULL);
- break;
- case NPY_CFLOAT:
- CFLOAT_dot(a, stridea, b, strideb, res, n, NULL);
- break;
- }
-}
static const double oneD[2] = {1.0, 0.0}, zeroD[2] = {0.0, 0.0};
@@ -208,12 +182,13 @@ _select_matrix_shape(PyArrayObject *array)
* This also makes sure that the data segment is aligned with
* an itemsize address as well by returning one if not true.
*/
-static int
+NPY_NO_EXPORT int
_bad_strides(PyArrayObject *ap)
{
int itemsize = PyArray_ITEMSIZE(ap);
int i, N=PyArray_NDIM(ap);
npy_intp *strides = PyArray_STRIDES(ap);
+ npy_intp *dims = PyArray_DIMS(ap);
if (((npy_intp)(PyArray_DATA(ap)) % itemsize) != 0) {
return 1;
@@ -222,6 +197,9 @@ _bad_strides(PyArrayObject *ap)
if ((strides[i] < 0) || (strides[i] % itemsize) != 0) {
return 1;
}
+ if ((strides[i] == 0 && dims[i] > 1)) {
+ return 1;
+ }
}
return 0;
@@ -379,77 +357,9 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
}
}
- if (out != NULL) {
- int d;
-
- /* verify that out is usable */
- if (PyArray_NDIM(out) != nd ||
- PyArray_TYPE(out) != typenum ||
- !PyArray_ISCARRAY(out)) {
-
- PyErr_SetString(PyExc_ValueError,
- "output array is not acceptable (must have the right datatype, "
- "number of dimensions, and be a C-Array)");
- goto fail;
- }
- for (d = 0; d < nd; ++d) {
- if (dimensions[d] != PyArray_DIM(out, d)) {
- PyErr_SetString(PyExc_ValueError,
- "output array has wrong dimensions");
- goto fail;
- }
- }
-
- /* check for memory overlap */
- if (!(solve_may_share_memory(out, ap1, 1) == 0 &&
- solve_may_share_memory(out, ap2, 1) == 0)) {
- /* allocate temporary output array */
- out_buf = (PyArrayObject *)PyArray_NewLikeArray(out, NPY_CORDER,
- NULL, 0);
- if (out_buf == NULL) {
- goto fail;
- }
-
- /* set copy-back */
- Py_INCREF(out);
- if (PyArray_SetWritebackIfCopyBase(out_buf, out) < 0) {
- Py_DECREF(out);
- goto fail;
- }
- }
- else {
- Py_INCREF(out);
- out_buf = out;
- }
- Py_INCREF(out);
- result = out;
- }
- else {
- double prior1, prior2;
- PyTypeObject *subtype;
- PyObject *tmp;
-
- /* Choose which subtype to return */
- if (Py_TYPE(ap1) != Py_TYPE(ap2)) {
- prior2 = PyArray_GetPriority((PyObject *)ap2, 0.0);
- prior1 = PyArray_GetPriority((PyObject *)ap1, 0.0);
- subtype = (prior2 > prior1 ? Py_TYPE(ap2) : Py_TYPE(ap1));
- }
- else {
- prior1 = prior2 = 0.0;
- subtype = Py_TYPE(ap1);
- }
-
- tmp = (PyObject *)(prior2 > prior1 ? ap2 : ap1);
-
- out_buf = (PyArrayObject *)PyArray_New(subtype, nd, dimensions,
- typenum, NULL, NULL, 0, 0, tmp);
- if (out_buf == NULL) {
- goto fail;
- }
-
- Py_INCREF(out_buf);
- result = out_buf;
+ out_buf = new_array_for_sum(ap1, ap2, out, nd, dimensions, typenum, &result);
+ if (out_buf == NULL) {
+ goto fail;
}
numbytes = PyArray_NBYTES(out_buf);
@@ -617,10 +527,10 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
NPY_BEGIN_ALLOW_THREADS;
/* Dot product between two vectors -- Level 1 BLAS */
- blas_dot(typenum, l,
+ PyArray_DESCR(out_buf)->f->dotfunc(
PyArray_DATA(ap1), PyArray_STRIDE(ap1, (ap1shape == _row)),
PyArray_DATA(ap2), PyArray_STRIDE(ap2, 0),
- PyArray_DATA(out_buf));
+ PyArray_DATA(out_buf), l, NULL);
NPY_END_ALLOW_THREADS;
}
else if (ap1shape == _matrix && ap2shape != _matrix) {
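
Context for the new zero-stride rejection in _bad_strides: broadcast views
carry stride 0 along the broadcast axes, which BLAS routines cannot consume
directly, so such inputs must be buffered before the call. A short
demonstration:

    import numpy as np

    a = np.broadcast_to(np.arange(3.0), (4, 3))   # all rows share memory
    print(a.strides)              # (0, 8): zero stride on the broadcast axis
    print(np.dot(a, np.ones(3)))  # works: the bad-strided input is copied
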
diff --git a/numpy/core/src/multiarray/cblasfuncs.h b/numpy/core/src/common/cblasfuncs.h
index 66ce4ca5b..66ce4ca5b 100644
--- a/numpy/core/src/multiarray/cblasfuncs.h
+++ b/numpy/core/src/common/cblasfuncs.h
diff --git a/numpy/core/src/private/get_attr_string.h b/numpy/core/src/common/get_attr_string.h
index bec87c5ed..bec87c5ed 100644
--- a/numpy/core/src/private/get_attr_string.h
+++ b/numpy/core/src/common/get_attr_string.h
diff --git a/numpy/core/src/private/lowlevel_strided_loops.h b/numpy/core/src/common/lowlevel_strided_loops.h
index f9c671f77..5f139cffb 100644
--- a/numpy/core/src/private/lowlevel_strided_loops.h
+++ b/numpy/core/src/common/lowlevel_strided_loops.h
@@ -7,7 +7,9 @@
/*
* NOTE: This API should remain private for the time being, to allow
* for further refinement. I think the 'aligned' mechanism
- * needs changing, for example.
+ * needs changing, for example.
+ *
+ * Note: Updated in 2018 to distinguish "true" from "uint" alignment.
*/
/*
@@ -69,8 +71,9 @@ typedef void (PyArray_StridedBinaryOp)(char *dst, npy_intp dst_stride,
* strided memory. Returns NULL if there is a problem with the inputs.
*
* aligned:
- * Should be 1 if the src and dst pointers are always aligned,
- * 0 otherwise.
+ * Should be 1 if the src and dst pointers always point to
+ * locations at which a uint of equal size to dtype->elsize
+ * would be aligned, 0 otherwise.
* src_stride:
* Should be the src stride if it will always be the same,
* NPY_MAX_INTP otherwise.
@@ -165,8 +168,9 @@ PyArray_GetDTypeCopySwapFn(int aligned,
* function when the transfer function is no longer required.
*
* aligned:
- * Should be 1 if the src and dst pointers are always aligned,
- * 0 otherwise.
+ * Should be 1 if the src and dst pointers always point to
+ * locations at which a uint of equal size to dtype->elsize
+ * would be aligned, 0 otherwise.
* src_stride:
* Should be the src stride if it will always be the same,
* NPY_MAX_INTP otherwise.
diff --git a/numpy/core/src/private/mem_overlap.c b/numpy/core/src/common/mem_overlap.c
index 21db1893b..21db1893b 100644
--- a/numpy/core/src/private/mem_overlap.c
+++ b/numpy/core/src/common/mem_overlap.c
diff --git a/numpy/core/src/private/mem_overlap.h b/numpy/core/src/common/mem_overlap.h
index 8044f1663..8044f1663 100644
--- a/numpy/core/src/private/mem_overlap.h
+++ b/numpy/core/src/common/mem_overlap.h
diff --git a/numpy/core/src/private/npy_binsearch.h.src b/numpy/core/src/common/npy_binsearch.h.src
index ce3b34b0e..ce3b34b0e 100644
--- a/numpy/core/src/private/npy_binsearch.h.src
+++ b/numpy/core/src/common/npy_binsearch.h.src
diff --git a/numpy/core/src/private/npy_cblas.h b/numpy/core/src/common/npy_cblas.h
index a083f3bcc..a083f3bcc 100644
--- a/numpy/core/src/private/npy_cblas.h
+++ b/numpy/core/src/common/npy_cblas.h
diff --git a/numpy/core/src/private/npy_config.h b/numpy/core/src/common/npy_config.h
index eabcf99da..eedfbe364 100644
--- a/numpy/core/src/private/npy_config.h
+++ b/numpy/core/src/common/npy_config.h
@@ -6,21 +6,6 @@
#include "numpy/npy_cpu.h"
#include "numpy/npy_os.h"
-/*
- * largest alignment the copy loops might require
- * required as string, void and complex types might get copied using larger
- * instructions than required to operate on them. E.g. complex float is copied
- * in 8 byte moves but arithmetic on them only loads in 4 byte moves.
- * the sparc platform may need that alignment for long doubles.
- * amd64 is not harmed much by the bloat as the system provides 16 byte
- * alignment by default.
- */
-#if (defined NPY_CPU_X86 || defined _WIN32)
-#define NPY_MAX_COPY_ALIGNMENT 8
-#else
-#define NPY_MAX_COPY_ALIGNMENT 16
-#endif
-
/* blacklist */
/* Disable broken Sun Workshop Pro math functions */
@@ -59,7 +44,6 @@
#undef HAVE_CATANH
#undef HAVE_CATANHF
#undef HAVE_CATANHL
-
#undef HAVE_CSQRT
#undef HAVE_CSQRTF
#undef HAVE_CSQRTL
diff --git a/numpy/core/src/common/npy_ctypes.h b/numpy/core/src/common/npy_ctypes.h
new file mode 100644
index 000000000..f26db9e05
--- /dev/null
+++ b/numpy/core/src/common/npy_ctypes.h
@@ -0,0 +1,49 @@
+#ifndef NPY_CTYPES_H
+#define NPY_CTYPES_H
+
+#include <Python.h>
+
+#include "npy_import.h"
+
+/*
+ * Check if a python type is a ctypes class.
+ *
+ * Works like the Py<type>_Check functions, returning true if the argument
+ * looks like a ctypes object.
+ *
+ * This entire function is just a wrapper around the Python function of the
+ * same name.
+ */
+NPY_INLINE static int
+npy_ctypes_check(PyTypeObject *obj)
+{
+ static PyObject *py_func = NULL;
+ PyObject *ret_obj;
+ int ret;
+
+ npy_cache_import("numpy.core._internal", "npy_ctypes_check", &py_func);
+ if (py_func == NULL) {
+ goto fail;
+ }
+
+ ret_obj = PyObject_CallFunctionObjArgs(py_func, (PyObject *)obj, NULL);
+ if (ret_obj == NULL) {
+ goto fail;
+ }
+
+ ret = PyObject_IsTrue(ret_obj);
+ if (ret == -1) {
+ goto fail;
+ }
+
+ return ret;
+
+fail:
+ /* If the above fails, then we should just assume that the type is not from
+ * ctypes
+ */
+ PyErr_Clear();
+ return 0;
+}
+
+#endif
diff --git a/numpy/core/src/private/npy_extint128.h b/numpy/core/src/common/npy_extint128.h
index a887ff317..a887ff317 100644
--- a/numpy/core/src/private/npy_extint128.h
+++ b/numpy/core/src/common/npy_extint128.h
diff --git a/numpy/core/src/private/npy_fpmath.h b/numpy/core/src/common/npy_fpmath.h
index dbb3fb23d..dbb3fb23d 100644
--- a/numpy/core/src/private/npy_fpmath.h
+++ b/numpy/core/src/common/npy_fpmath.h
diff --git a/numpy/core/src/private/npy_import.h b/numpy/core/src/common/npy_import.h
index 221e1e645..221e1e645 100644
--- a/numpy/core/src/private/npy_import.h
+++ b/numpy/core/src/common/npy_import.h
diff --git a/numpy/core/src/private/npy_longdouble.c b/numpy/core/src/common/npy_longdouble.c
index 508fbceac..561f4b825 100644
--- a/numpy/core/src/private/npy_longdouble.c
+++ b/numpy/core/src/common/npy_longdouble.c
@@ -1,17 +1,11 @@
#include <Python.h>
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+#define _MULTIARRAYMODULE
+
#include "numpy/ndarraytypes.h"
#include "numpy/npy_math.h"
-
-/* This is a backport of Py_SETREF */
-#define NPY_SETREF(op, op2) \
- do { \
- PyObject *_py_tmp = (PyObject *)(op); \
- (op) = (op2); \
- Py_DECREF(_py_tmp); \
- } while (0)
-
+#include "npy_pycompat.h"
/*
* Heavily derived from PyLong_FromDouble
@@ -66,7 +60,7 @@ npy_longdouble_to_PyLong(npy_longdouble ldval)
npy_ulonglong chunk = (npy_ulonglong)frac;
PyObject *l_chunk;
/* v = v << chunk_size */
- NPY_SETREF(v, PyNumber_Lshift(v, l_chunk_size));
+ Py_SETREF(v, PyNumber_Lshift(v, l_chunk_size));
if (v == NULL) {
goto done;
}
@@ -77,7 +71,7 @@ npy_longdouble_to_PyLong(npy_longdouble ldval)
goto done;
}
/* v = v | chunk */
- NPY_SETREF(v, PyNumber_Or(v, l_chunk));
+ Py_SETREF(v, PyNumber_Or(v, l_chunk));
Py_DECREF(l_chunk);
if (v == NULL) {
goto done;
@@ -90,7 +84,7 @@ npy_longdouble_to_PyLong(npy_longdouble ldval)
/* v = -v */
if (neg) {
- NPY_SETREF(v, PyNumber_Negative(v));
+ Py_SETREF(v, PyNumber_Negative(v));
if (v == NULL) {
goto done;
}
diff --git a/numpy/core/src/private/npy_longdouble.h b/numpy/core/src/common/npy_longdouble.h
index 036b53070..036b53070 100644
--- a/numpy/core/src/private/npy_longdouble.h
+++ b/numpy/core/src/common/npy_longdouble.h
diff --git a/numpy/core/src/private/npy_partition.h.src b/numpy/core/src/common/npy_partition.h.src
index a22cf911c..a22cf911c 100644
--- a/numpy/core/src/private/npy_partition.h.src
+++ b/numpy/core/src/common/npy_partition.h.src
diff --git a/numpy/core/src/private/npy_pycompat.h b/numpy/core/src/common/npy_pycompat.h
index aa0b5c122..aa0b5c122 100644
--- a/numpy/core/src/private/npy_pycompat.h
+++ b/numpy/core/src/common/npy_pycompat.h
diff --git a/numpy/core/src/common/npy_sort.h.src b/numpy/core/src/common/npy_sort.h.src
new file mode 100644
index 000000000..c31a82764
--- /dev/null
+++ b/numpy/core/src/common/npy_sort.h.src
@@ -0,0 +1,83 @@
+#ifndef __NPY_SORT_H__
+#define __NPY_SORT_H__
+
+/* Python include is for future object sorts */
+#include <Python.h>
+#include <numpy/npy_common.h>
+#include <numpy/ndarraytypes.h>
+
+#define NPY_ENOMEM 1
+#define NPY_ECOMP 2
+
+static NPY_INLINE int npy_get_msb(npy_uintp unum)
+{
+ int depth_limit = 0;
+ while (unum >>= 1) {
+ depth_limit++;
+ }
+ return depth_limit;
+}
+
+
+/*
+ *****************************************************************************
+ ** NUMERIC SORTS **
+ *****************************************************************************
+ */
+
+
+/**begin repeat
+ *
+ * #suff = bool, byte, ubyte, short, ushort, int, uint, long, ulong,
+ * longlong, ulonglong, half, float, double, longdouble,
+ * cfloat, cdouble, clongdouble, datetime, timedelta#
+ */
+
+int quicksort_@suff@(void *vec, npy_intp cnt, void *null);
+int heapsort_@suff@(void *vec, npy_intp cnt, void *null);
+int mergesort_@suff@(void *vec, npy_intp cnt, void *null);
+int aquicksort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
+int aheapsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
+int amergesort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
+
+/**end repeat**/
+
+
+
+/*
+ *****************************************************************************
+ ** STRING SORTS **
+ *****************************************************************************
+ */
+
+
+/**begin repeat
+ *
+ * #suff = string, unicode#
+ */
+
+int quicksort_@suff@(void *vec, npy_intp cnt, void *arr);
+int heapsort_@suff@(void *vec, npy_intp cnt, void *arr);
+int mergesort_@suff@(void *vec, npy_intp cnt, void *arr);
+int aquicksort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
+int aheapsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
+int amergesort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
+
+/**end repeat**/
+
+
+/*
+ *****************************************************************************
+ ** GENERIC SORT **
+ *****************************************************************************
+ */
+
+
+int npy_quicksort(void *vec, npy_intp cnt, void *arr);
+int npy_heapsort(void *vec, npy_intp cnt, void *arr);
+int npy_mergesort(void *vec, npy_intp cnt, void *arr);
+int npy_aquicksort(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
+int npy_aheapsort(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
+int npy_amergesort(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
+
+#endif
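
For readers unfamiliar with the .src templating: the /**begin repeat ... **/
blocks above are expanded at build time (by numpy.distutils' conv_template)
into one declaration per #suff# value. A rough Python model of the expansion:

    suffixes = ['bool', 'byte', 'ubyte', 'short', 'ushort', 'int', 'uint',
                'long', 'ulong', 'longlong', 'ulonglong', 'half', 'float',
                'double', 'longdouble', 'cfloat', 'cdouble', 'clongdouble',
                'datetime', 'timedelta']
    template = 'int quicksort_{0}(void *vec, npy_intp cnt, void *null);'
    declarations = [template.format(s) for s in suffixes]
    # declarations[0] == 'int quicksort_bool(void *vec, npy_intp cnt, void *null);'
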
diff --git a/numpy/core/src/multiarray/numpyos.c b/numpy/core/src/common/numpyos.c
index 52dcbf3c8..d60b1ca17 100644
--- a/numpy/core/src/multiarray/numpyos.c
+++ b/numpy/core/src/common/numpyos.c
@@ -769,3 +769,31 @@ NumPyOS_ascii_ftoLf(FILE *fp, long double *value)
}
return r;
}
+
+NPY_NO_EXPORT npy_longlong
+NumPyOS_strtoll(const char *str, char **endptr, int base)
+{
+#if defined HAVE_STRTOLL
+ return strtoll(str, endptr, base);
+#elif defined _MSC_VER
+ return _strtoi64(str, endptr, base);
+#else
+ /* ok on 64 bit posix */
+ return PyOS_strtol(str, endptr, base);
+#endif
+}
+
+NPY_NO_EXPORT npy_ulonglong
+NumPyOS_strtoull(const char *str, char **endptr, int base)
+{
+#if defined HAVE_STRTOULL
+ return strtoull(str, endptr, base);
+#elif defined _MSC_VER
+ return _strtoui64(str, endptr, base);
+#else
+ /* ok on 64 bit posix */
+ return PyOS_strtoul(str, endptr, base);
+#endif
+}
+
+
diff --git a/numpy/core/src/multiarray/numpyos.h b/numpy/core/src/common/numpyos.h
index 7ca795a6f..4deed8400 100644
--- a/numpy/core/src/multiarray/numpyos.h
+++ b/numpy/core/src/common/numpyos.h
@@ -31,4 +31,11 @@ NumPyOS_ascii_ftoLf(FILE *fp, long double *value);
NPY_NO_EXPORT int
NumPyOS_ascii_isspace(int c);
+/* Convert a string to an int in an arbitrary base */
+NPY_NO_EXPORT npy_longlong
+NumPyOS_strtoll(const char *str, char **endptr, int base);
+
+/* Convert a string to an unsigned int in an arbitrary base */
+NPY_NO_EXPORT npy_ulonglong
+NumPyOS_strtoull(const char *str, char **endptr, int base);
#endif
diff --git a/numpy/core/src/multiarray/python_xerbla.c b/numpy/core/src/common/python_xerbla.c
index bdf0b9058..bdf0b9058 100644
--- a/numpy/core/src/multiarray/python_xerbla.c
+++ b/numpy/core/src/common/python_xerbla.c
diff --git a/numpy/core/src/private/templ_common.h.src b/numpy/core/src/common/templ_common.h.src
index a65a00758..a65a00758 100644
--- a/numpy/core/src/private/templ_common.h.src
+++ b/numpy/core/src/common/templ_common.h.src
diff --git a/numpy/core/src/multiarray/ucsnarrow.c b/numpy/core/src/common/ucsnarrow.c
index 8e293e9f2..8e293e9f2 100644
--- a/numpy/core/src/multiarray/ucsnarrow.c
+++ b/numpy/core/src/common/ucsnarrow.c
diff --git a/numpy/core/src/multiarray/ucsnarrow.h b/numpy/core/src/common/ucsnarrow.h
index fe31a5e25..fe31a5e25 100644
--- a/numpy/core/src/multiarray/ucsnarrow.h
+++ b/numpy/core/src/common/ucsnarrow.h
diff --git a/numpy/core/src/common/ufunc_override.c b/numpy/core/src/common/ufunc_override.c
new file mode 100644
index 000000000..89f08a9cb
--- /dev/null
+++ b/numpy/core/src/common/ufunc_override.c
@@ -0,0 +1,121 @@
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+#define _MULTIARRAYMODULE
+
+#include "npy_pycompat.h"
+#include "get_attr_string.h"
+#include "npy_import.h"
+#include "ufunc_override.h"
+
+/*
+ * Check whether an object has __array_ufunc__ defined on its class and it
+ * is not the default, i.e., the object is not an ndarray, and its
+ * __array_ufunc__ is not the same as that of ndarray.
+ *
+ * Returns a new reference, the value of type(obj).__array_ufunc__ if it
+ * exists and is different from that of ndarray, and NULL otherwise.
+ */
+NPY_NO_EXPORT PyObject *
+PyUFuncOverride_GetNonDefaultArrayUfunc(PyObject *obj)
+{
+ static PyObject *ndarray_array_ufunc = NULL;
+ PyObject *cls_array_ufunc;
+
+ /* On first entry, cache ndarray's __array_ufunc__ */
+ if (ndarray_array_ufunc == NULL) {
+ ndarray_array_ufunc = PyObject_GetAttrString((PyObject *)&PyArray_Type,
+ "__array_ufunc__");
+ }
+
+ /* Fast return for ndarray */
+ if (PyArray_CheckExact(obj)) {
+ return NULL;
+ }
+ /*
+ * Does the class define __array_ufunc__? (Note that LookupSpecial has fast
+ * return for basic python types, so no need to worry about those here)
+ */
+ cls_array_ufunc = PyArray_LookupSpecial(obj, "__array_ufunc__");
+ if (cls_array_ufunc == NULL) {
+ return NULL;
+ }
+ /* Ignore if the same as ndarray.__array_ufunc__ */
+ if (cls_array_ufunc == ndarray_array_ufunc) {
+ Py_DECREF(cls_array_ufunc);
+ return NULL;
+ }
+ return cls_array_ufunc;
+}
+
+/*
+ * Check whether an object has __array_ufunc__ defined on its class and it
+ * is not the default, i.e., the object is not an ndarray, and its
+ * __array_ufunc__ is not the same as that of ndarray.
+ *
+ * Returns 1 if this is the case, 0 if not.
+ */
+
+NPY_NO_EXPORT int
+PyUFunc_HasOverride(PyObject * obj)
+{
+ PyObject *method = PyUFuncOverride_GetNonDefaultArrayUfunc(obj);
+ if (method) {
+ Py_DECREF(method);
+ return 1;
+ }
+ else {
+ return 0;
+ }
+}
+
+/*
+ * Get the possible out argument from kwds, and return the number of outputs
+ * contained within it: if a tuple, the number of elements in it, 1 otherwise.
+ * The out argument itself is returned in out_kwd_obj, and the outputs
+ * in the out_objs array (as borrowed references).
+ *
+ * Returns 0 if no outputs found, -1 if kwds is not a dict (with an error set).
+ */
+NPY_NO_EXPORT int
+PyUFuncOverride_GetOutObjects(PyObject *kwds, PyObject **out_kwd_obj, PyObject ***out_objs)
+{
+ if (kwds == NULL) {
+ Py_INCREF(Py_None);
+ *out_kwd_obj = Py_None;
+ return 0;
+ }
+ if (!PyDict_CheckExact(kwds)) {
+ PyErr_SetString(PyExc_TypeError,
+ "Internal Numpy error: call to PyUFuncOverride_GetOutObjects "
+ "with non-dict kwds");
+ *out_kwd_obj = NULL;
+ return -1;
+ }
+ /* borrowed reference */
+ *out_kwd_obj = PyDict_GetItemString(kwds, "out");
+ if (*out_kwd_obj == NULL) {
+ Py_INCREF(Py_None);
+ *out_kwd_obj = Py_None;
+ return 0;
+ }
+ if (PyTuple_CheckExact(*out_kwd_obj)) {
+ /*
+ * The C-API recommends calling PySequence_Fast before any of the other
+ * PySequence_Fast* functions. This is required for PyPy
+ */
+ PyObject *seq;
+ seq = PySequence_Fast(*out_kwd_obj,
+ "Could not convert object to sequence");
+ if (seq == NULL) {
+ *out_kwd_obj = NULL;
+ return -1;
+ }
+ *out_objs = PySequence_Fast_ITEMS(seq);
+ *out_kwd_obj = seq;
+ return PySequence_Fast_GET_SIZE(seq);
+ }
+ else {
+ Py_INCREF(*out_kwd_obj);
+ *out_objs = out_kwd_obj;
+ return 1;
+ }
+}
diff --git a/numpy/core/src/common/ufunc_override.h b/numpy/core/src/common/ufunc_override.h
new file mode 100644
index 000000000..bf86865c9
--- /dev/null
+++ b/numpy/core/src/common/ufunc_override.h
@@ -0,0 +1,37 @@
+#ifndef __UFUNC_OVERRIDE_H
+#define __UFUNC_OVERRIDE_H
+
+#include "npy_config.h"
+
+/*
+ * Check whether an object has __array_ufunc__ defined on its class and it
+ * is not the default, i.e., the object is not an ndarray, and its
+ * __array_ufunc__ is not the same as that of ndarray.
+ *
+ * Returns a new reference, the value of type(obj).__array_ufunc__ if it
+ * exists and is different from that of ndarray, and NULL otherwise.
+ */
+NPY_NO_EXPORT PyObject *
+PyUFuncOverride_GetNonDefaultArrayUfunc(PyObject *obj);
+
+/*
+ * Check whether an object has __array_ufunc__ defined on its class and it
+ * is not the default, i.e., the object is not an ndarray, and its
+ * __array_ufunc__ is not the same as that of ndarray.
+ *
+ * Returns 1 if this is the case, 0 if not.
+ */
+NPY_NO_EXPORT int
+PyUFunc_HasOverride(PyObject *obj);
+
+/*
+ * Get the possible out argument from kwds, and return the number of outputs
+ * contained within it: if a tuple, the number of elements in it, 1 otherwise.
+ * The out argument itself is returned in out_kwd_obj, and the outputs
+ * in the out_objs array (as borrowed references).
+ *
+ * Returns 0 if no outputs found, -1 if kwds is not a dict (with an error set).
+ */
+NPY_NO_EXPORT int
+PyUFuncOverride_GetOutObjects(PyObject *kwds, PyObject **out_kwd_obj, PyObject ***out_objs);
+#endif
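
A minimal sketch of how a caller might drive this API (illustrative only; it assumes just the three declarations above and standard CPython reference-counting rules):

    /* Count how many entries of the `out=` kwarg carry a non-default
     * __array_ufunc__. */
    static int
    count_out_overrides(PyObject *kwds)
    {
        PyObject *out_kwd_obj;
        PyObject **out_objs;
        int i, n, noverrides = 0;

        n = PyUFuncOverride_GetOutObjects(kwds, &out_kwd_obj, &out_objs);
        if (n < 0) {
            return -1;
        }
        for (i = 0; i < n; i++) {
            if (PyUFunc_HasOverride(out_objs[i])) {
                noverrides++;
            }
        }
        /* out_kwd_obj holds the reference that keeps out_objs alive */
        Py_DECREF(out_kwd_obj);
        return noverrides;
    }
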
diff --git a/numpy/core/src/common/umathmodule.h b/numpy/core/src/common/umathmodule.h
new file mode 100644
index 000000000..6998596ee
--- /dev/null
+++ b/numpy/core/src/common/umathmodule.h
@@ -0,0 +1,8 @@
+#include "__umath_generated.c"
+#include "__ufunc_api.c"
+
+PyObject * add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args);
+PyObject * ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUSED(kwds));
+int initumath(PyObject *m);
+
+
diff --git a/numpy/core/src/multiarray/_multiarray_tests.c.src b/numpy/core/src/multiarray/_multiarray_tests.c.src
index 67c9a333c..2a8275572 100644
--- a/numpy/core/src/multiarray/_multiarray_tests.c.src
+++ b/numpy/core/src/multiarray/_multiarray_tests.c.src
@@ -6,10 +6,18 @@
#include "numpy/arrayscalars.h"
#include "numpy/npy_math.h"
#include "numpy/halffloat.h"
+#include "common.h"
#include "mem_overlap.h"
#include "npy_extint128.h"
#include "common.h"
+
+#if defined(MS_WIN32) || defined(__CYGWIN__)
+#define EXPORT(x) __declspec(dllexport) x
+#else
+#define EXPORT(x) x
+#endif
+
#define ARRAY_SIZE(a) (sizeof(a)/sizeof(a[0]))
/* test PyArray_IsPythonScalar, before including private py3 compat header */
@@ -30,6 +38,12 @@ IsPythonScalar(PyObject * dummy, PyObject *args)
#include "npy_pycompat.h"
+/** Function to test calling via ctypes */
+EXPORT(void*) forward_pointer(void *x)
+{
+ return x;
+}
+
/*
* TODO:
* - Handle mode
@@ -1641,6 +1655,42 @@ extint_ceildiv_128_64(PyObject *NPY_UNUSED(self), PyObject *args) {
return pylong_from_int128(c);
}
+struct TestStruct1 {
+ npy_uint8 a;
+ npy_complex64 b;
+};
+
+struct TestStruct2 {
+ npy_uint32 a;
+ npy_complex64 b;
+};
+
+struct TestStruct3 {
+ npy_uint8 a;
+ struct TestStruct1 b;
+};
+
+static PyObject *
+get_struct_alignments(PyObject *NPY_UNUSED(self), PyObject *args) {
+ PyObject *ret = PyTuple_New(3);
+ PyObject *alignment, *size, *val;
+
+/**begin repeat
+ * #N = 1,2,3#
+ */
+ alignment = PyInt_FromLong(_ALIGN(struct TestStruct@N@));
+ size = PyInt_FromLong(sizeof(struct TestStruct@N@));
+ val = PyTuple_Pack(2, alignment, size);
+ Py_DECREF(alignment);
+ Py_DECREF(size);
+ if (val == NULL) {
+ return NULL;
+ }
+ PyTuple_SET_ITEM(ret, @N@-1, val);
+/**end repeat**/
+ return ret;
+}
+
static char get_fpu_mode_doc[] = (
"get_fpu_mode()\n"
@@ -1818,6 +1868,16 @@ printf_float_g(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
return PrintFloat_Printf_g(obj, precision);
}
+static PyObject *
+getset_numericops(PyObject* NPY_UNUSED(self), PyObject* NPY_UNUSED(args))
+{
+ PyObject * ops = PyArray_GetNumericOps();
+ if (ops == NULL) {
+ return NULL;
+ }
+ return PyLong_FromLong(PyArray_SetNumericOps(ops));
+}
+
static PyMethodDef Multiarray_TestsMethods[] = {
{"IsPythonScalar",
IsPythonScalar,
@@ -1926,6 +1986,9 @@ static PyMethodDef Multiarray_TestsMethods[] = {
{"get_fpu_mode",
get_fpu_mode,
METH_VARARGS, get_fpu_mode_doc},
+ {"getset_numericops",
+ getset_numericops,
+ METH_NOARGS, NULL},
/**begin repeat
* #name = cabs, carg#
*/
@@ -1956,6 +2019,9 @@ static PyMethodDef Multiarray_TestsMethods[] = {
{"format_float_OSprintf_g",
(PyCFunction)printf_float_g,
METH_VARARGS , NULL},
+ {"get_struct_alignments",
+ get_struct_alignments,
+ METH_VARARGS, NULL},
{NULL, NULL, 0, NULL} /* Sentinel */
};
@@ -2000,3 +2066,9 @@ init_multiarray_tests(void)
}
return RETVAL;
}
+
+NPY_NO_EXPORT int
+test_not_exported(void)
+{
+ return 1;
+}
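
For reference, what get_struct_alignments is expected to report for TestStruct1 on a typical LP64 ABI (an illustrative, standalone probe that mirrors the _ALIGN trick, with float[2] standing in for npy_complex64):

    #include <stddef.h>
    #include <stdio.h>

    struct test1 { unsigned char a; float b[2]; };  /* uint8 + complex64 */

    int main(void)
    {
        /* complex64 aligns like float, so expect alignment 4, size 12 */
        printf("align=%zu size=%zu\n",
               offsetof(struct { char c; struct test1 v; }, v),
               sizeof(struct test1));
        return 0;
    }
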
diff --git a/numpy/core/src/multiarray/alloc.c b/numpy/core/src/multiarray/alloc.c
index fe957bf16..6755095d7 100644
--- a/numpy/core/src/multiarray/alloc.c
+++ b/numpy/core/src/multiarray/alloc.c
@@ -22,8 +22,16 @@
#include "npy_config.h"
#include "alloc.h"
+
#include <assert.h>
+#ifdef HAVE_SYS_MMAN_H
+#include <sys/mman.h>
+#if defined MADV_HUGEPAGE && defined HAVE_MADVISE
+#define HAVE_MADV_HUGEPAGE
+#endif
+#endif
+
#define NBUCKETS 1024 /* number of buckets for data*/
#define NBUCKETS_DIM 16 /* number of buckets for dimensions/strides */
#define NCACHE 7 /* number of cache entries per bucket */
@@ -52,6 +60,7 @@ static NPY_INLINE void *
_npy_alloc_cache(npy_uintp nelem, npy_uintp esz, npy_uint msz,
cache_bucket * cache, void * (*alloc)(size_t))
{
+ void * p;
assert((esz == 1 && cache == datacache) ||
(esz == sizeof(npy_intp) && cache == dimcache));
assert(NPY_CHECK_GIL_HELD());
@@ -60,19 +69,21 @@ _npy_alloc_cache(npy_uintp nelem, npy_uintp esz, npy_uint msz,
return cache[nelem].ptrs[--(cache[nelem].available)];
}
}
+ p = alloc(nelem * esz);
+ if (p) {
#ifdef _PyPyGC_AddMemoryPressure
- {
- size_t size = nelem * esz;
- void * ret = alloc(size);
- if (ret != NULL)
- {
- _PyPyPyGC_AddMemoryPressure(size);
+ _PyPyPyGC_AddMemoryPressure(nelem * esz);
+#endif
+#ifdef HAVE_MADV_HUGEPAGE
+ /* allow kernel allocating huge pages for large arrays */
+ if (NPY_UNLIKELY(nelem * esz >= ((1u<<22u)))) {
+ npy_uintp offset = 4096u - (npy_uintp)p % (4096u);
+ npy_uintp length = nelem * esz - offset;
+ madvise((void*)((npy_uintp)p + offset), length, MADV_HUGEPAGE);
}
- return ret;
- }
-#else
- return alloc(nelem * esz);
#endif
+ }
+ return p;
}
/*
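
The huge-page hint above rounds the start of the hinted region up to the next 4 KiB page before calling madvise; as written, an already page-aligned pointer is advanced by a full page. A standalone sketch of the same arithmetic (assumes a POSIX system exposing MADV_HUGEPAGE):

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    /* Hint that a large allocation may be backed by huge pages. */
    static void
    hint_hugepage(void *p, size_t size)
    {
        uintptr_t addr = (uintptr_t)p;
        uintptr_t offset = 4096u - addr % 4096u;  /* bytes to next page */
        if (size > offset) {
            madvise((void *)(addr + offset), size - offset, MADV_HUGEPAGE);
        }
    }
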
diff --git a/numpy/core/src/multiarray/array_assign_array.c b/numpy/core/src/multiarray/array_assign_array.c
index 74fbb88c2..b9c1e1be7 100644
--- a/numpy/core/src/multiarray/array_assign_array.c
+++ b/numpy/core/src/multiarray/array_assign_array.c
@@ -24,6 +24,38 @@
#include "array_assign.h"
+/* Check both uint and true alignment */
+NPY_NO_EXPORT int
+copycast_isaligned(int ndim, npy_intp *shape,
+ PyArray_Descr *dtype, char *data, npy_intp *strides)
+{
+ int aligned;
+ int big_aln, small_aln;
+
+ int uint_aln = npy_uint_alignment(dtype->elsize);
+ int true_aln = dtype->alignment;
+
+ /* uint alignment can be 0, meaning not uint alignable */
+ if (uint_aln == 0) {
+ return 0;
+ }
+
+ if (true_aln >= uint_aln) {
+ big_aln = true_aln;
+ small_aln = uint_aln;
+ }
+ else {
+ big_aln = uint_aln;
+ small_aln = true_aln;
+ }
+
+ aligned = raw_array_is_aligned(ndim, shape, data, strides, big_aln);
+ if (aligned && big_aln % small_aln != 0) {
+ aligned = raw_array_is_aligned(ndim, shape, data, strides, small_aln);
+ }
+ return aligned;
+}
+
/*
* Assigns the array from 'src' to 'dst'. The strides must already have
* been broadcast.
@@ -48,11 +80,9 @@ raw_array_assign_array(int ndim, npy_intp *shape,
NPY_BEGIN_THREADS_DEF;
- /* Check alignment */
- aligned = raw_array_is_aligned(ndim,
- dst_data, dst_strides, dst_dtype->alignment) &&
- raw_array_is_aligned(ndim,
- src_data, src_strides, src_dtype->alignment);
+ aligned =
+ copycast_isaligned(ndim, shape, dst_dtype, dst_data, dst_strides) &&
+ copycast_isaligned(ndim, shape, src_dtype, src_data, src_strides);
/* Use raw iteration with no heap allocation */
if (PyArray_PrepareTwoRawArrayIter(
@@ -133,11 +163,9 @@ raw_array_wheremasked_assign_array(int ndim, npy_intp *shape,
NPY_BEGIN_THREADS_DEF;
- /* Check alignment */
- aligned = raw_array_is_aligned(ndim,
- dst_data, dst_strides, dst_dtype->alignment) &&
- raw_array_is_aligned(ndim,
- src_data, src_strides, src_dtype->alignment);
+ aligned =
+ copycast_isaligned(ndim, shape, dst_dtype, dst_data, dst_strides) &&
+ copycast_isaligned(ndim, shape, src_dtype, src_data, src_strides);
/* Use raw iteration with no heap allocation */
if (PyArray_PrepareThreeRawArrayIter(
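
Why two alignments are checked: "true" alignment is the ABI requirement of the element type, while "uint" alignment is what the strided copy loops need to move an element as one unsigned integer, and the two can differ. A sketch (assumes common.h from this patch plus <assert.h>; values are typical of common 64-bit ABIs, not guaranteed):

    /* npy_complex64 is 8 bytes but, as a pair of floats, only requires
     * 4-byte ("true") alignment; copying it as one npy_uint64 requires
     * 8-byte ("uint") alignment. */
    assert(sizeof(npy_complex64) == 8);
    assert(npy_uint_alignment(sizeof(npy_complex64)) == _ALIGN(npy_uint64));
    /* a pointer at address 8k+4 is then true-aligned but not uint-aligned,
     * and copycast_isaligned() reports 0 for such data */
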
diff --git a/numpy/core/src/multiarray/array_assign_scalar.c b/numpy/core/src/multiarray/array_assign_scalar.c
index 17de99cb9..ecb5be47b 100644
--- a/numpy/core/src/multiarray/array_assign_scalar.c
+++ b/numpy/core/src/multiarray/array_assign_scalar.c
@@ -45,12 +45,13 @@ raw_array_assign_scalar(int ndim, npy_intp *shape,
NPY_BEGIN_THREADS_DEF;
- /* Check alignment */
- aligned = raw_array_is_aligned(ndim, dst_data, dst_strides,
- dst_dtype->alignment);
- if (!npy_is_aligned(src_data, src_dtype->alignment)) {
- aligned = 0;
- }
+ /* Check both uint and true alignment */
+ aligned = raw_array_is_aligned(ndim, shape, dst_data, dst_strides,
+ npy_uint_alignment(dst_dtype->elsize)) &&
+ raw_array_is_aligned(ndim, shape, dst_data, dst_strides,
+ dst_dtype->alignment) &&
+ npy_is_aligned(src_data, npy_uint_alignment(src_dtype->elsize)) &&
+ npy_is_aligned(src_data, src_dtype->alignment);
/* Use raw iteration with no heap allocation */
if (PyArray_PrepareOneRawArrayIter(
@@ -118,12 +119,13 @@ raw_array_wheremasked_assign_scalar(int ndim, npy_intp *shape,
NPY_BEGIN_THREADS_DEF;
- /* Check alignment */
- aligned = raw_array_is_aligned(ndim, dst_data, dst_strides,
- dst_dtype->alignment);
- if (!npy_is_aligned(src_data, src_dtype->alignment)) {
- aligned = 0;
- }
+ /* Check both uint and true alignment */
+ aligned = raw_array_is_aligned(ndim, shape, dst_data, dst_strides,
+ npy_uint_alignment(dst_dtype->elsize)) &&
+ raw_array_is_aligned(ndim, shape, dst_data, dst_strides,
+ dst_dtype->alignment) &&
+ npy_is_aligned(src_data, npy_uint_alignment(src_dtype->elsize)) &&
+ npy_is_aligned(src_data, src_dtype->alignment);
/* Use raw iteration with no heap allocation */
if (PyArray_PrepareTwoRawArrayIter(
@@ -224,7 +226,8 @@ PyArray_AssignRawScalar(PyArrayObject *dst,
* we also skip this if 'dst' has an object dtype.
*/
if ((!PyArray_EquivTypes(PyArray_DESCR(dst), src_dtype) ||
- !npy_is_aligned(src_data, src_dtype->alignment)) &&
+ !(npy_is_aligned(src_data, npy_uint_alignment(src_dtype->elsize)) &&
+ npy_is_aligned(src_data, src_dtype->alignment))) &&
PyArray_SIZE(dst) > 1 &&
!PyDataType_REFCHK(PyArray_DESCR(dst))) {
char *tmp_src_data;
diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c
index 8ba3f5310..97aaee93d 100644
--- a/numpy/core/src/multiarray/arrayobject.c
+++ b/numpy/core/src/multiarray/arrayobject.c
@@ -656,11 +656,9 @@ array_might_be_written(PyArrayObject *obj)
{
const char *msg =
"Numpy has detected that you (may be) writing to an array returned\n"
- "by numpy.diagonal or by selecting multiple fields in a structured\n"
- "array. This code will likely break in a future numpy release --\n"
- "see numpy.diagonal or arrays.indexing reference docs for details.\n"
- "The quick fix is to make an explicit copy (e.g., do\n"
- "arr.diagonal().copy() or arr[['f0','f1']].copy()).";
+ "by numpy.diagonal. This code will likely break in a future numpy\n"
+ "release -- see numpy.diagonal docs for details. The quick fix is\n"
+ "to make an explicit copy (e.g., do arr.diagonal().copy()).";
if (PyArray_FLAGS(obj) & NPY_ARRAY_WARN_ON_WRITE) {
/* 2012-07-17, 1.7 */
if (DEPRECATE_FUTUREWARNING(msg) < 0) {
@@ -1218,38 +1216,8 @@ _void_compare(PyArrayObject *self, PyArrayObject *other, int cmp_op)
}
}
-/* This is a copy of _PyErr_ChainExceptions, with:
- * - a minimal implementation for python 2
- * - __cause__ used instead of __context__
- */
-NPY_NO_EXPORT void
-PyArray_ChainExceptionsCause(PyObject *exc, PyObject *val, PyObject *tb)
-{
- if (exc == NULL)
- return;
-
- if (PyErr_Occurred()) {
- /* only py3 supports this anyway */
- #ifdef NPY_PY3K
- PyObject *exc2, *val2, *tb2;
- PyErr_Fetch(&exc2, &val2, &tb2);
- PyErr_NormalizeException(&exc, &val, &tb);
- if (tb != NULL) {
- PyException_SetTraceback(val, tb);
- Py_DECREF(tb);
- }
- Py_DECREF(exc);
- PyErr_NormalizeException(&exc2, &val2, &tb2);
- PyException_SetCause(val2, val);
- PyErr_Restore(exc2, val2, tb2);
- #endif
- }
- else {
- PyErr_Restore(exc, val, tb);
- }
-}
-
-/* Silence the current error and emit a deprecation warning instead.
+/*
+ * Silence the current error and emit a deprecation warning instead.
*
* If warnings are raised as errors, this sets the warning __cause__ to the
* silenced error.
@@ -1259,7 +1227,7 @@ DEPRECATE_silence_error(const char *msg) {
PyObject *exc, *val, *tb;
PyErr_Fetch(&exc, &val, &tb);
if (DEPRECATE(msg) < 0) {
- PyArray_ChainExceptionsCause(exc, val, tb);
+ npy_PyErr_ChainExceptionsCause(exc, val, tb);
return -1;
}
Py_XDECREF(exc);
@@ -1268,6 +1236,118 @@ DEPRECATE_silence_error(const char *msg) {
return 0;
}
+/*
+ * Comparisons can fail, but we do not always want to pass on the exception
+ * (see comment in array_richcompare below), but rather return NotImplemented.
+ * Here, an exception should be set on entrance.
+ * Returns either NotImplemented with the exception cleared, or NULL
+ * with the exception set.
+ * Raises deprecation warnings for cases where behaviour is meant to change
+ * (2015-05-14, 1.10)
+ */
+
+NPY_NO_EXPORT PyObject *
+_failed_comparison_workaround(PyArrayObject *self, PyObject *other, int cmp_op)
+{
+ PyObject *exc, *val, *tb;
+ PyArrayObject *array_other;
+ int other_is_flexible, ndim_other;
+ int self_is_flexible = PyTypeNum_ISFLEXIBLE(PyArray_DESCR(self)->type_num);
+
+ PyErr_Fetch(&exc, &val, &tb);
+ /*
+ * Determine whether other has a flexible dtype; here, inconvertible
+ * is counted as inflexible. (This repeats work done in the ufunc,
+ * but OK to waste some time in an unlikely path.)
+ */
+ array_other = (PyArrayObject *)PyArray_FROM_O(other);
+ if (array_other) {
+ other_is_flexible = PyTypeNum_ISFLEXIBLE(
+ PyArray_DESCR(array_other)->type_num);
+ ndim_other = PyArray_NDIM(array_other);
+ Py_DECREF(array_other);
+ }
+ else {
+ PyErr_Clear(); /* we restore the original error if needed */
+ other_is_flexible = 0;
+ ndim_other = 0;
+ }
+ if (cmp_op == Py_EQ || cmp_op == Py_NE) {
+ /*
+ * note: for == and !=, a structured dtype self cannot get here,
+ * but a string can. Other can be string or structured.
+ */
+ if (other_is_flexible || self_is_flexible) {
+ /*
+ * For scalars, returning NotImplemented is correct.
+ * For arrays, we emit a future deprecation warning.
+ * When this warning is removed, a correctly shaped
+ * array of bool should be returned.
+ */
+ if (ndim_other != 0 || PyArray_NDIM(self) != 0) {
+ /* 2015-05-14, 1.10 */
+ if (DEPRECATE_FUTUREWARNING(
+ "elementwise comparison failed; returning scalar "
+ "instead, but in the future will perform "
+ "elementwise comparison") < 0) {
+ goto fail;
+ }
+ }
+ }
+ else {
+ /*
+ * If neither self nor other had a flexible dtype, the error cannot
+ * have been caused by a lack of implementation in the ufunc.
+ *
+ * 2015-05-14, 1.10
+ */
+ if (DEPRECATE(
+ "elementwise comparison failed; "
+ "this will raise an error in the future.") < 0) {
+ goto fail;
+ }
+ }
+ Py_XDECREF(exc);
+ Py_XDECREF(val);
+ Py_XDECREF(tb);
+ Py_INCREF(Py_NotImplemented);
+ return Py_NotImplemented;
+ }
+ else if (other_is_flexible || self_is_flexible) {
+ /*
+ * For LE, LT, GT, GE and a flexible self or other, we return
+ * NotImplemented, which is the correct answer since the ufuncs do
+ * not in fact implement loops for those. On python 3 this will
+ * get us the desired TypeError, but on python 2, one gets strange
+ * ordering, so we emit a warning.
+ */
+#if !defined(NPY_PY3K)
+ /* 2015-05-14, 1.10 */
+ if (DEPRECATE(
+ "unorderable dtypes; returning scalar but in "
+ "the future this will be an error") < 0) {
+ goto fail;
+ }
+#endif
+ Py_XDECREF(exc);
+ Py_XDECREF(val);
+ Py_XDECREF(tb);
+ Py_INCREF(Py_NotImplemented);
+ return Py_NotImplemented;
+ }
+ else {
+ /* LE, LT, GT, or GE with non-flexible other; just pass on error */
+ goto fail;
+ }
+
+fail:
+ /*
+ * Reraise the original exception, possibly chaining with a new one.
+ */
+ npy_PyErr_ChainExceptionsCause(exc, val, tb);
+ return NULL;
+}
+
NPY_NO_EXPORT PyObject *
array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op)
{
@@ -1365,26 +1445,6 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op)
result = PyArray_GenericBinaryFunction(self,
(PyObject *)other,
n_ops.equal);
- /*
- * If the comparison results in NULL, then the
- * two array objects can not be compared together;
- * indicate that
- */
- if (result == NULL) {
- /*
- * Comparisons should raise errors when element-wise comparison
- * is not possible.
- */
- /* 2015-05-14, 1.10 */
- if (DEPRECATE_silence_error(
- "elementwise == comparison failed; "
- "this will raise an error in the future.") < 0) {
- return NULL;
- }
-
- Py_INCREF(Py_NotImplemented);
- return Py_NotImplemented;
- }
break;
case Py_NE:
RICHCMP_GIVE_UP_IF_NEEDED(obj_self, other);
@@ -1436,21 +1496,6 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op)
result = PyArray_GenericBinaryFunction(self, (PyObject *)other,
n_ops.not_equal);
- if (result == NULL) {
- /*
- * Comparisons should raise errors when element-wise comparison
- * is not possible.
- */
- /* 2015-05-14, 1.10 */
- if (DEPRECATE_silence_error(
- "elementwise != comparison failed; "
- "this will raise an error in the future.") < 0) {
- return NULL;
- }
-
- Py_INCREF(Py_NotImplemented);
- return Py_NotImplemented;
- }
break;
case Py_GT:
RICHCMP_GIVE_UP_IF_NEEDED(obj_self, other);
@@ -1463,8 +1508,37 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op)
n_ops.greater_equal);
break;
default:
- result = Py_NotImplemented;
- Py_INCREF(result);
+ Py_INCREF(Py_NotImplemented);
+ return Py_NotImplemented;
+ }
+ if (result == NULL) {
+ /*
+ * 2015-05-14, 1.10; updated 2018-06-18, 1.16.
+ *
+ * Comparisons can raise errors when element-wise comparison is not
+ * possible. Some of these, though, should not be passed on.
+ * In particular, the ufuncs do not have loops for flexible dtype,
+ * so those should be treated separately. Furthermore, for EQ and NE,
+ * we should never fail.
+ *
+ * Our ideal behaviour would be:
+ *
+ * 1. For EQ and NE:
+ * - If self and other are scalars, return NotImplemented,
+ * so that python can assign True or False as appropriate.
+ * - If either is an array, return an array of False or True.
+ *
+ * 2. For LT, LE, GE, GT:
+ * - If self or other was flexible, return NotImplemented
+ * (as is in fact the case), so python can raise a TypeError.
+ * - If other is not convertible to an array, pass on the error
+ * (MHvK, 2018-06-18: not sure about this, but it's what we have).
+ *
+ * However, for backwards compatibility, we cannot yet return arrays,
+ * so we raise warnings instead. Furthermore, we warn on python2
+ * for LT, LE, GE, GT, since fall-back behaviour is poorly defined.
+ */
+ result = _failed_comparison_workaround(self, other, cmp_op);
}
return result;
}
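
The exception-chaining idiom this file now leans on, distilled into a sketch (the pattern of DEPRECATE_silence_error above, using the npy_PyErr_ChainExceptionsCause helper the patch switches to):

    PyObject *exc, *val, *tb;
    PyErr_Fetch(&exc, &val, &tb);          /* take over the pending error */
    if (DEPRECATE(msg) < 0) {
        /* the warning became an error: chain the original as __cause__ */
        npy_PyErr_ChainExceptionsCause(exc, val, tb);
        return NULL;
    }
    /* warning emitted normally: drop the silenced error */
    Py_XDECREF(exc);
    Py_XDECREF(val);
    Py_XDECREF(tb);
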
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index 553737a3a..823ee7115 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -150,32 +150,6 @@ MyPyLong_AsUnsigned@Type@ (PyObject *obj)
/**end repeat**/
-static npy_longlong
-npy_strtoll(const char *str, char **endptr, int base)
-{
-#if defined HAVE_STRTOLL
- return strtoll(str, endptr, base);
-#elif defined _MSC_VER
- return _strtoi64(str, endptr, base);
-#else
- /* ok on 64 bit posix */
- return PyOS_strtol(str, endptr, base);
-#endif
-}
-
-static npy_ulonglong
-npy_strtoull(const char *str, char **endptr, int base)
-{
-#if defined HAVE_STRTOULL
- return strtoull(str, endptr, base);
-#elif defined _MSC_VER
- return _strtoui64(str, endptr, base);
-#else
- /* ok on 64 bit posix */
- return PyOS_strtoul(str, endptr, base);
-#endif
-}
-
/*
*****************************************************************************
** GETITEM AND SETITEM **
@@ -1797,8 +1771,8 @@ BOOL_scan(FILE *fp, npy_bool *ip, void *NPY_UNUSED(ignore),
* #type = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint,
* npy_long, npy_ulong, npy_longlong, npy_ulonglong,
* npy_datetime, npy_timedelta#
- * #func = (PyOS_strtol, PyOS_strtoul)*4, npy_strtoll, npy_strtoull,
- * npy_strtoll*2#
+ * #func = (PyOS_strtol, PyOS_strtoul)*4, NumPyOS_strtoll, NumPyOS_strtoull,
+ * NumPyOS_strtoll*2#
* #btype = (npy_long, npy_ulong)*4, npy_longlong, npy_ulonglong,
* npy_longlong*2#
*/
@@ -3620,9 +3594,10 @@ OBJECT_dot(char *ip1, npy_intp is1, char *ip2, npy_intp is2, char *op, npy_intp
#define BOOL_fill NULL
/* this requires buffer to be filled with objects or NULL */
-static void
+static int
OBJECT_fill(PyObject **buffer, npy_intp length, void *NPY_UNUSED(ignored))
{
+ int retval = 0;
npy_intp i;
PyObject *start = buffer[0];
PyObject *delta = buffer[1];
@@ -3630,27 +3605,31 @@ OBJECT_fill(PyObject **buffer, npy_intp length, void *NPY_UNUSED(ignored))
delta = PyNumber_Subtract(delta, start);
if (!delta) {
- return;
+ return -1;
}
second = start = PyNumber_Add(start, delta);
if (!start) {
- goto finish;
+ goto error;
}
buffer += 2;
for (i = 2; i < length; i++, buffer++) {
start = PyNumber_Add(start, delta);
if (!start) {
- goto finish;
+ goto error;
}
Py_XDECREF(*buffer);
*buffer = start;
}
+ goto finish;
+
+error:
+ retval = -1;
finish:
Py_XDECREF(second);
Py_DECREF(delta);
- return;
+ return retval;
}
/**begin repeat
@@ -3664,7 +3643,7 @@ finish:
* npy_float, npy_double, npy_longdouble,
* npy_datetime, npy_timedelta#
*/
-static void
+static int
@NAME@_fill(@type@ *buffer, npy_intp length, void *NPY_UNUSED(ignored))
{
npy_intp i;
@@ -3675,10 +3654,11 @@ static void
for (i = 2; i < length; ++i) {
buffer[i] = start + i*delta;
}
+ return 0;
}
/**end repeat**/
-static void
+static int
HALF_fill(npy_half *buffer, npy_intp length, void *NPY_UNUSED(ignored))
{
npy_intp i;
@@ -3689,6 +3669,7 @@ HALF_fill(npy_half *buffer, npy_intp length, void *NPY_UNUSED(ignored))
for (i = 2; i < length; ++i) {
buffer[i] = npy_float_to_half(start + i*delta);
}
+ return 0;
}
/**begin repeat
@@ -3696,7 +3677,7 @@ HALF_fill(npy_half *buffer, npy_intp length, void *NPY_UNUSED(ignored))
* #NAME = CFLOAT, CDOUBLE, CLONGDOUBLE#
* #type = npy_cfloat, npy_cdouble, npy_clongdouble#
*/
-static void
+static int
@NAME@_fill(@type@ *buffer, npy_intp length, void *NPY_UNUSED(ignore))
{
npy_intp i;
@@ -3714,6 +3695,7 @@ static void
buffer->real = start.real + i*delta.real;
buffer->imag = start.imag + i*delta.imag;
}
+ return 0;
}
/**end repeat**/
@@ -4193,6 +4175,53 @@ small_correlate(const char * d_, npy_intp dstride,
}
/*
+*/
+
+/* A clone function for the datetime dtype c_metadata */
+static NpyAuxData *
+_datetime_dtype_metadata_clone(NpyAuxData *data)
+{
+ PyArray_DatetimeDTypeMetaData *newdata =
+ (PyArray_DatetimeDTypeMetaData *)PyArray_malloc(
+ sizeof(*newdata));
+ if (newdata == NULL) {
+ PyErr_NoMemory();
+ return NULL;
+ }
+
+ memcpy(newdata, data, sizeof(*newdata));
+
+ return (NpyAuxData *)newdata;
+}
+
+/*
+ * Allocate and initialize a PyArray_DatetimeDTypeMetaData object
+ */
+static NpyAuxData*
+_create_datetime_metadata(NPY_DATETIMEUNIT base, int num)
+{
+ PyArray_DatetimeDTypeMetaData *data;
+
+ /* Allocate memory for the metadata */
+ data = PyArray_malloc(sizeof(*data));
+ if (data == NULL) {
+ PyErr_NoMemory();
+ return NULL;
+ }
+
+ /* Initialize the base aux data */
+ memset(data, 0, sizeof(PyArray_DatetimeDTypeMetaData));
+ data->base.free = (NpyAuxData_FreeFunc *)PyArray_free;
+ data->base.clone = _datetime_dtype_metadata_clone;
+
+ data->meta.base = base;
+ data->meta.num = num;
+
+ return (NpyAuxData*)data;
+}
+
+
+/*
*****************************************************************************
** SETUP FUNCTION POINTERS **
*****************************************************************************
@@ -4327,12 +4356,11 @@ static PyArray_Descr @from@_Descr = {
* cfloat, cdouble, clongdouble,
* object, datetime, timedelta#
* #sort = 1*18, 0*1, 1*2#
- * #num = 1*15, 2*3, 1*3#
* #fromtype = npy_bool,
* npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint,
* npy_long, npy_ulong, npy_longlong, npy_ulonglong,
* npy_half, npy_float, npy_double, npy_longdouble,
- * npy_float, npy_double, npy_longdouble,
+ * npy_cfloat, npy_cdouble, npy_clongdouble,
* PyObject *, npy_datetime, npy_timedelta#
* #NAME = Bool,
* Byte, UByte, Short, UShort, Int, UInt,
@@ -4433,10 +4461,9 @@ NPY_NO_EXPORT PyArray_Descr @from@_Descr = {
/* type_num */
NPY_@from@,
/* elsize */
- @num@ * sizeof(@fromtype@),
+ sizeof(@fromtype@),
/* alignment */
- @num@ * _ALIGN(@fromtype@) > NPY_MAX_COPY_ALIGNMENT ?
- NPY_MAX_COPY_ALIGNMENT : @num@ * _ALIGN(@fromtype@),
+ _ALIGN(@fromtype@),
/* subarray */
NULL,
/* fields */
@@ -4550,66 +4577,6 @@ PyArray_DescrFromType(int type)
return ret;
}
-/* A clone function for the datetime dtype metadata */
-static NpyAuxData *
-datetime_dtype_metadata_clone(NpyAuxData *data)
-{
- PyArray_DatetimeDTypeMetaData *newdata =
- (PyArray_DatetimeDTypeMetaData *)PyArray_malloc(
- sizeof(PyArray_DatetimeDTypeMetaData));
- if (newdata == NULL) {
- return NULL;
- }
-
- memcpy(newdata, data, sizeof(PyArray_DatetimeDTypeMetaData));
-
- return (NpyAuxData *)newdata;
-}
-
-/*
- * Initializes the c_metadata field for the _builtin_descrs DATETIME
- * and TIMEDELTA.
- *
- * must not be static, gcc 4.1.2 on redhat 5 then miscompiles this function
- * see gh-5163
- *
- */
-NPY_NO_EXPORT int
-initialize_builtin_datetime_metadata(void)
-{
- PyArray_DatetimeDTypeMetaData *data1, *data2;
-
- /* Allocate memory for the metadata */
- data1 = PyArray_malloc(sizeof(PyArray_DatetimeDTypeMetaData));
- if (data1 == NULL) {
- return -1;
- }
- data2 = PyArray_malloc(sizeof(PyArray_DatetimeDTypeMetaData));
- if (data2 == NULL) {
- PyArray_free(data1);
- return -1;
- }
-
- /* Initialize the base aux data */
- memset(data1, 0, sizeof(PyArray_DatetimeDTypeMetaData));
- memset(data2, 0, sizeof(PyArray_DatetimeDTypeMetaData));
- data1->base.free = (NpyAuxData_FreeFunc *)PyArray_free;
- data2->base.free = (NpyAuxData_FreeFunc *)PyArray_free;
- data1->base.clone = datetime_dtype_metadata_clone;
- data2->base.clone = datetime_dtype_metadata_clone;
-
- /* Set to the default metadata */
- data1->meta.base = NPY_DATETIME_DEFAULTUNIT;
- data1->meta.num = 1;
- data2->meta.base = NPY_DATETIME_DEFAULTUNIT;
- data2->meta.num = 1;
-
- _builtin_descrs[NPY_DATETIME]->c_metadata = (NpyAuxData *)data1;
- _builtin_descrs[NPY_TIMEDELTA]->c_metadata = (NpyAuxData *)data2;
-
- return 0;
-}
-
/*
*****************************************************************************
** SETUP TYPE INFO **
@@ -4678,7 +4645,14 @@ set_typeinfo(PyObject *dict)
/**end repeat**/
- if (initialize_builtin_datetime_metadata() < 0) {
+ _builtin_descrs[NPY_DATETIME]->c_metadata = _create_datetime_metadata(
+ NPY_DATETIME_DEFAULTUNIT, 1);
+ if (_builtin_descrs[NPY_DATETIME]->c_metadata == NULL) {
+ return -1;
+ }
+ _builtin_descrs[NPY_TIMEDELTA]->c_metadata = _create_datetime_metadata(
+ NPY_DATETIME_DEFAULTUNIT, 1);
+ if (_builtin_descrs[NPY_TIMEDELTA]->c_metadata == NULL) {
return -1;
}
@@ -4791,13 +4765,10 @@ set_typeinfo(PyObject *dict)
* CFLOAT, CDOUBLE, CLONGDOUBLE#
* #Name = Half, Float, Double, LongDouble,
* CFloat, CDouble, CLongDouble#
- * #num = 1, 1, 1, 1, 2, 2, 2#
*/
s = PyArray_typeinfo(
NPY_@name@LTR, NPY_@name@, NPY_BITSOF_@name@,
- @num@ * _ALIGN(@type@) > NPY_MAX_COPY_ALIGNMENT ?
- NPY_MAX_COPY_ALIGNMENT : @num@ * _ALIGN(@type@),
- &Py@Name@ArrType_Type
+ _ALIGN(@type@), &Py@Name@ArrType_Type
);
if (s == NULL) {
return -1;
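
The fill functions above now return int so that failures can propagate to callers. A sketch of the contract, using the integer variant the @NAME@_fill template generates (INT_fill is the expanded name for npy_int; illustrative only):

    npy_int buf[5] = {1, 3};          /* caller seeds the first two values */
    if (INT_fill(buf, 5, NULL) < 0) {
        /* in practice only the OBJECT variant can fail */
    }
    /* delta = buf[1] - buf[0] = 2, so buf is now {1, 3, 5, 7, 9} */
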
diff --git a/numpy/core/src/multiarray/buffer.c b/numpy/core/src/multiarray/buffer.c
index 663930337..2f66d7f2f 100644
--- a/numpy/core/src/multiarray/buffer.c
+++ b/numpy/core/src/multiarray/buffer.c
@@ -134,7 +134,7 @@ static int
_append_str(_tmp_string_t *s, char const *p)
{
for (; *p != '\0'; p++) {
- if (_append_char(s, *p) != 0) {
+ if (_append_char(s, *p) < 0) {
return -1;
}
}
@@ -142,6 +142,53 @@ _append_str(_tmp_string_t *s, char const *p)
}
/*
+ * Append a PEP3118-formatted field name, ":name:", to str
+ */
+static int
+_append_field_name(_tmp_string_t *str, PyObject *name)
+{
+ int ret = -1;
+ char *p;
+ Py_ssize_t len;
+ PyObject *tmp;
+#if defined(NPY_PY3K)
+ /* FIXME: XXX -- should it use UTF-8 here? */
+ tmp = PyUnicode_AsUTF8String(name);
+#else
+ tmp = name;
+ Py_INCREF(tmp);
+#endif
+ if (tmp == NULL || PyBytes_AsStringAndSize(tmp, &p, &len) < 0) {
+ PyErr_Clear();
+ PyErr_SetString(PyExc_ValueError, "invalid field name");
+ goto fail;
+ }
+ if (_append_char(str, ':') < 0) {
+ goto fail;
+ }
+ while (len > 0) {
+ if (*p == ':') {
+ PyErr_SetString(PyExc_ValueError,
+ "':' is not an allowed character in buffer "
+ "field names");
+ goto fail;
+ }
+ if (_append_char(str, *p) < 0) {
+ goto fail;
+ }
+ ++p;
+ --len;
+ }
+ if (_append_char(str, ':') < 0) {
+ goto fail;
+ }
+ ret = 0;
+fail:
+ Py_XDECREF(tmp);
+ return ret;
+}
+
+/*
* Return non-zero if a type is aligned in each item in the given array,
* AND, the descr element size is a multiple of the alignment,
* AND, the array data is positioned to alignment granularity.
@@ -215,37 +262,49 @@ _buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str,
subarray_tuple = Py_BuildValue("(O)", descr->subarray->shape);
}
- _append_char(str, '(');
+ if (_append_char(str, '(') < 0) {
+ ret = -1;
+ goto subarray_fail;
+ }
for (k = 0; k < PyTuple_GET_SIZE(subarray_tuple); ++k) {
if (k > 0) {
- _append_char(str, ',');
+ if (_append_char(str, ',') < 0) {
+ ret = -1;
+ goto subarray_fail;
+ }
}
item = PyTuple_GET_ITEM(subarray_tuple, k);
dim_size = PyNumber_AsSsize_t(item, NULL);
PyOS_snprintf(buf, sizeof(buf), "%ld", (long)dim_size);
- _append_str(str, buf);
+ if (_append_str(str, buf) < 0) {
+ ret = -1;
+ goto subarray_fail;
+ }
total_count *= dim_size;
}
- _append_char(str, ')');
-
- Py_DECREF(subarray_tuple);
+ if (_append_char(str, ')') < 0) {
+ ret = -1;
+ goto subarray_fail;
+ }
old_offset = *offset;
ret = _buffer_format_string(descr->subarray->base, str, obj, offset,
active_byteorder);
*offset = old_offset + (*offset - old_offset) * total_count;
+
+ subarray_fail:
+ Py_DECREF(subarray_tuple);
return ret;
}
else if (PyDataType_HASFIELDS(descr)) {
Py_ssize_t base_offset = *offset;
- _append_str(str, "T{");
+ if (_append_str(str, "T{") < 0) return -1;
for (k = 0; k < PyTuple_GET_SIZE(descr->names); ++k) {
- PyObject *name, *item, *offset_obj, *tmp;
+ PyObject *name, *item, *offset_obj;
PyArray_Descr *child;
- char *p;
- Py_ssize_t len, new_offset;
+ Py_ssize_t new_offset;
int ret;
name = PyTuple_GET_ITEM(descr->names, k);
@@ -269,7 +328,7 @@ _buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str,
return -1;
}
while (*offset < new_offset) {
- _append_char(str, 'x');
+ if (_append_char(str, 'x') < 0) return -1;
++*offset;
}
@@ -281,36 +340,9 @@ _buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str,
}
/* Insert field name */
-#if defined(NPY_PY3K)
- /* FIXME: XXX -- should it use UTF-8 here? */
- tmp = PyUnicode_AsUTF8String(name);
-#else
- tmp = name;
-#endif
- if (tmp == NULL || PyBytes_AsStringAndSize(tmp, &p, &len) < 0) {
- PyErr_Clear();
- PyErr_SetString(PyExc_ValueError, "invalid field name");
- return -1;
- }
- _append_char(str, ':');
- while (len > 0) {
- if (*p == ':') {
- Py_DECREF(tmp);
- PyErr_SetString(PyExc_ValueError,
- "':' is not an allowed character in buffer "
- "field names");
- return -1;
- }
- _append_char(str, *p);
- ++p;
- --len;
- }
- _append_char(str, ':');
-#if defined(NPY_PY3K)
- Py_DECREF(tmp);
-#endif
+ if (_append_field_name(str, name) < 0) return -1;
}
- _append_char(str, '}');
+ if (_append_char(str, '}') < 0) return -1;
}
else {
int is_standard_size = 1;
@@ -338,7 +370,7 @@ _buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str,
/* Prefer native types, to cater for Cython */
is_standard_size = 0;
if (*active_byteorder != '@') {
- _append_char(str, '@');
+ if (_append_char(str, '@') < 0) return -1;
*active_byteorder = '@';
}
}
@@ -346,7 +378,7 @@ _buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str,
/* Data types that have no standard size */
is_standard_size = 0;
if (*active_byteorder != '^') {
- _append_char(str, '^');
+ if (_append_char(str, '^') < 0) return -1;
*active_byteorder = '^';
}
}
@@ -354,7 +386,7 @@ _buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str,
descr->byteorder == '=') {
is_standard_size = 1;
if (*active_byteorder != descr->byteorder) {
- _append_char(str, descr->byteorder);
+ if (_append_char(str, descr->byteorder) < 0) return -1;
*active_byteorder = descr->byteorder;
}
@@ -372,45 +404,45 @@ _buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str,
}
switch (descr->type_num) {
- case NPY_BOOL: if (_append_char(str, '?')) return -1; break;
- case NPY_BYTE: if (_append_char(str, 'b')) return -1; break;
- case NPY_UBYTE: if (_append_char(str, 'B')) return -1; break;
- case NPY_SHORT: if (_append_char(str, 'h')) return -1; break;
- case NPY_USHORT: if (_append_char(str, 'H')) return -1; break;
- case NPY_INT: if (_append_char(str, 'i')) return -1; break;
- case NPY_UINT: if (_append_char(str, 'I')) return -1; break;
+ case NPY_BOOL: if (_append_char(str, '?') < 0) return -1; break;
+ case NPY_BYTE: if (_append_char(str, 'b') < 0) return -1; break;
+ case NPY_UBYTE: if (_append_char(str, 'B') < 0) return -1; break;
+ case NPY_SHORT: if (_append_char(str, 'h') < 0) return -1; break;
+ case NPY_USHORT: if (_append_char(str, 'H') < 0) return -1; break;
+ case NPY_INT: if (_append_char(str, 'i') < 0) return -1; break;
+ case NPY_UINT: if (_append_char(str, 'I') < 0) return -1; break;
case NPY_LONG:
if (is_standard_size && (NPY_SIZEOF_LONG == 8)) {
- if (_append_char(str, 'q')) return -1;
+ if (_append_char(str, 'q') < 0) return -1;
}
else {
- if (_append_char(str, 'l')) return -1;
+ if (_append_char(str, 'l') < 0) return -1;
}
break;
case NPY_ULONG:
if (is_standard_size && (NPY_SIZEOF_LONG == 8)) {
- if (_append_char(str, 'Q')) return -1;
+ if (_append_char(str, 'Q') < 0) return -1;
}
else {
- if (_append_char(str, 'L')) return -1;
+ if (_append_char(str, 'L') < 0) return -1;
}
break;
- case NPY_LONGLONG: if (_append_char(str, 'q')) return -1; break;
- case NPY_ULONGLONG: if (_append_char(str, 'Q')) return -1; break;
- case NPY_HALF: if (_append_char(str, 'e')) return -1; break;
- case NPY_FLOAT: if (_append_char(str, 'f')) return -1; break;
- case NPY_DOUBLE: if (_append_char(str, 'd')) return -1; break;
- case NPY_LONGDOUBLE: if (_append_char(str, 'g')) return -1; break;
- case NPY_CFLOAT: if (_append_str(str, "Zf")) return -1; break;
- case NPY_CDOUBLE: if (_append_str(str, "Zd")) return -1; break;
- case NPY_CLONGDOUBLE: if (_append_str(str, "Zg")) return -1; break;
+ case NPY_LONGLONG: if (_append_char(str, 'q') < 0) return -1; break;
+ case NPY_ULONGLONG: if (_append_char(str, 'Q') < 0) return -1; break;
+ case NPY_HALF: if (_append_char(str, 'e') < 0) return -1; break;
+ case NPY_FLOAT: if (_append_char(str, 'f') < 0) return -1; break;
+ case NPY_DOUBLE: if (_append_char(str, 'd') < 0) return -1; break;
+ case NPY_LONGDOUBLE: if (_append_char(str, 'g') < 0) return -1; break;
+ case NPY_CFLOAT: if (_append_str(str, "Zf") < 0) return -1; break;
+ case NPY_CDOUBLE: if (_append_str(str, "Zd") < 0) return -1; break;
+ case NPY_CLONGDOUBLE: if (_append_str(str, "Zg") < 0) return -1; break;
/* XXX NPY_DATETIME */
/* XXX NPY_TIMEDELTA */
- case NPY_OBJECT: if (_append_char(str, 'O')) return -1; break;
+ case NPY_OBJECT: if (_append_char(str, 'O') < 0) return -1; break;
case NPY_STRING: {
char buf[128];
PyOS_snprintf(buf, sizeof(buf), "%ds", descr->elsize);
- if (_append_str(str, buf)) return -1;
+ if (_append_str(str, buf) < 0) return -1;
break;
}
case NPY_UNICODE: {
@@ -418,14 +450,14 @@ _buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str,
char buf[128];
assert(descr->elsize % 4 == 0);
PyOS_snprintf(buf, sizeof(buf), "%dw", descr->elsize / 4);
- if (_append_str(str, buf)) return -1;
+ if (_append_str(str, buf) < 0) return -1;
break;
}
case NPY_VOID: {
/* Insert padding bytes */
char buf[128];
PyOS_snprintf(buf, sizeof(buf), "%dx", descr->elsize);
- if (_append_str(str, buf)) return -1;
+ if (_append_str(str, buf) < 0) return -1;
break;
}
default:
@@ -491,8 +523,12 @@ _buffer_info_new(PyObject *obj)
* fields will not hit this code path and are currently unsupported
* in _buffer_format_string.
*/
- _append_char(&fmt, 'B');
- _append_char(&fmt, '\0');
+ if (_append_char(&fmt, 'B') < 0) {
+ goto fail;
+ }
+ if (_append_char(&fmt, '\0') < 0) {
+ goto fail;
+ }
info->ndim = 1;
info->shape = malloc(sizeof(Py_ssize_t) * 2);
if (info->shape == NULL) {
@@ -543,15 +579,17 @@ _buffer_info_new(PyObject *obj)
err = _buffer_format_string(descr, &fmt, obj, NULL, NULL);
Py_DECREF(descr);
if (err != 0) {
- free(fmt.s);
goto fail;
}
- _append_char(&fmt, '\0');
+ if (_append_char(&fmt, '\0') < 0) {
+ goto fail;
+ }
info->format = fmt.s;
return info;
fail:
+ free(fmt.s);
free(info);
return NULL;
}
@@ -989,8 +1027,11 @@ _descriptor_from_pep3118_format(char *s)
Py_DECREF(str);
Py_DECREF(_numpy_internal);
if (descr == NULL) {
+ PyObject *exc, *val, *tb;
+ PyErr_Fetch(&exc, &val, &tb);
PyErr_Format(PyExc_ValueError,
"'%s' is not a valid PEP 3118 buffer format string", buf);
+ npy_PyErr_ChainExceptionsCause(exc, val, tb);
free(buf);
return NULL;
}
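
For reference, the format _append_field_name helps produce: each field name is wrapped in colons and padding is emitted as one 'x' per byte, so an aligned dtype such as [('a', 'u1'), ('b', 'f8')] (with align=True) renders roughly as below (illustrative; byte-order prefix elided):

    const char *fmt = "T{B:a:xxxxxxxd:b:}";
    /* B    -> uint8 field named 'a'
     * x*7  -> padding up to offset 8
     * d    -> double field named 'b' */
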
diff --git a/numpy/core/src/multiarray/calculation.c b/numpy/core/src/multiarray/calculation.c
index e47dd81b9..90ee2c5b2 100644
--- a/numpy/core/src/multiarray/calculation.c
+++ b/numpy/core/src/multiarray/calculation.c
@@ -5,6 +5,7 @@
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#define _MULTIARRAYMODULE
#include "numpy/arrayobject.h"
+#include "lowlevel_strided_loops.h"
#include "npy_config.h"
@@ -1102,7 +1103,18 @@ PyArray_Clip(PyArrayObject *self, PyObject *min, PyObject *max, PyArrayObject *o
if (out == newin) {
outgood = 1;
}
- if (!outgood && PyArray_ISONESEGMENT(out) &&
+
+
+ /* make sure the shape of the output array is the same */
+ if (!PyArray_SAMESHAPE(newin, out)) {
+ PyErr_SetString(PyExc_ValueError, "clip: Output array must have the"
+ "same shape as the input.");
+ goto fail;
+ }
+
+ if (!outgood && PyArray_EQUIVALENTLY_ITERABLE(
+ self, out, PyArray_TRIVIALLY_ITERABLE_OP_READ,
+ PyArray_TRIVIALLY_ITERABLE_OP_NOREAD) &&
PyArray_CHKFLAGS(out, NPY_ARRAY_ALIGNED) &&
PyArray_ISNOTSWAPPED(out) &&
PyArray_EquivTypes(PyArray_DESCR(out), indescr)) {
@@ -1111,15 +1123,19 @@ PyArray_Clip(PyArrayObject *self, PyObject *min, PyObject *max, PyArrayObject *o
/*
* Do we still not have a suitable output array?
- * Create one, now
+ * Create one, now. No matter why the array is not suitable, a copy has
+ * to be made. This may be just to avoid memory overlap though.
*/
if (!outgood) {
int oflags;
- if (PyArray_ISFORTRAN(out))
+ if (PyArray_ISFORTRAN(self)) {
oflags = NPY_ARRAY_FARRAY;
- else
+ }
+ else {
oflags = NPY_ARRAY_CARRAY;
- oflags |= NPY_ARRAY_WRITEBACKIFCOPY | NPY_ARRAY_FORCECAST;
+ }
+ oflags |= (NPY_ARRAY_WRITEBACKIFCOPY | NPY_ARRAY_FORCECAST |
+ NPY_ARRAY_ENSURECOPY);
Py_INCREF(indescr);
newout = (PyArrayObject*)PyArray_FromArray(out, indescr, oflags);
if (newout == NULL) {
@@ -1131,13 +1147,6 @@ PyArray_Clip(PyArrayObject *self, PyObject *min, PyObject *max, PyArrayObject *o
Py_INCREF(newout);
}
- /* make sure the shape of the output array is the same */
- if (!PyArray_SAMESHAPE(newin, newout)) {
- PyErr_SetString(PyExc_ValueError, "clip: Output array must have the"
- "same shape as the input.");
- goto fail;
- }
-
/* Now we can call the fast-clip function */
min_data = max_data = NULL;
if (mina != NULL) {
diff --git a/numpy/core/src/multiarray/common.c b/numpy/core/src/multiarray/common.c
index c6f4847b3..2e51cee7e 100644
--- a/numpy/core/src/multiarray/common.c
+++ b/numpy/core/src/multiarray/common.c
@@ -15,6 +15,7 @@
#include "buffer.h"
#include "get_attr_string.h"
+#include "mem_overlap.h"
/*
* The casting to use for implicit assignment operations resulting from
@@ -439,12 +440,18 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
return 0;
}
- /* Recursive case, first check the sequence contains only one type */
+ /*
+ * The C-API recommends calling PySequence_Fast before any of the other
+ * PySequence_Fast* functions. This is required for PyPy
+ */
seq = PySequence_Fast(obj, "Could not convert object to sequence");
if (seq == NULL) {
goto fail;
}
+
+ /* Recursive case, first check the sequence contains only one type */
size = PySequence_Fast_GET_SIZE(seq);
+ /* objects is borrowed, do not release seq */
objects = PySequence_Fast_ITEMS(seq);
common_type = size > 0 ? Py_TYPE(objects[0]) : NULL;
for (i = 1; i < size; ++i) {
@@ -588,50 +595,6 @@ _zerofill(PyArrayObject *ret)
return 0;
}
-NPY_NO_EXPORT int
-_IsAligned(PyArrayObject *ap)
-{
- int i;
- npy_uintp aligned;
- npy_uintp alignment = PyArray_DESCR(ap)->alignment;
-
- /* alignment 1 types should have a efficient alignment for copy loops */
- if (PyArray_ISFLEXIBLE(ap) || PyArray_ISSTRING(ap)) {
- npy_intp itemsize = PyArray_ITEMSIZE(ap);
- /* power of two sizes may be loaded in larger moves */
- if (((itemsize & (itemsize - 1)) == 0)) {
- alignment = itemsize > NPY_MAX_COPY_ALIGNMENT ?
- NPY_MAX_COPY_ALIGNMENT : itemsize;
- }
- else {
- /* if not power of two it will be accessed bytewise */
- alignment = 1;
- }
- }
-
- if (alignment == 1) {
- return 1;
- }
- aligned = (npy_uintp)PyArray_DATA(ap);
-
- for (i = 0; i < PyArray_NDIM(ap); i++) {
-#if NPY_RELAXED_STRIDES_CHECKING
- /* skip dim == 1 as it is not required to have stride 0 */
- if (PyArray_DIM(ap, i) > 1) {
- /* if shape[i] == 1, the stride is never used */
- aligned |= (npy_uintp)PyArray_STRIDES(ap)[i];
- }
- else if (PyArray_DIM(ap, i) == 0) {
- /* an array with zero elements is always aligned */
- return 1;
- }
-#else /* not NPY_RELAXED_STRIDES_CHECKING */
- aligned |= (npy_uintp)PyArray_STRIDES(ap)[i];
-#endif /* not NPY_RELAXED_STRIDES_CHECKING */
- }
- return npy_is_aligned((void *)aligned, alignment);
-}
-
NPY_NO_EXPORT npy_bool
_IsWriteable(PyArrayObject *ap)
{
@@ -652,12 +615,6 @@ _IsWriteable(PyArrayObject *ap)
* If it is a writeable array, then return TRUE
* If we can find an array object
* or a writeable buffer object as the final base object
- * or a string object (for pickling support memory savings).
- * - this last could be removed if a proper pickleable
- * buffer was added to Python.
- *
- * MW: I think it would better to disallow switching from READONLY
- * to WRITEABLE like this...
*/
while(PyArray_Check(base)) {
@@ -666,15 +623,6 @@ _IsWriteable(PyArrayObject *ap)
}
base = PyArray_BASE((PyArrayObject *)base);
}
-
- /*
- * here so pickle support works seamlessly
- * and unpickled array can be set and reset writeable
- * -- could be abused --
- */
- if (PyString_Check(base)) {
- return NPY_TRUE;
- }
#if defined(NPY_PY3K)
if (PyObject_GetBuffer(base, &view, PyBUF_WRITABLE|PyBUF_SIMPLE) < 0) {
PyErr_Clear();
@@ -862,3 +810,102 @@ _may_have_objects(PyArray_Descr *dtype)
return (PyDataType_HASFIELDS(base) ||
PyDataType_FLAGCHK(base, NPY_ITEM_HASOBJECT) );
}
+
+/*
+ * Make a new empty array, of the passed size, of a type that takes the
+ * priority of ap1 and ap2 into account.
+ *
+ * If `out` is non-NULL, memory overlap is checked with ap1 and ap2, and an
+ * updateifcopy temporary array may be returned. If `result` is non-NULL, the
+ * output array to be returned (`out` if non-NULL and the newly allocated array
+ * otherwise) is incref'd and put to *result.
+ */
+NPY_NO_EXPORT PyArrayObject *
+new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out,
+ int nd, npy_intp dimensions[], int typenum, PyArrayObject **result)
+{
+ PyArrayObject *out_buf;
+
+ if (out) {
+ int d;
+
+ /* verify that out is usable */
+ if (PyArray_NDIM(out) != nd ||
+ PyArray_TYPE(out) != typenum ||
+ !PyArray_ISCARRAY(out)) {
+ PyErr_SetString(PyExc_ValueError,
+ "output array is not acceptable (must have the right datatype, "
+ "number of dimensions, and be a C-Array)");
+ return NULL;
+ }
+ for (d = 0; d < nd; ++d) {
+ if (dimensions[d] != PyArray_DIM(out, d)) {
+ PyErr_SetString(PyExc_ValueError,
+ "output array has wrong dimensions");
+ return NULL;
+ }
+ }
+
+ /* check for memory overlap */
+ if (!(solve_may_share_memory(out, ap1, 1) == 0 &&
+ solve_may_share_memory(out, ap2, 1) == 0)) {
+ /* allocate temporary output array */
+ out_buf = (PyArrayObject *)PyArray_NewLikeArray(out, NPY_CORDER,
+ NULL, 0);
+ if (out_buf == NULL) {
+ return NULL;
+ }
+
+ /* set copy-back */
+ Py_INCREF(out);
+ if (PyArray_SetWritebackIfCopyBase(out_buf, out) < 0) {
+ Py_DECREF(out);
+ Py_DECREF(out_buf);
+ return NULL;
+ }
+ }
+ else {
+ Py_INCREF(out);
+ out_buf = out;
+ }
+
+ if (result) {
+ Py_INCREF(out);
+ *result = out;
+ }
+
+ return out_buf;
+ }
+ else {
+ PyTypeObject *subtype;
+ double prior1, prior2;
+ /*
+ * Need to choose an output array that can hold a sum
+ * -- use priority to determine which subtype.
+ */
+ if (Py_TYPE(ap2) != Py_TYPE(ap1)) {
+ prior2 = PyArray_GetPriority((PyObject *)ap2, 0.0);
+ prior1 = PyArray_GetPriority((PyObject *)ap1, 0.0);
+ subtype = (prior2 > prior1 ? Py_TYPE(ap2) : Py_TYPE(ap1));
+ }
+ else {
+ prior1 = prior2 = 0.0;
+ subtype = Py_TYPE(ap1);
+ }
+
+ out_buf = (PyArrayObject *)PyArray_New(subtype, nd, dimensions,
+ typenum, NULL, NULL, 0, 0,
+ (PyObject *)
+ (prior2 > prior1 ? ap2 : ap1));
+
+ if (out_buf != NULL && result) {
+ Py_INCREF(out_buf);
+ *result = out_buf;
+ }
+
+ return out_buf;
+ }
+}
+
+
+
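
A sketch of the intended calling pattern for new_array_for_sum (names as declared above; resolving the writeback copy remains the caller's responsibility):

    PyArrayObject *result = NULL;
    PyArrayObject *out_buf = new_array_for_sum(ap1, ap2, out, nd, dimensions,
                                               typenum, &result);
    if (out_buf == NULL) {
        return NULL;
    }
    /* ... write the computed values into out_buf ... */
    PyArray_ResolveWritebackIfCopy(out_buf);  /* copy back into `out` */
    Py_DECREF(out_buf);
    return (PyObject *)result;
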
diff --git a/numpy/core/src/multiarray/common.h b/numpy/core/src/multiarray/common.h
index ae9b960c8..0e162903d 100644
--- a/numpy/core/src/multiarray/common.h
+++ b/numpy/core/src/multiarray/common.h
@@ -1,5 +1,6 @@
#ifndef _NPY_PRIVATE_COMMON_H_
#define _NPY_PRIVATE_COMMON_H_
+#include "structmember.h"
#include <numpy/npy_common.h>
#include <numpy/npy_cpu.h>
#include <numpy/ndarraytypes.h>
@@ -56,9 +57,6 @@ index2ptr(PyArrayObject *mp, npy_intp i);
NPY_NO_EXPORT int
_zerofill(PyArrayObject *ret);
-NPY_NO_EXPORT int
-_IsAligned(PyArrayObject *ap);
-
NPY_NO_EXPORT npy_bool
_IsWriteable(PyArrayObject *ap);
@@ -182,6 +180,16 @@ check_and_adjust_axis(int *axis, int ndim)
return check_and_adjust_axis_msg(axis, ndim, Py_None);
}
+/* used for some alignment checks */
+#define _ALIGN(type) offsetof(struct {char c; type v;}, v)
+#define _UINT_ALIGN(type) npy_uint_alignment(sizeof(type))
+/*
+ * Disable harmless compiler warning "4116: unnamed type definition in
+ * parentheses" which is caused by the _ALIGN macro.
+ */
+#if defined(_MSC_VER)
+#pragma warning(disable:4116)
+#endif
/*
* return true if pointer is aligned to 'alignment'
@@ -190,15 +198,45 @@ static NPY_INLINE int
npy_is_aligned(const void * p, const npy_uintp alignment)
{
/*
- * alignment is usually a power of two
- * the test is faster than a direct modulo
+ * Assumes alignment is a power of two, as required by the C standard.
+ * Assumes cast from pointer to uintp gives a sensible representation we
+ * can use bitwise & on (not required by C standard, but used by glibc).
+ * This test is faster than a direct modulo.
+ * Note alignment value of 0 is allowed and returns False.
*/
- if (NPY_LIKELY((alignment & (alignment - 1)) == 0)) {
- return ((npy_uintp)(p) & ((alignment) - 1)) == 0;
- }
- else {
- return ((npy_uintp)(p) % alignment) == 0;
+ return ((npy_uintp)(p) & ((alignment) - 1)) == 0;
+}
+
+/* Get equivalent "uint" alignment given an itemsize, for use in copy code */
+static NPY_INLINE int
+npy_uint_alignment(int itemsize)
+{
+ npy_uintp alignment = 0; /* return value of 0 means unaligned */
+
+ switch(itemsize){
+ case 1:
+ return 1;
+ case 2:
+ alignment = _ALIGN(npy_uint16);
+ break;
+ case 4:
+ alignment = _ALIGN(npy_uint32);
+ break;
+ case 8:
+ alignment = _ALIGN(npy_uint64);
+ break;
+ case 16:
+ /*
+ * 16 byte types are copied using 2 uint64 assignments.
+ * See the strided copy function in lowlevel_strided_loops.c.
+ */
+ alignment = _ALIGN(npy_uint64);
+ break;
+ default:
+ break;
}
+
+ return alignment;
}
/*
@@ -283,4 +321,17 @@ blas_stride(npy_intp stride, unsigned itemsize)
#include "ucsnarrow.h"
+/*
+ * Make a new empty array, of the passed size, of a type that takes the
+ * priority of ap1 and ap2 into account.
+ *
+ * If `out` is non-NULL, memory overlap is checked with ap1 and ap2, and an
+ * updateifcopy temporary array may be returned. If `result` is non-NULL, the
+ * output array to be returned (`out` if non-NULL and the newly allocated array
+ * otherwise) is incref'd and put to *result.
+ */
+NPY_NO_EXPORT PyArrayObject *
+new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out,
+ int nd, npy_intp dimensions[], int typenum, PyArrayObject **result);
+
#endif
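
Spelling out the itemsize-to-alignment mapping npy_uint_alignment encodes (this follows directly from the switch above; assumes common.h plus <assert.h>; 0 marks itemsizes the copy loops handle bytewise):

    assert(npy_uint_alignment(1)  == 1);
    assert(npy_uint_alignment(2)  == _ALIGN(npy_uint16));
    assert(npy_uint_alignment(4)  == _ALIGN(npy_uint32));
    assert(npy_uint_alignment(8)  == _ALIGN(npy_uint64));
    assert(npy_uint_alignment(16) == _ALIGN(npy_uint64)); /* 2 x uint64 */
    assert(npy_uint_alignment(3)  == 0);  /* not uint-copyable */
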
diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c
index bcb44f6d1..10e3478e2 100644
--- a/numpy/core/src/multiarray/compiled_base.c
+++ b/numpy/core/src/multiarray/compiled_base.c
@@ -21,11 +21,17 @@
* and 0 if the array is not monotonic.
*/
static int
-check_array_monotonic(const double *a, npy_int lena)
+check_array_monotonic(const double *a, npy_intp lena)
{
npy_intp i;
double next;
- double last = a[0];
+ double last;
+
+ if (lena == 0) {
+ /* all bin edges hold the same value */
+ return 1;
+ }
+ last = a[0];
/* Skip repeated values at the beginning of the array */
for (i = 1; (i < lena) && (a[i] == last); i++);
@@ -209,106 +215,41 @@ fail:
return NULL;
}
-/*
- * digitize(x, bins, right=False) returns an array of integers the same length
- * as x. The values i returned are such that bins[i - 1] <= x < bins[i] if
- * bins is monotonically increasing, or bins[i - 1] > x >= bins[i] if bins
- * is monotonically decreasing. Beyond the bounds of bins, returns either
- * i = 0 or i = len(bins) as appropriate. If right == True the comparison
- * is bins [i - 1] < x <= bins[i] or bins [i - 1] >= x > bins[i]
- */
+/* Internal function to expose check_array_monotonic to python */
NPY_NO_EXPORT PyObject *
-arr_digitize(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds)
+arr__monotonicity(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds)
{
+ static char *kwlist[] = {"x", NULL};
PyObject *obj_x = NULL;
- PyObject *obj_bins = NULL;
PyArrayObject *arr_x = NULL;
- PyArrayObject *arr_bins = NULL;
- PyObject *ret = NULL;
- npy_intp len_bins;
- int monotonic, right = 0;
- NPY_BEGIN_THREADS_DEF
-
- static char *kwlist[] = {"x", "bins", "right", NULL};
+ long monotonic;
+ npy_intp len_x;
+ NPY_BEGIN_THREADS_DEF;
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|i:digitize", kwlist,
- &obj_x, &obj_bins, &right)) {
- goto fail;
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O:_monotonicity", kwlist,
+ &obj_x)) {
+ return NULL;
}
- /* PyArray_SearchSorted will make `x` contiguous even if we don't */
- arr_x = (PyArrayObject *)PyArray_FROMANY(obj_x, NPY_DOUBLE, 0, 0,
- NPY_ARRAY_CARRAY_RO);
+ /*
+ * TODO:
+ * `x` could be strided, needs change to check_array_monotonic
+ * `x` is forced to double for this check
+ */
+ arr_x = (PyArrayObject *)PyArray_FROMANY(
+ obj_x, NPY_DOUBLE, 1, 1, NPY_ARRAY_CARRAY_RO);
if (arr_x == NULL) {
- goto fail;
- }
-
- /* TODO: `bins` could be strided, needs change to check_array_monotonic */
- arr_bins = (PyArrayObject *)PyArray_FROMANY(obj_bins, NPY_DOUBLE, 1, 1,
- NPY_ARRAY_CARRAY_RO);
- if (arr_bins == NULL) {
- goto fail;
- }
-
- len_bins = PyArray_SIZE(arr_bins);
- if (len_bins == 0) {
- PyErr_SetString(PyExc_ValueError, "bins must have non-zero length");
- goto fail;
+ return NULL;
}
- NPY_BEGIN_THREADS_THRESHOLDED(len_bins)
- monotonic = check_array_monotonic((const double *)PyArray_DATA(arr_bins),
- len_bins);
+ len_x = PyArray_SIZE(arr_x);
+ NPY_BEGIN_THREADS_THRESHOLDED(len_x)
+ monotonic = check_array_monotonic(
+ (const double *)PyArray_DATA(arr_x), len_x);
NPY_END_THREADS
+ Py_DECREF(arr_x);
- if (monotonic == 0) {
- PyErr_SetString(PyExc_ValueError,
- "bins must be monotonically increasing or decreasing");
- goto fail;
- }
-
- /* PyArray_SearchSorted needs an increasing array */
- if (monotonic == - 1) {
- PyArrayObject *arr_tmp = NULL;
- npy_intp shape = PyArray_DIM(arr_bins, 0);
- npy_intp stride = -PyArray_STRIDE(arr_bins, 0);
- void *data = (void *)(PyArray_BYTES(arr_bins) - stride * (shape - 1));
-
- arr_tmp = (PyArrayObject *)PyArray_NewFromDescrAndBase(
- &PyArray_Type, PyArray_DescrFromType(NPY_DOUBLE),
- 1, &shape, &stride, data,
- PyArray_FLAGS(arr_bins), NULL, (PyObject *)arr_bins);
- Py_DECREF(arr_bins);
- if (!arr_tmp) {
- goto fail;
- }
- arr_bins = arr_tmp;
- }
-
- ret = PyArray_SearchSorted(arr_bins, (PyObject *)arr_x,
- right ? NPY_SEARCHLEFT : NPY_SEARCHRIGHT, NULL);
- if (!ret) {
- goto fail;
- }
-
- /* If bins is decreasing, ret has bins from end, not start */
- if (monotonic == -1) {
- npy_intp *ret_data =
- (npy_intp *)PyArray_DATA((PyArrayObject *)ret);
- npy_intp len_ret = PyArray_SIZE((PyArrayObject *)ret);
-
- NPY_BEGIN_THREADS_THRESHOLDED(len_ret)
- while (len_ret--) {
- *ret_data = len_bins - *ret_data;
- ret_data++;
- }
- NPY_END_THREADS
- }
-
- fail:
- Py_XDECREF(arr_x);
- Py_XDECREF(arr_bins);
- return ret;
+ return PyInt_FromLong(monotonic);
}
/*
@@ -654,6 +595,10 @@ arr_interp(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
else if (j == lenxp - 1) {
dres[i] = dy[j];
}
+ else if (dx[j] == x_val) {
+ /* Avoid potential non-finite interpolation */
+ dres[i] = dy[j];
+ }
else {
const npy_double slope = (slopes != NULL) ? slopes[j] :
(dy[j+1] - dy[j]) / (dx[j+1] - dx[j]);
@@ -822,6 +767,10 @@ arr_interp_complex(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
else if (j == lenxp - 1) {
dres[i] = dy[j];
}
+ else if (dx[j] == x_val) {
+ /* Avoid potential non-finite interpolation */
+ dres[i] = dy[j];
+ }
else {
if (slopes!=NULL) {
dres[i].real = slopes[j].real*(x_val - dx[j]) + dy[j].real;
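
Both interp hunks add the same short-circuit: when x_val lands exactly on a knot, return dy[j] directly so a non-finite slope never multiplies zero. A minimal sketch of the failure mode being avoided (IEEE inf * 0.0 is nan):

    import numpy as np

    x = [0.0, 1.0]
    y = [5.0, np.inf]
    slope = (y[1] - y[0]) / (x[1] - x[0])   # inf
    print(slope * (0.0 - x[0]) + y[0])      # nan -- the general formula
    print(np.interp(0.0, x, y))             # 5.0 with this fix
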
@@ -1207,7 +1156,32 @@ arr_unravel_index(PyObject *self, PyObject *args, PyObject *kwds)
int i, ret_ndim;
npy_intp ret_dims[NPY_MAXDIMS], ret_strides[NPY_MAXDIMS];
- char *kwlist[] = {"indices", "dims", "order", NULL};
+ char *kwlist[] = {"indices", "shape", "order", NULL};
+
+ /*
+ * TODO: remove this in favor of warning raised in the dispatcher when
+ * __array_function__ is enabled by default.
+ */
+
+ /*
+ * Continue to support the older "dims" argument in place
+ * of the "shape" argument. Issue an appropriate warning
+ * if "dims" is detected in keywords, then replace it with
+ * the new "shape" argument and continue processing as usual.
+ */
+ if (kwds) {
+ PyObject *dims_item, *shape_item;
+ dims_item = PyDict_GetItemString(kwds, "dims");
+ shape_item = PyDict_GetItemString(kwds, "shape");
+ if (dims_item != NULL && shape_item == NULL) {
+ if (DEPRECATE("'shape' argument should be"
+ " used instead of 'dims'") < 0) {
+ return NULL;
+ }
+ PyDict_SetItemString(kwds, "shape", dims_item);
+ PyDict_DelItemString(kwds, "dims");
+ }
+ }
if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO&|O&:unravel_index",
kwlist,
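
User-visible effect of the keyword rename, as a short sketch: "shape" is the new spelling, while "dims" keeps working behind a DeprecationWarning until it is removed.

    import numpy as np
    import warnings

    print(np.unravel_index(22, shape=(7, 6)))      # (3, 4) -- new spelling
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        print(np.unravel_index(22, dims=(7, 6)))   # (3, 4), plus a warning
        print(w[0].category.__name__)              # DeprecationWarning
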
diff --git a/numpy/core/src/multiarray/compiled_base.h b/numpy/core/src/multiarray/compiled_base.h
index 51508531c..082139910 100644
--- a/numpy/core/src/multiarray/compiled_base.h
+++ b/numpy/core/src/multiarray/compiled_base.h
@@ -7,7 +7,7 @@ arr_insert(PyObject *, PyObject *, PyObject *);
NPY_NO_EXPORT PyObject *
arr_bincount(PyObject *, PyObject *, PyObject *);
NPY_NO_EXPORT PyObject *
-arr_digitize(PyObject *, PyObject *, PyObject *kwds);
+arr__monotonicity(PyObject *, PyObject *, PyObject *kwds);
NPY_NO_EXPORT PyObject *
arr_interp(PyObject *, PyObject *, PyObject *);
NPY_NO_EXPORT PyObject *
diff --git a/numpy/core/src/multiarray/convert.c b/numpy/core/src/multiarray/convert.c
index e88582a51..7db467308 100644
--- a/numpy/core/src/multiarray/convert.c
+++ b/numpy/core/src/multiarray/convert.c
@@ -614,22 +614,6 @@ PyArray_View(PyArrayObject *self, PyArray_Descr *type, PyTypeObject *pytype)
}
dtype = PyArray_DESCR(self);
-
- if (type != NULL && !PyArray_EquivTypes(dtype, type) &&
- (PyArray_FLAGS(self) & NPY_ARRAY_WARN_ON_WRITE)) {
- const char *msg =
- "Numpy has detected that you may be viewing or writing to an array "
- "returned by selecting multiple fields in a structured array. \n\n"
- "This code may break in numpy 1.16 because this will return a view "
- "instead of a copy -- see release notes for details.";
- /* 2016-09-19, 1.12 */
- if (DEPRECATE_FUTUREWARNING(msg) < 0) {
- return NULL;
- }
- /* Only warn once per array */
- PyArray_CLEARFLAGS(self, NPY_ARRAY_WARN_ON_WRITE);
- }
-
flags = PyArray_FLAGS(self);
Py_INCREF(dtype);
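
Deleting this block retires the 1.12-era FutureWarning: as of 1.16 multi-field selection returns a real view, so there is nothing left to warn about. A sketch of the settled semantics (assuming 1.16 behavior):

    import numpy as np

    a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'i4'), ('c', 'f8')])
    v = a[['a', 'c']]     # a genuine view, not a packed copy
    v['a'] = 1            # writes through to the parent array
    print(a['a'])         # [1 1 1]
    print(v.base is a)    # True
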
diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c
index 3df764a48..33a706412 100644
--- a/numpy/core/src/multiarray/convert_datatype.c
+++ b/numpy/core/src/multiarray/convert_datatype.c
@@ -2028,7 +2028,6 @@ PyArray_ConvertToCommonType(PyObject *op, int *retn)
{
int i, n, allscalars = 0;
PyArrayObject **mps = NULL;
- PyObject *otmp;
PyArray_Descr *intype = NULL, *stype = NULL;
PyArray_Descr *newtype = NULL;
NPY_SCALARKIND scalarkind = NPY_NOSCALAR, intypekind = NPY_NOSCALAR;
@@ -2067,9 +2066,13 @@ PyArray_ConvertToCommonType(PyObject *op, int *retn)
}
for (i = 0; i < n; i++) {
- otmp = PySequence_GetItem(op, i);
+ PyObject *otmp = PySequence_GetItem(op, i);
+ if (otmp == NULL) {
+ goto fail;
+ }
if (!PyArray_CheckAnyScalar(otmp)) {
newtype = PyArray_DescrFromObject(otmp, intype);
+ Py_DECREF(otmp);
Py_XDECREF(intype);
if (newtype == NULL) {
goto fail;
@@ -2079,6 +2082,7 @@ PyArray_ConvertToCommonType(PyObject *op, int *retn)
}
else {
newtype = PyArray_DescrFromObject(otmp, stype);
+ Py_DECREF(otmp);
Py_XDECREF(stype);
if (newtype == NULL) {
goto fail;
@@ -2088,7 +2092,6 @@ PyArray_ConvertToCommonType(PyObject *op, int *retn)
mps[i] = (PyArrayObject *)Py_None;
Py_INCREF(Py_None);
}
- Py_XDECREF(otmp);
}
if (intype == NULL) {
/* all scalars */
@@ -2112,6 +2115,9 @@ PyArray_ConvertToCommonType(PyObject *op, int *retn)
newtype = PyArray_PromoteTypes(intype, stype);
Py_XDECREF(intype);
intype = newtype;
+ if (newtype == NULL) {
+ goto fail;
+ }
}
for (i = 0; i < n; i++) {
Py_XDECREF(mps[i]);
@@ -2123,8 +2129,9 @@ PyArray_ConvertToCommonType(PyObject *op, int *retn)
/* Make sure all arrays are actual array objects. */
for (i = 0; i < n; i++) {
int flags = NPY_ARRAY_CARRAY;
- if ((otmp = PySequence_GetItem(op, i)) == NULL) {
+ PyObject *otmp = PySequence_GetItem(op, i);
+ if (otmp == NULL) {
goto fail;
}
if (!allscalars && ((PyObject *)(mps[i]) == Py_None)) {
@@ -2133,8 +2140,8 @@ PyArray_ConvertToCommonType(PyObject *op, int *retn)
Py_DECREF(Py_None);
}
Py_INCREF(intype);
- mps[i] = (PyArrayObject*)
- PyArray_FromAny(otmp, intype, 0, 0, flags, NULL);
+ mps[i] = (PyArrayObject*)PyArray_FromAny(otmp, intype, 0, 0,
+ flags, NULL);
Py_DECREF(otmp);
if (mps[i] == NULL) {
goto fail;
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index 3c61f3b2b..a17621946 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -11,7 +11,7 @@
#include "npy_config.h"
-#include "npy_import.h"
+#include "npy_ctypes.h"
#include "npy_pycompat.h"
#include "multiarraymodule.h"
@@ -667,7 +667,6 @@ discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it,
int *out_is_object)
{
PyObject *e;
- int r;
npy_intp n, i;
Py_buffer buffer_view;
PyObject * seq;
@@ -849,46 +848,48 @@ discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it,
return 0;
}
else {
- npy_intp dtmp[NPY_MAXDIMS];
- int j, maxndim_m1 = *maxndim - 1;
- e = PySequence_Fast_GET_ITEM(seq, 0);
-
- r = discover_dimensions(e, &maxndim_m1, d + 1, check_it,
- stop_at_string, stop_at_tuple,
- out_is_object);
- if (r < 0) {
+ int all_elems_maxndim = *maxndim - 1;
+ npy_intp *all_elems_d = d + 1;
+ int all_dimensions_match = 1;
+
+ /* Get the dimensions of the first item as a baseline */
+ PyObject *first = PySequence_Fast_GET_ITEM(seq, 0);
+ if (discover_dimensions(
+ first, &all_elems_maxndim, all_elems_d, check_it,
+ stop_at_string, stop_at_tuple, out_is_object) < 0) {
Py_DECREF(seq);
- return r;
+ return -1;
}
- /* For the dimension truncation check below */
- *maxndim = maxndim_m1 + 1;
+ /* Compare the dimensions of all the remaining items */
for (i = 1; i < n; ++i) {
- e = PySequence_Fast_GET_ITEM(seq, i);
- /* Get the dimensions of the first item */
- r = discover_dimensions(e, &maxndim_m1, dtmp, check_it,
- stop_at_string, stop_at_tuple,
- out_is_object);
- if (r < 0) {
+ int j;
+ int elem_maxndim = *maxndim - 1;
+ npy_intp elem_d[NPY_MAXDIMS];
+
+ PyObject *elem = PySequence_Fast_GET_ITEM(seq, i);
+ if (discover_dimensions(
+ elem, &elem_maxndim, elem_d, check_it,
+ stop_at_string, stop_at_tuple, out_is_object) < 0) {
Py_DECREF(seq);
- return r;
+ return -1;
}
- /* Reduce max_ndim_m1 to just items which match */
- for (j = 0; j < maxndim_m1; ++j) {
- if (dtmp[j] != d[j+1]) {
- maxndim_m1 = j;
+ /* Find the number of left-dimensions which match, j */
+ for (j = 0; j < elem_maxndim && j < all_elems_maxndim; ++j) {
+ if (elem_d[j] != all_elems_d[j]) {
break;
}
}
+ if (j != elem_maxndim || j != all_elems_maxndim) {
+ all_dimensions_match = 0;
+ }
+ all_elems_maxndim = j;
}
- /*
- * If the dimensions are truncated, need to produce
- * an object array.
- */
- if (maxndim_m1 + 1 < *maxndim) {
+ *maxndim = all_elems_maxndim + 1;
+ if (!all_dimensions_match) {
+ /* typically results in an array containing variable-length lists */
*out_is_object = 1;
- *maxndim = maxndim_m1 + 1;
}
}
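
The rewritten discovery compares every element's dimensions against the first element's and keeps only the leading dimensions on which all elements agree; anything ragged past that point becomes an object array. A quick sketch of both outcomes:

    import numpy as np

    print(np.array([[1, 2], [3, 4]]).shape)   # (2, 2) -- fully regular
    a = np.array([[1, 2], [3, 4, 5]])         # ragged at depth 1
    print(a.shape, a.dtype)                   # (2,) object
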
@@ -1382,15 +1383,7 @@ _array_from_buffer_3118(PyObject *memoryview)
* Note that even if the above are fixed in master, we have to drop the
* early patch versions of python to actually make use of the fixes.
*/
-
- int is_ctypes = _is_from_ctypes(view->obj);
- if (is_ctypes < 0) {
- /* This error is not useful */
- PyErr_WriteUnraisable(view->obj);
- is_ctypes = 0;
- }
-
- if (!is_ctypes) {
+ if (!npy_ctypes_check(Py_TYPE(view->obj))) {
/* This object has no excuse for a broken PEP3118 buffer */
PyErr_Format(
PyExc_RuntimeError,
@@ -1709,9 +1702,9 @@ PyArray_GetArrayParamsFromObject(PyObject *op,
*out_ndim = NPY_MAXDIMS;
is_object = 0;
- if (discover_dimensions(op, out_ndim, out_dims, check_it,
- stop_at_string, stop_at_tuple,
- &is_object) < 0) {
+ if (discover_dimensions(
+ op, out_ndim, out_dims, check_it,
+ stop_at_string, stop_at_tuple, &is_object) < 0) {
Py_DECREF(*out_dtype);
if (PyErr_Occurred()) {
return -1;
@@ -2031,7 +2024,7 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags)
newtype = oldtype;
Py_INCREF(oldtype);
}
- if (PyDataType_ISUNSIZED(newtype)) {
+ else if (PyDataType_ISUNSIZED(newtype)) {
PyArray_DESCR_REPLACE(newtype);
if (newtype == NULL) {
return NULL;
@@ -2834,7 +2827,8 @@ PyArray_CopyAsFlat(PyArrayObject *dst, PyArrayObject *src, NPY_ORDER order)
* contiguous strides, etc.
*/
if (PyArray_GetDTypeTransferFunction(
- PyArray_ISALIGNED(src) && PyArray_ISALIGNED(dst),
+ IsUintAligned(src) && IsAligned(src) &&
+ IsUintAligned(dst) && IsAligned(dst),
src_stride, dst_stride,
PyArray_DESCR(src), PyArray_DESCR(dst),
0,
diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c
index df9b9cec4..a8550d958 100644
--- a/numpy/core/src/multiarray/datetime.c
+++ b/numpy/core/src/multiarray/datetime.c
@@ -2845,6 +2845,16 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
*out = NPY_DATETIME_NAT;
return 0;
}
+ else if (PyArray_IsScalar(obj, Integer)) {
+ /* Use the default unit if none was specified */
+ if (meta->base == NPY_FR_ERROR) {
+ meta->base = NPY_DATETIME_DEFAULTUNIT;
+ meta->num = 1;
+ }
+
+ *out = PyLong_AsLongLong(obj);
+ return 0;
+ }
else {
PyErr_SetString(PyExc_ValueError,
"Could not convert object to NumPy timedelta");
@@ -3747,6 +3757,27 @@ recursive_find_object_datetime64_type(PyObject *obj,
}
/*
+ * Handler function for PyDelta values, which may also be
+ * stored in a 0-dimensional NumPy array.
+ */
+static int
+delta_checker(PyArray_DatetimeMetaData *meta)
+{
+ PyArray_DatetimeMetaData tmp_meta;
+
+ tmp_meta.base = NPY_FR_us;
+ tmp_meta.num = 1;
+
+ /* Combine it with 'meta' */
+ if (compute_datetime_metadata_greatest_common_divisor(
+ meta, &tmp_meta, meta, 0, 0) < 0) {
+ return -1;
+ }
+ return 0;
+}
+
+/*
* Recursively determines the metadata for an NPY_TIMEDELTA dtype.
*
* Returns 0 on success, -1 on failure.
@@ -3783,6 +3814,28 @@ recursive_find_object_timedelta64_type(PyObject *obj,
else if (arr_dtype->type_num != NPY_OBJECT) {
return 0;
}
+ else {
+ if (PyArray_NDIM(arr) == 0) {
+ /*
+ * special handling of 0 dimensional NumPy object
+ * arrays, which may be indexed to retrieve their
+ * single object using [()], but not by using
+ * __getitem__(integer) approaches
+ */
+ PyObject *item, *meth, *args;
+
+ meth = PyObject_GetAttrString(obj, "__getitem__");
+ args = Py_BuildValue("(())");
+ if (meth == NULL || args == NULL) {
+ Py_XDECREF(meth);
+ Py_XDECREF(args);
+ return -1;
+ }
+ item = PyObject_CallObject(meth, args);
+ Py_DECREF(meth);
+ Py_DECREF(args);
+ if (item == NULL) {
+ return -1;
+ }
+ /*
+ * NOTE: may need other type checks here in the future
+ * for expanded 0 D datetime array conversions?
+ */
+ if (PyDelta_Check(item)) {
+ Py_DECREF(item);
+ return delta_checker(meta);
+ }
+ Py_DECREF(item);
+ }
+ }
}
/* Datetime scalar -> use its metadata */
else if (PyArray_IsScalar(obj, Timedelta)) {
@@ -3803,18 +3856,7 @@ recursive_find_object_timedelta64_type(PyObject *obj,
}
/* Python timedelta object -> 'us' */
else if (PyDelta_Check(obj)) {
- PyArray_DatetimeMetaData tmp_meta;
-
- tmp_meta.base = NPY_FR_us;
- tmp_meta.num = 1;
-
- /* Combine it with 'meta' */
- if (compute_datetime_metadata_greatest_common_divisor(meta,
- &tmp_meta, meta, 0, 0) < 0) {
- return -1;
- }
-
- return 0;
+ return delta_checker(meta);
}
/* Now check if what we have left is a sequence for recursion */
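
Sketch of the case the 0-d branch above covers, assuming the unit discovery is exercised through np.array(..., dtype='m8'): a datetime.timedelta hiding inside a 0-d object array is now found and mapped to the 'us' unit, just like a bare timedelta.

    import numpy as np
    import datetime

    td = datetime.timedelta(minutes=5)
    x = np.array(td, dtype=object)      # 0-d object array, indexed via [()]
    print(np.array(x, dtype='m8'))      # 300000000 microseconds
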
diff --git a/numpy/core/src/multiarray/datetime_strings.c b/numpy/core/src/multiarray/datetime_strings.c
index 4f9d8fa41..95b7bb3dc 100644
--- a/numpy/core/src/multiarray/datetime_strings.c
+++ b/numpy/core/src/multiarray/datetime_strings.c
@@ -69,7 +69,7 @@
* multiplatform code, get_localtime() should never be used outside of this
* range.
*
- * [1] http://en.wikipedia.org/wiki/Year_2038_problem
+ * [1] https://en.wikipedia.org/wiki/Year_2038_problem
*/
static int
get_localtime(NPY_TIME_T *ts, struct tm *tms)
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index e3a018356..3038e4dea 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -10,7 +10,7 @@
#include "numpy/arrayscalars.h"
#include "npy_config.h"
-
+#include "npy_ctypes.h"
#include "npy_pycompat.h"
#include "_datetime.h"
@@ -55,79 +55,46 @@ Borrowed_PyMapping_GetItemString(PyObject *o, char *key)
return ret;
}
-/*
- * Creates a dtype object from ctypes inputs.
- *
- * Returns a new reference to a dtype object, or NULL
- * if this is not possible. When it returns NULL, it does
- * not set a Python exception.
- */
static PyArray_Descr *
-_arraydescr_fromctypes(PyObject *obj)
+_arraydescr_from_ctypes_type(PyTypeObject *type)
{
- PyObject *dtypedescr;
- PyArray_Descr *newdescr;
- int ret;
+ PyObject *_numpy_dtype_ctypes;
+ PyObject *res;
- /* Understand basic ctypes */
- dtypedescr = PyObject_GetAttrString(obj, "_type_");
- PyErr_Clear();
- if (dtypedescr) {
- ret = PyArray_DescrConverter(dtypedescr, &newdescr);
- Py_DECREF(dtypedescr);
- if (ret == NPY_SUCCEED) {
- PyObject *length;
- /* Check for ctypes arrays */
- length = PyObject_GetAttrString(obj, "_length_");
- PyErr_Clear();
- if (length) {
- /* derived type */
- PyObject *newtup;
- PyArray_Descr *derived;
- newtup = Py_BuildValue("N(N)", newdescr, length);
- ret = PyArray_DescrConverter(newtup, &derived);
- Py_DECREF(newtup);
- if (ret == NPY_SUCCEED) {
- return derived;
- }
- PyErr_Clear();
- return NULL;
- }
- return newdescr;
- }
- PyErr_Clear();
+ /* Call the python function of the same name. */
+ _numpy_dtype_ctypes = PyImport_ImportModule("numpy.core._dtype_ctypes");
+ if (_numpy_dtype_ctypes == NULL) {
return NULL;
}
- /* Understand ctypes structures --
- bit-fields are not supported
- automatically aligns */
- dtypedescr = PyObject_GetAttrString(obj, "_fields_");
- PyErr_Clear();
- if (dtypedescr) {
- ret = PyArray_DescrAlignConverter(dtypedescr, &newdescr);
- Py_DECREF(dtypedescr);
- if (ret == NPY_SUCCEED) {
- return newdescr;
- }
- PyErr_Clear();
+ res = PyObject_CallMethod(_numpy_dtype_ctypes, "dtype_from_ctypes_type", "O", (PyObject *)type);
+ Py_DECREF(_numpy_dtype_ctypes);
+ if (res == NULL) {
+ return NULL;
}
- return NULL;
+ /*
+ * sanity check that dtype_from_ctypes_type returned the right type,
+ * since getting it wrong would give segfaults.
+ */
+ if (!PyObject_TypeCheck(res, &PyArrayDescr_Type)) {
+ Py_DECREF(res);
+ PyErr_BadInternalCall();
+ return NULL;
+ }
+
+ return (PyArray_Descr *)res;
}
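
The C fast path now defers all ctypes handling to a Python helper (numpy.core._dtype_ctypes.dtype_from_ctypes_type), with only a type-check guarding against a bad return. The user-visible behavior is simply that ctypes types convert to dtypes, including structures:

    import ctypes
    import numpy as np

    print(np.dtype(ctypes.c_uint16))   # uint16

    class Point(ctypes.Structure):
        _fields_ = [('x', ctypes.c_int32), ('y', ctypes.c_int32)]

    # on a little-endian machine:
    print(np.dtype(Point))             # [('x', '<i4'), ('y', '<i4')]
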
/*
- * This function creates a dtype object when:
- * - The object has a "dtype" attribute, and it can be converted
- * to a dtype object.
- * - The object is a ctypes type object, including array
- * and structure types.
+ * This function creates a dtype object when the object has a "dtype" attribute,
+ * and it can be converted to a dtype object.
*
* Returns a new reference to a dtype object, or NULL
* if this is not possible. When it returns NULL, it does
* not set a Python exception.
*/
NPY_NO_EXPORT PyArray_Descr *
-_arraydescr_fromobj(PyObject *obj)
+_arraydescr_from_dtype_attr(PyObject *obj)
{
PyObject *dtypedescr;
PyArray_Descr *newdescr = NULL;
@@ -136,15 +103,18 @@ _arraydescr_fromobj(PyObject *obj)
/* For arbitrary objects that have a "dtype" attribute */
dtypedescr = PyObject_GetAttrString(obj, "dtype");
PyErr_Clear();
- if (dtypedescr != NULL) {
- ret = PyArray_DescrConverter(dtypedescr, &newdescr);
- Py_DECREF(dtypedescr);
- if (ret == NPY_SUCCEED) {
- return newdescr;
- }
+ if (dtypedescr == NULL) {
+ return NULL;
+ }
+
+ ret = PyArray_DescrConverter(dtypedescr, &newdescr);
+ Py_DECREF(dtypedescr);
+ if (ret != NPY_SUCCEED) {
PyErr_Clear();
+ return NULL;
}
- return _arraydescr_fromctypes(obj);
+
+ return newdescr;
}
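
What _arraydescr_from_dtype_attr keeps doing, as a sketch: any object exposing a convertible `dtype` attribute can itself be passed to np.dtype(); only the ctypes special-casing has moved to the separate path above.

    import numpy as np

    class HasDtype:
        # hypothetical example class, not part of the patch
        dtype = np.dtype('float64')

    print(np.dtype(HasDtype()))   # float64
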
/*
@@ -287,6 +257,9 @@ _convert_from_tuple(PyObject *obj, int align)
return NULL;
}
PyArray_DESCR_REPLACE(type);
+ if (type == NULL) {
+ return NULL;
+ }
if (type->type_num == NPY_UNICODE) {
type->elsize = itemsize << 2;
}
@@ -1424,10 +1397,20 @@ PyArray_DescrConverter(PyObject *obj, PyArray_Descr **at)
check_num = NPY_VOID;
}
else {
- *at = _arraydescr_fromobj(obj);
+ *at = _arraydescr_from_dtype_attr(obj);
if (*at) {
return NPY_SUCCEED;
}
+
+ /*
+ * Note: this comes after _arraydescr_from_dtype_attr because the ctypes
+ * type might override the dtype if numpy does not otherwise
+ * support it.
+ */
+ if (npy_ctypes_check((PyTypeObject *)obj)) {
+ *at = _arraydescr_from_ctypes_type((PyTypeObject *)obj);
+ return *at ? NPY_SUCCEED : NPY_FAIL;
+ }
}
goto finish;
}
@@ -1440,6 +1423,12 @@ PyArray_DescrConverter(PyObject *obj, PyArray_Descr **at)
PyObject *obj2;
obj2 = PyUnicode_AsASCIIString(obj);
if (obj2 == NULL) {
+ /* Convert the exception into a TypeError */
+ PyObject *err = PyErr_Occurred();
+ if (PyErr_GivenExceptionMatches(err, PyExc_UnicodeEncodeError)) {
+ PyErr_SetString(PyExc_TypeError,
+ "data type not understood");
+ }
return NPY_FAIL;
}
retval = PyArray_DescrConverter(obj2, at);
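
Effect of the exception rewrite above, sketched: a non-ASCII dtype string used to escape as a raw UnicodeEncodeError; it now surfaces as the usual "data type not understood" TypeError.

    import numpy as np

    try:
        np.dtype(u'int32\u2603')   # snowman is not a valid type code
    except TypeError as e:
        print(e)                   # data type not understood
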
@@ -1591,13 +1580,23 @@ PyArray_DescrConverter(PyObject *obj, PyArray_Descr **at)
goto fail;
}
else {
- *at = _arraydescr_fromobj(obj);
+ *at = _arraydescr_from_dtype_attr(obj);
if (*at) {
return NPY_SUCCEED;
}
if (PyErr_Occurred()) {
return NPY_FAIL;
}
+
+ /*
+ * Note: this comes after _arraydescr_from_dtype_attr because the ctypes
+ * type might override the dtype if numpy does not otherwise
+ * support it.
+ */
+ if (npy_ctypes_check(Py_TYPE(obj))) {
+ *at = _arraydescr_from_ctypes_type(Py_TYPE(obj));
+ return *at ? NPY_SUCCEED : NPY_FAIL;
+ }
goto fail;
}
if (PyErr_Occurred()) {
@@ -1655,6 +1654,9 @@ finish:
if (PyDataType_ISUNSIZED(*at) && (*at)->elsize != elsize) {
PyArray_DESCR_REPLACE(*at);
+ if (*at == NULL) {
+ goto error;
+ }
(*at)->elsize = elsize;
}
if (endian != '=' && PyArray_ISNBO(endian)) {
@@ -1663,6 +1665,9 @@ finish:
if (endian != '=' && (*at)->byteorder != '|'
&& (*at)->byteorder != endian) {
PyArray_DESCR_REPLACE(*at);
+ if (*at == NULL) {
+ goto error;
+ }
(*at)->byteorder = endian;
}
return NPY_SUCCEED;
@@ -1855,72 +1860,17 @@ arraydescr_protocol_typestr_get(PyArray_Descr *self)
}
static PyObject *
-arraydescr_typename_get(PyArray_Descr *self)
+arraydescr_name_get(PyArray_Descr *self)
{
- static const char np_prefix[] = "numpy.";
- const int np_prefix_len = sizeof(np_prefix) - 1;
- PyTypeObject *typeobj = self->typeobj;
+ /* let python handle this */
+ PyObject *_numpy_dtype;
PyObject *res;
- char *s;
- int len;
- int prefix_len;
- int suffix_len;
-
- if (PyTypeNum_ISUSERDEF(self->type_num)) {
- s = strrchr(typeobj->tp_name, '.');
- if (s == NULL) {
- res = PyUString_FromString(typeobj->tp_name);
- }
- else {
- res = PyUString_FromStringAndSize(s + 1, strlen(s) - 1);
- }
- return res;
- }
- else {
- /*
- * NumPy type or subclass
- *
- * res is derived from typeobj->tp_name with the following rules:
- * - if starts with "numpy.", that prefix is removed
- * - if ends with "_", that suffix is removed
- */
- len = strlen(typeobj->tp_name);
-
- if (! strncmp(typeobj->tp_name, np_prefix, np_prefix_len)) {
- prefix_len = np_prefix_len;
- }
- else {
- prefix_len = 0;
- }
-
- if (typeobj->tp_name[len - 1] == '_') {
- suffix_len = 1;
- }
- else {
- suffix_len = 0;
- }
-
- len -= prefix_len;
- len -= suffix_len;
- res = PyUString_FromStringAndSize(typeobj->tp_name+prefix_len, len);
- }
- if (PyTypeNum_ISFLEXIBLE(self->type_num) && !PyDataType_ISUNSIZED(self)) {
- PyObject *p;
- p = PyUString_FromFormat("%d", self->elsize * 8);
- PyUString_ConcatAndDel(&res, p);
- }
- if (PyDataType_ISDATETIME(self)) {
- PyArray_DatetimeMetaData *meta;
-
- meta = get_datetime_metadata_from_dtype(self);
- if (meta == NULL) {
- Py_DECREF(res);
- return NULL;
- }
-
- res = append_metastr_to_string(meta, 0, res);
+ _numpy_dtype = PyImport_ImportModule("numpy.core._dtype");
+ if (_numpy_dtype == NULL) {
+ return NULL;
}
-
+ res = PyObject_CallMethod(_numpy_dtype, "_name_get", "O", self);
+ Py_DECREF(_numpy_dtype);
return res;
}
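
The getter now just calls numpy.core._dtype._name_get; the naming rules themselves (strip the "numpy." prefix and trailing underscore, append bit size for flexible types, keep datetime metadata) are unchanged. A quick sketch:

    import numpy as np

    print(np.dtype(np.float64).name)   # float64
    print(np.dtype('V5').name)         # void40 (flexible: bit size appended)
    print(np.dtype('m8[s]').name)      # timedelta64[s] (metadata kept)
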
@@ -2214,7 +2164,7 @@ static PyGetSetDef arraydescr_getsets[] = {
(getter)arraydescr_protocol_typestr_get,
NULL, NULL, NULL},
{"name",
- (getter)arraydescr_typename_get,
+ (getter)arraydescr_name_get,
NULL, NULL, NULL},
{"base",
(getter)arraydescr_base_get,
@@ -2403,7 +2353,7 @@ arraydescr_reduce(PyArray_Descr *self, PyObject *NPY_UNUSED(args))
if (ret == NULL) {
return NULL;
}
- mod = PyImport_ImportModule("numpy.core.multiarray");
+ mod = PyImport_ImportModule("numpy.core._multiarray_umath");
if (mod == NULL) {
Py_DECREF(ret);
return NULL;
@@ -3168,462 +3118,36 @@ is_dtype_struct_simple_unaligned_layout(PyArray_Descr *dtype)
}
/*
- * Returns a string representation of a structured array,
- * in a list format.
+ * The general dtype repr function.
*/
static PyObject *
-arraydescr_struct_list_str(PyArray_Descr *dtype)
+arraydescr_repr(PyArray_Descr *dtype)
{
- PyObject *names, *key, *fields, *ret, *tmp, *tup, *title;
- Py_ssize_t i, names_size;
- PyArray_Descr *fld_dtype;
- int fld_offset;
-
- names = dtype->names;
- names_size = PyTuple_GET_SIZE(names);
- fields = dtype->fields;
-
- /* Build up a string to make the list */
-
- /* Go through all the names */
- ret = PyUString_FromString("[");
- for (i = 0; i < names_size; ++i) {
- key = PyTuple_GET_ITEM(names, i);
- tup = PyDict_GetItem(fields, key);
- if (tup == NULL) {
- return 0;
- }
- title = NULL;
- if (!PyArg_ParseTuple(tup, "Oi|O", &fld_dtype, &fld_offset, &title)) {
- PyErr_Clear();
- return 0;
- }
- PyUString_ConcatAndDel(&ret, PyUString_FromString("("));
- /* Check for whether to do titles as well */
- if (title != NULL && title != Py_None) {
- PyUString_ConcatAndDel(&ret, PyUString_FromString("("));
- PyUString_ConcatAndDel(&ret, PyObject_Repr(title));
- PyUString_ConcatAndDel(&ret, PyUString_FromString(", "));
- PyUString_ConcatAndDel(&ret, PyObject_Repr(key));
- PyUString_ConcatAndDel(&ret, PyUString_FromString("), "));
- }
- else {
- PyUString_ConcatAndDel(&ret, PyObject_Repr(key));
- PyUString_ConcatAndDel(&ret, PyUString_FromString(", "));
- }
- /* Special case subarray handling here */
- if (PyDataType_HASSUBARRAY(fld_dtype)) {
- tmp = arraydescr_construction_repr(
- fld_dtype->subarray->base, 0, 1);
- PyUString_ConcatAndDel(&ret, tmp);
- PyUString_ConcatAndDel(&ret, PyUString_FromString(", "));
- PyUString_ConcatAndDel(&ret,
- PyObject_Str(fld_dtype->subarray->shape));
- }
- else {
- tmp = arraydescr_construction_repr(fld_dtype, 0, 1);
- PyUString_ConcatAndDel(&ret, tmp);
- }
- PyUString_ConcatAndDel(&ret, PyUString_FromString(")"));
- if (i != names_size - 1) {
- PyUString_ConcatAndDel(&ret, PyUString_FromString(", "));
- }
+ PyObject *_numpy_dtype;
+ PyObject *res;
+ _numpy_dtype = PyImport_ImportModule("numpy.core._dtype");
+ if (_numpy_dtype == NULL) {
+ return NULL;
}
- PyUString_ConcatAndDel(&ret, PyUString_FromString("]"));
-
- return ret;
+ res = PyObject_CallMethod(_numpy_dtype, "__repr__", "O", dtype);
+ Py_DECREF(_numpy_dtype);
+ return res;
}
-
/*
- * Returns a string representation of a structured array,
- * in a dict format.
+ * The general dtype str function.
*/
static PyObject *
-arraydescr_struct_dict_str(PyArray_Descr *dtype, int includealignedflag)
-{
- PyObject *names, *key, *fields, *ret, *tmp, *tup, *title;
- Py_ssize_t i, names_size;
- PyArray_Descr *fld_dtype;
- int fld_offset, has_titles;
-
- names = dtype->names;
- names_size = PyTuple_GET_SIZE(names);
- fields = dtype->fields;
- has_titles = 0;
-
- /* Build up a string to make the dictionary */
-
- /* First, the names */
- ret = PyUString_FromString("{'names':[");
- for (i = 0; i < names_size; ++i) {
- key = PyTuple_GET_ITEM(names, i);
- PyUString_ConcatAndDel(&ret, PyObject_Repr(key));
- if (i != names_size - 1) {
- PyUString_ConcatAndDel(&ret, PyUString_FromString(","));
- }
- }
- /* Second, the formats */
- PyUString_ConcatAndDel(&ret, PyUString_FromString("], 'formats':["));
- for (i = 0; i < names_size; ++i) {
- key = PyTuple_GET_ITEM(names, i);
- tup = PyDict_GetItem(fields, key);
- if (tup == NULL) {
- return 0;
- }
- title = NULL;
- if (!PyArg_ParseTuple(tup, "Oi|O", &fld_dtype, &fld_offset, &title)) {
- PyErr_Clear();
- return 0;
- }
- /* Check for whether to do titles as well */
- if (title != NULL && title != Py_None) {
- has_titles = 1;
- }
- tmp = arraydescr_construction_repr(fld_dtype, 0, 1);
- PyUString_ConcatAndDel(&ret, tmp);
- if (i != names_size - 1) {
- PyUString_ConcatAndDel(&ret, PyUString_FromString(","));
- }
- }
- /* Third, the offsets */
- PyUString_ConcatAndDel(&ret, PyUString_FromString("], 'offsets':["));
- for (i = 0; i < names_size; ++i) {
- key = PyTuple_GET_ITEM(names, i);
- tup = PyDict_GetItem(fields, key);
- if (tup == NULL) {
- return 0;
- }
- if (!PyArg_ParseTuple(tup, "Oi|O", &fld_dtype, &fld_offset, &title)) {
- PyErr_Clear();
- return 0;
- }
- PyUString_ConcatAndDel(&ret, PyUString_FromFormat("%d", fld_offset));
- if (i != names_size - 1) {
- PyUString_ConcatAndDel(&ret, PyUString_FromString(","));
- }
- }
- /* Fourth, the titles */
- if (has_titles) {
- PyUString_ConcatAndDel(&ret, PyUString_FromString("], 'titles':["));
- for (i = 0; i < names_size; ++i) {
- key = PyTuple_GET_ITEM(names, i);
- tup = PyDict_GetItem(fields, key);
- if (tup == NULL) {
- return 0;
- }
- title = Py_None;
- if (!PyArg_ParseTuple(tup, "Oi|O", &fld_dtype,
- &fld_offset, &title)) {
- PyErr_Clear();
- return 0;
- }
- PyUString_ConcatAndDel(&ret, PyObject_Repr(title));
- if (i != names_size - 1) {
- PyUString_ConcatAndDel(&ret, PyUString_FromString(","));
- }
- }
- }
- if (includealignedflag && (dtype->flags&NPY_ALIGNED_STRUCT)) {
- /* Finally, the itemsize/itemsize and aligned flag */
- PyUString_ConcatAndDel(&ret,
- PyUString_FromFormat("], 'itemsize':%d, 'aligned':True}",
- (int)dtype->elsize));
- }
- else {
- /* Finally, the itemsize/itemsize*/
- PyUString_ConcatAndDel(&ret,
- PyUString_FromFormat("], 'itemsize':%d}", (int)dtype->elsize));
- }
-
- return ret;
-}
-
-/* Produces a string representation for a structured dtype */
-static PyObject *
-arraydescr_struct_str(PyArray_Descr *dtype, int includealignflag)
-{
- PyObject *sub;
-
- /*
- * The list str representation can't include the 'align=' flag,
- * so if it is requested and the struct has the aligned flag set,
- * we must use the dict str instead.
- */
- if (!(includealignflag && (dtype->flags&NPY_ALIGNED_STRUCT)) &&
- is_dtype_struct_simple_unaligned_layout(dtype)) {
- sub = arraydescr_struct_list_str(dtype);
- }
- else {
- sub = arraydescr_struct_dict_str(dtype, includealignflag);
- }
-
- /* If the data type isn't the default, void, show it */
- if (dtype->typeobj != &PyVoidArrType_Type) {
- /*
- * Note: We cannot get the type name from dtype->typeobj->tp_name
- * because its value depends on whether the type is dynamically or
- * statically allocated. Instead use __name__ and __module__.
- * See https://docs.python.org/2/c-api/typeobj.html.
- */
-
- PyObject *str_name, *namestr, *str_module, *modulestr, *ret;
-
- str_name = PyUString_FromString("__name__");
- namestr = PyObject_GetAttr((PyObject*)(dtype->typeobj), str_name);
- Py_DECREF(str_name);
-
- if (namestr == NULL) {
- /* this should never happen since types always have __name__ */
- PyErr_Format(PyExc_RuntimeError,
- "dtype does not have a __name__ attribute");
- return NULL;
- }
-
- str_module = PyUString_FromString("__module__");
- modulestr = PyObject_GetAttr((PyObject*)(dtype->typeobj), str_module);
- Py_DECREF(str_module);
-
- ret = PyUString_FromString("(");
- if (modulestr != NULL) {
- /* Note: if modulestr == NULL, the type is unpicklable */
- PyUString_ConcatAndDel(&ret, modulestr);
- PyUString_ConcatAndDel(&ret, PyUString_FromString("."));
- }
- PyUString_ConcatAndDel(&ret, namestr);
- PyUString_ConcatAndDel(&ret, PyUString_FromString(", "));
- PyUString_ConcatAndDel(&ret, sub);
- PyUString_ConcatAndDel(&ret, PyUString_FromString(")"));
- return ret;
- }
- else {
- return sub;
- }
-}
-
-/* Produces a string representation for a subarray dtype */
-static PyObject *
-arraydescr_subarray_str(PyArray_Descr *dtype)
-{
- PyObject *p, *ret;
-
- ret = PyUString_FromString("(");
- p = arraydescr_construction_repr(dtype->subarray->base, 0, 1);
- PyUString_ConcatAndDel(&ret, p);
- PyUString_ConcatAndDel(&ret, PyUString_FromString(", "));
- PyUString_ConcatAndDel(&ret, PyObject_Str(dtype->subarray->shape));
- PyUString_ConcatAndDel(&ret, PyUString_FromString(")"));
-
- return ret;
-}
-
-static PyObject *
arraydescr_str(PyArray_Descr *dtype)
{
- PyObject *sub;
-
- if (PyDataType_HASFIELDS(dtype)) {
- sub = arraydescr_struct_str(dtype, 1);
- }
- else if (PyDataType_HASSUBARRAY(dtype)) {
- sub = arraydescr_subarray_str(dtype);
- }
- else if (PyDataType_ISFLEXIBLE(dtype) || !PyArray_ISNBO(dtype->byteorder)) {
- sub = arraydescr_protocol_typestr_get(dtype);
- }
- else {
- sub = arraydescr_typename_get(dtype);
- }
- return sub;
-}
-
-/*
- * The dtype repr function specifically for structured arrays.
- */
-static PyObject *
-arraydescr_struct_repr(PyArray_Descr *dtype)
-{
- PyObject *sub, *s;
-
- s = PyUString_FromString("dtype(");
- sub = arraydescr_struct_str(dtype, 0);
- if (sub == NULL) {
+ PyObject *_numpy_dtype;
+ PyObject *res;
+ _numpy_dtype = PyImport_ImportModule("numpy.core._dtype");
+ if (_numpy_dtype == NULL) {
return NULL;
}
-
- PyUString_ConcatAndDel(&s, sub);
-
- /* If it's an aligned structure, add the align=True parameter */
- if (dtype->flags&NPY_ALIGNED_STRUCT) {
- PyUString_ConcatAndDel(&s, PyUString_FromString(", align=True"));
- }
-
- PyUString_ConcatAndDel(&s, PyUString_FromString(")"));
- return s;
-}
-
-/* See descriptor.h for documentation */
-NPY_NO_EXPORT PyObject *
-arraydescr_construction_repr(PyArray_Descr *dtype, int includealignflag,
- int shortrepr)
-{
- PyObject *ret;
- PyArray_DatetimeMetaData *meta;
- char byteorder[2];
-
- if (PyDataType_HASFIELDS(dtype)) {
- return arraydescr_struct_str(dtype, includealignflag);
- }
- else if (PyDataType_HASSUBARRAY(dtype)) {
- return arraydescr_subarray_str(dtype);
- }
-
- /* Normalize byteorder to '<' or '>' */
- switch (dtype->byteorder) {
- case NPY_NATIVE:
- byteorder[0] = NPY_NATBYTE;
- break;
- case NPY_SWAP:
- byteorder[0] = NPY_OPPBYTE;
- break;
- case NPY_IGNORE:
- byteorder[0] = '\0';
- break;
- default:
- byteorder[0] = dtype->byteorder;
- break;
- }
- byteorder[1] = '\0';
-
- /* Handle booleans, numbers, and custom dtypes */
- if (dtype->type_num == NPY_BOOL) {
- if (shortrepr) {
- return PyUString_FromString("'?'");
- }
- else {
- return PyUString_FromString("'bool'");
- }
- }
- else if (PyTypeNum_ISNUMBER(dtype->type_num)) {
- /* Short repr with endianness, like '<f8' */
- if (shortrepr || (dtype->byteorder != NPY_NATIVE &&
- dtype->byteorder != NPY_IGNORE)) {
- return PyUString_FromFormat("'%s%c%d'", byteorder,
- (int)dtype->kind, dtype->elsize);
- }
- /* Longer repr, like 'float64' */
- else {
- char *kindstr;
- switch (dtype->kind) {
- case 'u':
- kindstr = "uint";
- break;
- case 'i':
- kindstr = "int";
- break;
- case 'f':
- kindstr = "float";
- break;
- case 'c':
- kindstr = "complex";
- break;
- default:
- PyErr_Format(PyExc_RuntimeError,
- "internal dtype repr error, unknown kind '%c'",
- (int)dtype->kind);
- return NULL;
- }
- return PyUString_FromFormat("'%s%d'", kindstr, 8*dtype->elsize);
- }
- }
- else if (PyTypeNum_ISUSERDEF(dtype->type_num)) {
- char *s = strrchr(dtype->typeobj->tp_name, '.');
- if (s == NULL) {
- return PyUString_FromString(dtype->typeobj->tp_name);
- }
- else {
- return PyUString_FromStringAndSize(s + 1, strlen(s) - 1);
- }
- }
-
- /* All the rest which don't fit in the same pattern */
- switch (dtype->type_num) {
- /*
- * The object reference may be different sizes on different
- * platforms, so it should never include the itemsize here.
- */
- case NPY_OBJECT:
- return PyUString_FromString("'O'");
-
- case NPY_STRING:
- if (PyDataType_ISUNSIZED(dtype)) {
- return PyUString_FromString("'S'");
- }
- else {
- return PyUString_FromFormat("'S%d'", (int)dtype->elsize);
- }
-
- case NPY_UNICODE:
- if (PyDataType_ISUNSIZED(dtype)) {
- return PyUString_FromFormat("'%sU'", byteorder);
- }
- else {
- return PyUString_FromFormat("'%sU%d'", byteorder,
- (int)dtype->elsize / 4);
- }
-
- case NPY_VOID:
- if (PyDataType_ISUNSIZED(dtype)) {
- return PyUString_FromString("'V'");
- }
- else {
- return PyUString_FromFormat("'V%d'", (int)dtype->elsize);
- }
-
- case NPY_DATETIME:
- meta = get_datetime_metadata_from_dtype(dtype);
- if (meta == NULL) {
- return NULL;
- }
- ret = PyUString_FromFormat("'%sM8", byteorder);
- ret = append_metastr_to_string(meta, 0, ret);
- PyUString_ConcatAndDel(&ret, PyUString_FromString("'"));
- return ret;
-
- case NPY_TIMEDELTA:
- meta = get_datetime_metadata_from_dtype(dtype);
- if (meta == NULL) {
- return NULL;
- }
- ret = PyUString_FromFormat("'%sm8", byteorder);
- ret = append_metastr_to_string(meta, 0, ret);
- PyUString_ConcatAndDel(&ret, PyUString_FromString("'"));
- return ret;
-
- default:
- PyErr_SetString(PyExc_RuntimeError, "Internal error: NumPy dtype "
- "unrecognized type number");
- return NULL;
- }
-}
-
-/*
- * The general dtype repr function.
- */
-static PyObject *
-arraydescr_repr(PyArray_Descr *dtype)
-{
- PyObject *ret;
-
- if (PyDataType_HASFIELDS(dtype)) {
- return arraydescr_struct_repr(dtype);
- }
- else {
- ret = PyUString_FromString("dtype(");
- PyUString_ConcatAndDel(&ret,
- arraydescr_construction_repr(dtype, 1, 0));
- PyUString_ConcatAndDel(&ret, PyUString_FromString(")"));
- return ret;
- }
+ res = PyObject_CallMethod(_numpy_dtype, "__str__", "O", dtype);
+ Py_DECREF(_numpy_dtype);
+ return res;
}
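
With the hundreds of lines of C string building above removed, repr and str of a dtype are two thin wrappers around numpy.core._dtype. A sketch showing the delegation is observable from Python (module-level __repr__/__str__ functions shadow the module type's slots):

    import numpy as np
    import numpy.core._dtype as _dtype

    dt = np.dtype([('a', '<i4'), ('b', '<f8')])
    print(str(dt))                        # [('a', '<i4'), ('b', '<f8')]
    print(repr(dt))                       # dtype([('a', '<i4'), ('b', '<f8')])
    print(_dtype.__str__(dt) == str(dt))  # True: the C getter calls this
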
static PyObject *
@@ -3761,10 +3285,15 @@ _check_has_fields(PyArray_Descr *self)
{
if (!PyDataType_HASFIELDS(self)) {
PyObject *astr = arraydescr_str(self);
+ if (astr == NULL) {
+ return -1;
+ }
#if defined(NPY_PY3K)
- PyObject *bstr = PyUnicode_AsUnicodeEscapeString(astr);
- Py_DECREF(astr);
- astr = bstr;
+ {
+ PyObject *bstr = PyUnicode_AsUnicodeEscapeString(astr);
+ Py_DECREF(astr);
+ astr = bstr;
+ }
#endif
PyErr_Format(PyExc_KeyError,
"There are no fields in dtype %s.", PyBytes_AsString(astr));
diff --git a/numpy/core/src/multiarray/descriptor.h b/numpy/core/src/multiarray/descriptor.h
index f95041195..a5f3b8cdf 100644
--- a/numpy/core/src/multiarray/descriptor.h
+++ b/numpy/core/src/multiarray/descriptor.h
@@ -8,38 +8,12 @@ NPY_NO_EXPORT PyObject *
array_set_typeDict(PyObject *NPY_UNUSED(ignored), PyObject *args);
NPY_NO_EXPORT PyArray_Descr *
-_arraydescr_fromobj(PyObject *obj);
+_arraydescr_from_dtype_attr(PyObject *obj);
NPY_NO_EXPORT int
is_dtype_struct_simple_unaligned_layout(PyArray_Descr *dtype);
-/*
- * Creates a string repr of the dtype, excluding the 'dtype()' part
- * surrounding the object. This object may be a string, a list, or
- * a dict depending on the nature of the dtype. This
- * is the object passed as the first parameter to the dtype
- * constructor, and if no additional constructor parameters are
- * given, will reproduce the exact memory layout.
- *
- * If 'shortrepr' is non-zero, this creates a shorter repr using
- * 'kind' and 'itemsize', instead of the longer type name.
- *
- * If 'includealignflag' is true, this includes the 'align=True' parameter
- * inside the struct dtype construction dict when needed. Use this flag
- * if you want a proper repr string without the 'dtype()' part around it.
- *
- * If 'includealignflag' is false, this does not preserve the
- * 'align=True' parameter or sticky NPY_ALIGNED_STRUCT flag for
- * struct arrays like the regular repr does, because the 'align'
- * flag is not part of first dtype constructor parameter. This
- * mode is intended for a full 'repr', where the 'align=True' is
- * provided as the second parameter.
- */
-NPY_NO_EXPORT PyObject *
-arraydescr_construction_repr(PyArray_Descr *dtype, int includealignflag,
- int shortrepr);
-
extern NPY_NO_EXPORT char *_datetime_strings[];
#endif
diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c
index 2cb1e0a95..63b1ead25 100644
--- a/numpy/core/src/multiarray/dtype_transfer.c
+++ b/numpy/core/src/multiarray/dtype_transfer.c
@@ -26,6 +26,7 @@
#include "_datetime.h"
#include "datetime_strings.h"
#include "descriptor.h"
+#include "array_assign.h"
#include "shape.h"
#include "lowlevel_strided_loops.h"
@@ -51,6 +52,20 @@
#endif
/**********************************************/
+#if NPY_DT_DBG_TRACING
+/*
+ * Thin wrapper around print that ignores exceptions
+ */
+static void
+_safe_print(PyObject *obj)
+{
+ if (PyObject_Print(obj, stdout, 0) < 0) {
+ PyErr_Clear();
+ printf("<error during print>");
+ }
+}
+#endif
+
/*
* Returns a transfer function which DECREFs any references in src_type.
*
@@ -1042,9 +1057,9 @@ get_nbo_cast_datetime_transfer_function(int aligned,
#if NPY_DT_DBG_TRACING
printf("Dtype transfer from ");
- PyObject_Print((PyObject *)src_dtype, stdout, 0);
+ _safe_print((PyObject *)src_dtype);
printf(" to ");
- PyObject_Print((PyObject *)dst_dtype, stdout, 0);
+ _safe_print((PyObject *)dst_dtype);
printf("\n");
printf("has conversion fraction %lld/%lld\n", num, denom);
#endif
@@ -1089,9 +1104,9 @@ get_nbo_datetime_to_string_transfer_function(int aligned,
#if NPY_DT_DBG_TRACING
printf("Dtype transfer from ");
- PyObject_Print((PyObject *)src_dtype, stdout, 0);
+ _safe_print((PyObject *)src_dtype);
printf(" to ");
- PyObject_Print((PyObject *)dst_dtype, stdout, 0);
+ _safe_print((PyObject *)dst_dtype);
printf("\n");
#endif
@@ -1211,9 +1226,9 @@ get_nbo_string_to_datetime_transfer_function(int aligned,
#if NPY_DT_DBG_TRACING
printf("Dtype transfer from ");
- PyObject_Print((PyObject *)src_dtype, stdout, 0);
+ _safe_print((PyObject *)src_dtype);
printf(" to ");
- PyObject_Print((PyObject *)dst_dtype, stdout, 0);
+ _safe_print((PyObject *)dst_dtype);
printf("\n");
#endif
@@ -2965,6 +2980,10 @@ static void _strided_masked_wrapper_decsrcref_transfer_function(
dst += subloopsize * dst_stride;
src += subloopsize * src_stride;
N -= subloopsize;
+ if (N <= 0) {
+ break;
+ }
+
/* Process unmasked values */
mask = (npy_bool*)npy_memchr((char *)mask, 0, mask_stride, N,
&subloopsize, 0);
@@ -3000,6 +3019,10 @@ static void _strided_masked_wrapper_transfer_function(
dst += subloopsize * dst_stride;
src += subloopsize * src_stride;
N -= subloopsize;
+ if (N <= 0) {
+ break;
+ }
+
/* Process unmasked values */
mask = (npy_bool*)npy_memchr((char *)mask, 0, mask_stride, N,
&subloopsize, 0);
@@ -3413,9 +3436,13 @@ PyArray_GetDTypeTransferFunction(int aligned,
#if NPY_DT_DBG_TRACING
printf("Calculating dtype transfer from ");
- PyObject_Print((PyObject *)src_dtype, stdout, 0);
+ if (PyObject_Print((PyObject *)src_dtype, stdout, 0) < 0) {
+ return NPY_FAIL;
+ }
printf(" to ");
- PyObject_Print((PyObject *)dst_dtype, stdout, 0);
+ if (PyObject_Print((PyObject *)dst_dtype, stdout, 0) < 0) {
+ return NPY_FAIL;
+ }
printf("\n");
#endif
@@ -3739,11 +3766,15 @@ PyArray_CastRawArrays(npy_intp count,
return NPY_SUCCEED;
}
- /* Check data alignment */
- aligned = (((npy_intp)src | src_stride) &
- (src_dtype->alignment - 1)) == 0 &&
- (((npy_intp)dst | dst_stride) &
- (dst_dtype->alignment - 1)) == 0;
+ /* Check data alignment, both uint and true */
+ aligned = raw_array_is_aligned(1, &count, dst, &dst_stride,
+ npy_uint_alignment(dst_dtype->elsize)) &&
+ raw_array_is_aligned(1, &count, dst, &dst_stride,
+ dst_dtype->alignment) &&
+ raw_array_is_aligned(1, &count, src, &src_stride,
+ npy_uint_alignment(src_dtype->elsize)) &&
+ raw_array_is_aligned(1, &count, src, &src_stride,
+ src_dtype->alignment);
/* Get the function to do the casting */
if (PyArray_GetDTypeTransferFunction(aligned,
diff --git a/numpy/core/src/multiarray/flagsobject.c b/numpy/core/src/multiarray/flagsobject.c
index a78bedccb..85ea49fb4 100644
--- a/numpy/core/src/multiarray/flagsobject.c
+++ b/numpy/core/src/multiarray/flagsobject.c
@@ -12,6 +12,7 @@
#include "npy_config.h"
#include "npy_pycompat.h"
+#include "array_assign.h"
#include "common.h"
@@ -64,7 +65,7 @@ PyArray_UpdateFlags(PyArrayObject *ret, int flagmask)
_UpdateContiguousFlags(ret);
}
if (flagmask & NPY_ARRAY_ALIGNED) {
- if (_IsAligned(ret)) {
+ if (IsAligned(ret)) {
PyArray_ENABLEFLAGS(ret, NPY_ARRAY_ALIGNED);
}
else {
diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c
index 925585704..a7c6b14f4 100644
--- a/numpy/core/src/multiarray/item_selection.c
+++ b/numpy/core/src/multiarray/item_selection.c
@@ -19,6 +19,7 @@
#include "arrayobject.h"
#include "ctors.h"
#include "lowlevel_strided_loops.h"
+#include "array_assign.h"
#include "item_selection.h"
#include "npy_sort.h"
@@ -44,7 +45,7 @@ PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis,
indices = NULL;
self = (PyArrayObject *)PyArray_CheckAxis(self0, &axis,
- NPY_ARRAY_CARRAY);
+ NPY_ARRAY_CARRAY_RO);
if (self == NULL) {
return NULL;
}
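
Relaxing NPY_ARRAY_CARRAY to NPY_ARRAY_CARRAY_RO means take() no longer demands a writeable input; it only reads from `self` and always returns a fresh output array anyway. Sketch:

    import numpy as np

    a = np.arange(5)
    a.flags.writeable = False
    print(np.take(a, [0, 3, 4]))   # [0 3 4], works on a read-only array
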
@@ -809,7 +810,7 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort,
npy_intp elsize = (npy_intp)PyArray_ITEMSIZE(op);
npy_intp astride = PyArray_STRIDE(op, axis);
int swap = PyArray_ISBYTESWAPPED(op);
- int needcopy = !PyArray_ISALIGNED(op) || swap || astride != elsize;
+ int needcopy = !IsAligned(op) || swap || astride != elsize;
int hasrefs = PyDataType_REFCHK(PyArray_DESCR(op));
PyArray_CopySwapNFunc *copyswapn = PyArray_DESCR(op)->f->copyswapn;
@@ -937,7 +938,7 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort,
npy_intp elsize = (npy_intp)PyArray_ITEMSIZE(op);
npy_intp astride = PyArray_STRIDE(op, axis);
int swap = PyArray_ISBYTESWAPPED(op);
- int needcopy = !PyArray_ISALIGNED(op) || swap || astride != elsize;
+ int needcopy = !IsAligned(op) || swap || astride != elsize;
int hasrefs = PyDataType_REFCHK(PyArray_DESCR(op));
int needidxbuffer;
diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c
index 3e3248f53..a3bc8e742 100644
--- a/numpy/core/src/multiarray/iterators.c
+++ b/numpy/core/src/multiarray/iterators.c
@@ -92,114 +92,6 @@ parse_index_entry(PyObject *op, npy_intp *step_size,
}
-/*
- * Parses an index that has no fancy indexing. Populates
- * out_dimensions, out_strides, and out_offset.
- */
-NPY_NO_EXPORT int
-parse_index(PyArrayObject *self, PyObject *op,
- npy_intp *out_dimensions,
- npy_intp *out_strides,
- npy_intp *out_offset,
- int check_index)
-{
- int i, j, n;
- int nd_old, nd_new, n_add, n_ellipsis;
- npy_intp n_steps, start, offset, step_size;
- PyObject *op1 = NULL;
- int is_slice;
-
- if (PySlice_Check(op) || op == Py_Ellipsis || op == Py_None) {
- n = 1;
- op1 = op;
- Py_INCREF(op);
- /* this relies on the fact that n==1 for loop below */
- is_slice = 1;
- }
- else {
- if (!PySequence_Check(op)) {
- PyErr_SetString(PyExc_IndexError,
- "index must be either an int "
- "or a sequence");
- return -1;
- }
- n = PySequence_Length(op);
- is_slice = 0;
- }
-
- nd_old = nd_new = 0;
-
- offset = 0;
- for (i = 0; i < n; i++) {
- if (!is_slice) {
- op1 = PySequence_GetItem(op, i);
- if (op1 == NULL) {
- return -1;
- }
- }
- start = parse_index_entry(op1, &step_size, &n_steps,
- nd_old < PyArray_NDIM(self) ?
- PyArray_DIMS(self)[nd_old] : 0,
- nd_old, check_index ?
- nd_old < PyArray_NDIM(self) : 0);
- Py_DECREF(op1);
- if (start == -1) {
- break;
- }
- if (n_steps == NEWAXIS_INDEX) {
- out_dimensions[nd_new] = 1;
- out_strides[nd_new] = 0;
- nd_new++;
- }
- else if (n_steps == ELLIPSIS_INDEX) {
- for (j = i + 1, n_ellipsis = 0; j < n; j++) {
- op1 = PySequence_GetItem(op, j);
- if (op1 == Py_None) {
- n_ellipsis++;
- }
- Py_DECREF(op1);
- }
- n_add = PyArray_NDIM(self)-(n-i-n_ellipsis-1+nd_old);
- if (n_add < 0) {
- PyErr_SetString(PyExc_IndexError, "too many indices");
- return -1;
- }
- for (j = 0; j < n_add; j++) {
- out_dimensions[nd_new] = PyArray_DIMS(self)[nd_old];
- out_strides[nd_new] = PyArray_STRIDES(self)[nd_old];
- nd_new++; nd_old++;
- }
- }
- else {
- if (nd_old >= PyArray_NDIM(self)) {
- PyErr_SetString(PyExc_IndexError, "too many indices");
- return -1;
- }
- offset += PyArray_STRIDES(self)[nd_old]*start;
- nd_old++;
- if (n_steps != SINGLE_INDEX) {
- out_dimensions[nd_new] = n_steps;
- out_strides[nd_new] = step_size *
- PyArray_STRIDES(self)[nd_old-1];
- nd_new++;
- }
- }
- }
- if (i < n) {
- return -1;
- }
- n_add = PyArray_NDIM(self)-nd_old;
- for (j = 0; j < n_add; j++) {
- out_dimensions[nd_new] = PyArray_DIMS(self)[nd_old];
- out_strides[nd_new] = PyArray_STRIDES(self)[nd_old];
- nd_new++;
- nd_old++;
- }
- *out_offset = offset;
- return nd_new;
-}
-
-
/*********************** Element-wise Array Iterator ***********************/
/* Aided by Peter J. Verveer's nd_image package and numpy's arraymap ****/
/* and Python's array iterator ***/
diff --git a/numpy/core/src/multiarray/iterators.h b/numpy/core/src/multiarray/iterators.h
index 04f57c885..376dc154a 100644
--- a/numpy/core/src/multiarray/iterators.h
+++ b/numpy/core/src/multiarray/iterators.h
@@ -1,17 +1,6 @@
#ifndef _NPY_ARRAYITERATORS_H_
#define _NPY_ARRAYITERATORS_H_
-/*
- * Parses an index that has no fancy indexing. Populates
- * out_dimensions, out_strides, and out_offset.
- */
-NPY_NO_EXPORT int
-parse_index(PyArrayObject *self, PyObject *op,
- npy_intp *out_dimensions,
- npy_intp *out_strides,
- npy_intp *out_offset,
- int check_index);
-
NPY_NO_EXPORT PyObject
*iter_subscript(PyArrayIterObject *, PyObject *);
diff --git a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
index fa68af19a..16bacf1ab 100644
--- a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
+++ b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
@@ -10,7 +10,6 @@
#define PY_SSIZE_T_CLEAN
#include "Python.h"
-#include "structmember.h"
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#define _MULTIARRAYMODULE
@@ -19,16 +18,7 @@
#include <numpy/halffloat.h>
#include "lowlevel_strided_loops.h"
-
-/* used for some alignment checks */
-#define _ALIGN(type) offsetof(struct {char c; type v;}, v)
-/*
- * Disable harmless compiler warning "4116: unnamed type definition in
- * parentheses" which is caused by the _ALIGN macro.
- */
-#if defined(_MSC_VER)
-#pragma warning(disable:4116)
-#endif
+#include "array_assign.h"
/*
@@ -92,7 +82,7 @@
/**begin repeat
* #elsize = 1, 2, 4, 8, 16#
* #elsize_half = 0, 1, 2, 4, 8#
- * #type = npy_uint8, npy_uint16, npy_uint32, npy_uint64, npy_uint128#
+ * #type = npy_uint8, npy_uint16, npy_uint32, npy_uint64, npy_uint64#
*/
/**begin repeat1
* #oper = strided_to_strided, strided_to_contig,
@@ -129,10 +119,10 @@ static void
npy_intp N, npy_intp NPY_UNUSED(src_itemsize),
NpyAuxData *NPY_UNUSED(data))
{
-#if @is_aligned@ && @elsize@ != 16
+#if @is_aligned@
/* sanity check */
- assert(npy_is_aligned(dst, _ALIGN(@type@)));
- assert(npy_is_aligned(src, _ALIGN(@type@)));
+ assert(N == 0 || npy_is_aligned(dst, _UINT_ALIGN(@type@)));
+ assert(N == 0 || npy_is_aligned(src, _UINT_ALIGN(@type@)));
#endif
/*printf("fn @prefix@_@oper@_size@elsize@\n");*/
while (N > 0) {
@@ -211,8 +201,8 @@ static NPY_GCC_OPT_3 void
}
#if @is_aligned@ && @elsize@ != 16
/* sanity check */
- assert(npy_is_aligned(dst, _ALIGN(@type@)));
- assert(npy_is_aligned(src, _ALIGN(@type@)));
+ assert(N == 0 || npy_is_aligned(dst, _UINT_ALIGN(@type@)));
+ assert(N == 0 || npy_is_aligned(src, _UINT_ALIGN(@type@)));
#endif
#if @elsize@ == 1 && @dst_contig@
memset(dst, *src, N);
@@ -818,12 +808,8 @@ static NPY_GCC_OPT_3 void
#if @aligned@
/* sanity check */
-# if !@is_complex1@
- assert(npy_is_aligned(src, _ALIGN(_TYPE1)));
-# endif
-# if !@is_complex2@
- assert(npy_is_aligned(dst, _ALIGN(_TYPE2)));
-# endif
+ assert(N == 0 || npy_is_aligned(src, _ALIGN(_TYPE1)));
+ assert(N == 0 || npy_is_aligned(dst, _ALIGN(_TYPE2)));
#endif
/*printf("@prefix@_cast_@name1@_to_@name2@\n");*/
@@ -1371,7 +1357,7 @@ PyArray_TransferMaskedStridedToNDim(npy_intp ndim,
*/
/*
- * Advanded indexing iteration of arrays when there is a single indexing
+ * Advanced indexing iteration of arrays when there is a single indexing
* array which has the same memory order as the value array and both
* can be trivially iterated (single stride, aligned, no casting necessary).
*/
@@ -1385,7 +1371,7 @@ mapiter_trivial_@name@(PyArrayObject *self, PyArrayObject *ind,
npy_intp itersize;
- int is_aligned = PyArray_ISALIGNED(self) && PyArray_ISALIGNED(result);
+ int is_aligned = IsUintAligned(self) && IsUintAligned(result);
int needs_api = PyDataType_REFCHK(PyArray_DESCR(self));
PyArray_CopySwapFunc *copyswap = PyArray_DESCR(self)->f->copyswap;
@@ -1405,7 +1391,7 @@ mapiter_trivial_@name@(PyArrayObject *self, PyArrayObject *ind,
/* Check the indices beforehand */
while (itersize--) {
npy_intp indval = *((npy_intp*)ind_ptr);
- if (check_and_adjust_index(&indval, fancy_dim, 1, _save) < 0 ) {
+ if (check_and_adjust_index(&indval, fancy_dim, 0, _save) < 0 ) {
return -1;
}
ind_ptr += ind_stride;
@@ -1435,9 +1421,9 @@ mapiter_trivial_@name@(PyArrayObject *self, PyArrayObject *ind,
while (itersize--) {
char * self_ptr;
npy_intp indval = *((npy_intp*)ind_ptr);
- assert(npy_is_aligned(ind_ptr, _ALIGN(npy_intp)));
+ assert(npy_is_aligned(ind_ptr, _UINT_ALIGN(npy_intp)));
#if @isget@
- if (check_and_adjust_index(&indval, fancy_dim, 1, _save) < 0 ) {
+ if (check_and_adjust_index(&indval, fancy_dim, 0, _save) < 0 ) {
return -1;
}
#else
@@ -1449,8 +1435,8 @@ mapiter_trivial_@name@(PyArrayObject *self, PyArrayObject *ind,
#if @isget@
#if @elsize@
- assert(npy_is_aligned(result_ptr, _ALIGN(@copytype@)));
- assert(npy_is_aligned(self_ptr, _ALIGN(@copytype@)));
+ assert(npy_is_aligned(result_ptr, _UINT_ALIGN(@copytype@)));
+ assert(npy_is_aligned(self_ptr, _UINT_ALIGN(@copytype@)));
*(@copytype@ *)result_ptr = *(@copytype@ *)self_ptr;
#else
copyswap(result_ptr, self_ptr, 0, self);
@@ -1458,8 +1444,8 @@ mapiter_trivial_@name@(PyArrayObject *self, PyArrayObject *ind,
#else /* !@isget@ */
#if @elsize@
- assert(npy_is_aligned(result_ptr, _ALIGN(@copytype@)));
- assert(npy_is_aligned(self_ptr, _ALIGN(@copytype@)));
+ assert(npy_is_aligned(result_ptr, _UINT_ALIGN(@copytype@)));
+ assert(npy_is_aligned(self_ptr, _UINT_ALIGN(@copytype@)));
*(@copytype@ *)self_ptr = *(@copytype@ *)result_ptr;
#else
copyswap(self_ptr, result_ptr, 0, self);
@@ -1518,7 +1504,7 @@ mapiter_@name@(PyArrayMapIterObject *mit)
* could also check extra_op is buffered, but it should rarely matter.
*/
- is_aligned = PyArray_ISALIGNED(array) && PyArray_ISALIGNED(mit->extra_op);
+ is_aligned = IsUintAligned(array) && IsUintAligned(mit->extra_op);
if (mit->size == 0) {
return 0;
@@ -1581,7 +1567,7 @@ mapiter_@name@(PyArrayMapIterObject *mit)
for (i=0; i < @numiter@; i++) {
npy_intp indval = *((npy_intp*)outer_ptrs[i]);
assert(npy_is_aligned(outer_ptrs[i],
- _ALIGN(npy_intp)));
+ _UINT_ALIGN(npy_intp)));
#if @isget@ && @one_iter@
if (check_and_adjust_index(&indval, fancy_dims[i],
@@ -1601,16 +1587,20 @@ mapiter_@name@(PyArrayMapIterObject *mit)
#if @isget@
#if @elsize@
- assert(npy_is_aligned(outer_ptrs[i], _ALIGN(@copytype@)));
- assert(npy_is_aligned(self_ptr, _ALIGN(@copytype@)));
+ assert(npy_is_aligned(outer_ptrs[i],
+ _UINT_ALIGN(@copytype@)));
+ assert(npy_is_aligned(self_ptr,
+ _UINT_ALIGN(@copytype@)));
*(@copytype@ *)(outer_ptrs[i]) = *(@copytype@ *)self_ptr;
#else
copyswap(outer_ptrs[i], self_ptr, 0, array);
#endif
#else /* !@isget@ */
#if @elsize@
- assert(npy_is_aligned(outer_ptrs[i], _ALIGN(@copytype@)));
- assert(npy_is_aligned(self_ptr, _ALIGN(@copytype@)));
+ assert(npy_is_aligned(outer_ptrs[i],
+ _UINT_ALIGN(@copytype@)));
+ assert(npy_is_aligned(self_ptr,
+ _UINT_ALIGN(@copytype@)));
*(@copytype@ *)self_ptr = *(@copytype@ *)(outer_ptrs[i]);
#else
copyswap(self_ptr, outer_ptrs[i], 0, array);
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index 57b8f15c2..17edd2bbf 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -20,6 +20,7 @@
#include "lowlevel_strided_loops.h"
#include "item_selection.h"
#include "mem_overlap.h"
+#include "array_assign.h"
#define HAS_INTEGER 1
@@ -1063,7 +1064,8 @@ array_boolean_subscript(PyArrayObject *self,
/* Get a dtype transfer function */
NpyIter_GetInnerFixedStrideArray(iter, fixed_strides);
- if (PyArray_GetDTypeTransferFunction(PyArray_ISALIGNED(self),
+ if (PyArray_GetDTypeTransferFunction(
+ IsUintAligned(self) && IsAligned(self),
fixed_strides[0], itemsize,
dtype, dtype,
0,
@@ -1252,7 +1254,8 @@ array_assign_boolean_subscript(PyArrayObject *self,
/* Get a dtype transfer function */
NpyIter_GetInnerFixedStrideArray(iter, fixed_strides);
if (PyArray_GetDTypeTransferFunction(
- PyArray_ISALIGNED(self) && PyArray_ISALIGNED(v),
+ IsUintAligned(self) && IsAligned(self) &&
+ IsUintAligned(v) && IsAligned(v),
v_stride, fixed_strides[0],
PyArray_DESCR(v), PyArray_DESCR(self),
0,
@@ -1388,54 +1391,14 @@ array_subscript_asarray(PyArrayObject *self, PyObject *op)
}
/*
- * Helper function for _get_field_view which turns a multifield
- * view into a "packed" copy, as done in numpy 1.15 and before.
- * In numpy 1.16 this function should be removed.
- */
-NPY_NO_EXPORT int
-_multifield_view_to_copy(PyArrayObject **view) {
- static PyObject *copyfunc = NULL;
- PyObject *viewcopy;
-
- /* return a repacked copy of the view */
- npy_cache_import("numpy.lib.recfunctions", "repack_fields", &copyfunc);
- if (copyfunc == NULL) {
- goto view_fail;
- }
-
- PyArray_CLEARFLAGS(*view, NPY_ARRAY_WARN_ON_WRITE);
- viewcopy = PyObject_CallFunction(copyfunc, "O", *view);
- if (viewcopy == NULL) {
- goto view_fail;
- }
- Py_DECREF(*view);
- *view = (PyArrayObject*)viewcopy;
-
- /* warn when writing to the copy */
- PyArray_ENABLEFLAGS(*view, NPY_ARRAY_WARN_ON_WRITE);
- return 0;
-
-view_fail:
- Py_DECREF(*view);
- *view = NULL;
- return 0;
-}
-
-/*
* Attempts to subscript an array using a field name or list of field names.
*
* If an error occurred, return 0 and set view to NULL. If the subscript is not
* a string or list of strings, return -1 and set view to NULL. Otherwise
* return 0 and set view to point to a new view into arr for the given fields.
- *
- * In numpy 1.15 and before, in the case of a list of field names the returned
- * view will actually be a copy by default, with fields packed together.
- * The `force_view` argument causes a view to be returned. This argument can be
- * removed in 1.16 when we plan to return a view always.
*/
NPY_NO_EXPORT int
-_get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view,
- int force_view)
+_get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view)
{
*view = NULL;
@@ -1596,11 +1559,7 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view,
return 0;
}
- /* the code below can be replaced by "return 0" in 1.16 */
- if (force_view) {
- return 0;
- }
- return _multifield_view_to_copy(view);
+ return 0;
}
return -1;
}
@@ -1628,7 +1587,7 @@ array_subscript(PyArrayObject *self, PyObject *op)
/* return fields if op is a string index */
if (PyDataType_HASFIELDS(PyArray_DESCR(self))) {
PyArrayObject *view;
- int ret = _get_field_view(self, op, &view, 0);
+ int ret = _get_field_view(self, op, &view);
if (ret == 0){
if (view == NULL) {
return NULL;
@@ -1723,7 +1682,7 @@ array_subscript(PyArrayObject *self, PyObject *op)
/* Check if the type is equivalent to INTP */
PyArray_ITEMSIZE(ind) == sizeof(npy_intp) &&
PyArray_DESCR(ind)->kind == 'i' &&
- PyArray_ISALIGNED(ind) &&
+ IsUintAligned(ind) &&
PyDataType_ISNOTSWAPPED(PyArray_DESCR(ind))) {
Py_INCREF(PyArray_DESCR(self));
@@ -1910,7 +1869,7 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op)
/* field access */
if (PyDataType_HASFIELDS(PyArray_DESCR(self))){
PyArrayObject *view;
- int ret = _get_field_view(self, ind, &view, 1);
+ int ret = _get_field_view(self, ind, &view);
if (ret == 0){
if (view == NULL) {
return -1;
@@ -2086,7 +2045,7 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op)
/* Check if the type is equivalent to INTP */
PyArray_ITEMSIZE(ind) == sizeof(npy_intp) &&
PyArray_DESCR(ind)->kind == 'i' &&
- PyArray_ISALIGNED(ind) &&
+ IsUintAligned(ind) &&
PyDataType_ISNOTSWAPPED(PyArray_DESCR(ind))) {
/* trivial_set checks the index for us */
@@ -2606,7 +2565,7 @@ PyArray_MapIterCheckIndices(PyArrayMapIterObject *mit)
/* Check if the type is equivalent to INTP */
PyArray_ITEMSIZE(op) == sizeof(npy_intp) &&
PyArray_DESCR(op)->kind == 'i' &&
- PyArray_ISALIGNED(op) &&
+ IsUintAligned(op) &&
PyDataType_ISNOTSWAPPED(PyArray_DESCR(op))) {
char *data;
npy_intp stride;
@@ -2915,20 +2874,20 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type,
Py_INCREF(extra_op_dtype);
mit->extra_op_dtype = extra_op_dtype;
- /* Create an iterator, just to broadcast the arrays?! */
- tmp_iter = NpyIter_MultiNew(mit->numiter, index_arrays,
- NPY_ITER_ZEROSIZE_OK |
- NPY_ITER_REFS_OK |
- NPY_ITER_MULTI_INDEX |
- NPY_ITER_DONT_NEGATE_STRIDES,
- NPY_KEEPORDER,
- NPY_UNSAFE_CASTING,
- tmp_op_flags, NULL);
- if (tmp_iter == NULL) {
- goto fail;
- }
-
if (PyArray_SIZE(subspace) == 1) {
+ /* Create an iterator, just to broadcast the arrays?! */
+ tmp_iter = NpyIter_MultiNew(mit->numiter, index_arrays,
+ NPY_ITER_ZEROSIZE_OK |
+ NPY_ITER_REFS_OK |
+ NPY_ITER_MULTI_INDEX |
+ NPY_ITER_DONT_NEGATE_STRIDES,
+ NPY_KEEPORDER,
+ NPY_UNSAFE_CASTING,
+ tmp_op_flags, NULL);
+ if (tmp_iter == NULL) {
+ goto fail;
+ }
+
/*
* nditer allows itemsize with npy_intp type, so it works
* here, but it would *not* work directly, since elsize
@@ -2941,6 +2900,7 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type,
"internal error: failed to find output array strides");
goto fail;
}
+ NpyIter_Deallocate(tmp_iter);
}
else {
/* Just use C-order strides (TODO: allow also F-order) */
@@ -2950,7 +2910,6 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type,
stride *= mit->dimensions[i];
}
}
- NpyIter_Deallocate(tmp_iter);
/* shape is set, and strides is set up to mit->nd, set rest */
PyArray_CreateSortedStridePerm(PyArray_NDIM(subspace),
diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c
index d6f2577a3..7c814e6e6 100644
--- a/numpy/core/src/multiarray/methods.c
+++ b/numpy/core/src/multiarray/methods.c
@@ -21,6 +21,7 @@
#include "conversion_utils.h"
#include "shape.h"
#include "strfuncs.h"
+#include "array_assign.h"
#include "methods.h"
#include "alloc.h"
@@ -186,7 +187,7 @@ array_reshape(PyArrayObject *self, PyObject *args, PyObject *kwds)
}
if (n <= 1) {
- if (PyTuple_GET_ITEM(args, 0) == Py_None) {
+ if (n != 0 && PyTuple_GET_ITEM(args, 0) == Py_None) {
return PyArray_View(self, NULL, NULL);
}
if (!PyArg_ParseTuple(args, "O&:reshape", PyArray_IntpConverter,
@@ -355,6 +356,7 @@ PyArray_GetField(PyArrayObject *self, PyArray_Descr *typed, int offset)
PyObject *ret = NULL;
PyObject *safe;
static PyObject *checkfunc = NULL;
+ int self_elsize, typed_elsize;
/* check that we are not reinterpreting memory containing Objects. */
if (_may_have_objects(PyArray_DESCR(self)) || _may_have_objects(typed)) {
@@ -372,6 +374,22 @@ PyArray_GetField(PyArrayObject *self, PyArray_Descr *typed, int offset)
}
Py_DECREF(safe);
}
+ self_elsize = PyArray_ITEMSIZE(self);
+ typed_elsize = typed->elsize;
+
+ /* check that values are valid */
+ if (typed_elsize > self_elsize) {
+ PyErr_SetString(PyExc_ValueError, "new type is larger than original type");
+ return NULL;
+ }
+ if (offset < 0) {
+ PyErr_SetString(PyExc_ValueError, "offset is negative");
+ return NULL;
+ }
+ if (offset > self_elsize - typed_elsize) {
+ PyErr_SetString(PyExc_ValueError, "new type plus offset is larger than original type");
+ return NULL;
+ }
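+    /*
+     * A minimal sketch of the invariant the three checks above enforce
+     * (stated here for illustration, not part of this patch): the field
+     * must lie entirely inside one element of the original array, i.e.
+     *     0 <= offset && offset + typed_elsize <= self_elsize.
+     * The subtraction form used above also avoids the signed overflow
+     * that `offset + typed_elsize` could hit for very large offsets.
+     */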
ret = PyArray_NewFromDescr_int(
Py_TYPE(self), typed,
@@ -970,15 +988,68 @@ array_getarray(PyArrayObject *self, PyObject *args)
}
}
+/*
+ * Check whether any of a set of input and output args have a non-default
+ * __array_ufunc__ method. Return 1 if so, 0 if not, and -1 on error.
+ *
+ * This function primarily exists to help ndarray.__array_ufunc__ determine
+ * whether it can support a ufunc (which is the case only if none of the
+ * operands have an override). Thus, unlike in umath/override.c, the
+ * actual overrides are not needed and one can stop looking once one is found.
+ */
+static int
+any_array_ufunc_overrides(PyObject *args, PyObject *kwds)
+{
+ int i;
+ int nin, nout;
+ PyObject *out_kwd_obj;
+ PyObject *fast;
+ PyObject **in_objs, **out_objs;
-static PyObject *
+ /* check inputs */
+ nin = PyTuple_Size(args);
+ if (nin < 0) {
+ return -1;
+ }
+ fast = PySequence_Fast(args, "Could not convert object to sequence");
+ if (fast == NULL) {
+ return -1;
+ }
+ in_objs = PySequence_Fast_ITEMS(fast);
+ for (i = 0; i < nin; ++i) {
+ if (PyUFunc_HasOverride(in_objs[i])) {
+ Py_DECREF(fast);
+ return 1;
+ }
+ }
+ Py_DECREF(fast);
+ /* check outputs, if any */
+ nout = PyUFuncOverride_GetOutObjects(kwds, &out_kwd_obj, &out_objs);
+ if (nout < 0) {
+ return -1;
+ }
+ for (i = 0; i < nout; i++) {
+ if (PyUFunc_HasOverride(out_objs[i])) {
+ Py_DECREF(out_kwd_obj);
+ return 1;
+ }
+ }
+ Py_DECREF(out_kwd_obj);
+ return 0;
+}
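/*
 * A minimal sketch (hypothetical helper, not part of this patch) of the
 * simplification the comment above describes: dispatch code must collect
 * every overriding argument, but a yes/no answer can stop at the first hit.
 */
static int
any_override_sketch(PyObject **objs, int n)
{
    int i;
    for (i = 0; i < n; i++) {
        if (PyUFunc_HasOverride(objs[i])) {
            return 1;    /* short-circuit: one override is enough */
        }
    }
    return 0;
}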
+
+
+NPY_NO_EXPORT PyObject *
array_ufunc(PyArrayObject *self, PyObject *args, PyObject *kwds)
{
PyObject *ufunc, *method_name, *normal_args, *ufunc_method;
PyObject *result = NULL;
- int num_override_args;
+ int has_override;
+
+ assert(PyTuple_CheckExact(args));
+ assert(kwds == NULL || PyDict_CheckExact(kwds));
- if (PyTuple_Size(args) < 2) {
+ if (PyTuple_GET_SIZE(args) < 2) {
PyErr_SetString(PyExc_TypeError,
"__array_ufunc__ requires at least 2 arguments");
return NULL;
@@ -988,11 +1059,11 @@ array_ufunc(PyArrayObject *self, PyObject *args, PyObject *kwds)
return NULL;
}
/* ndarray cannot handle overrides itself */
- num_override_args = PyUFunc_WithOverride(normal_args, kwds, NULL, NULL);
- if (num_override_args == -1) {
- return NULL;
+ has_override = any_array_ufunc_overrides(normal_args, kwds);
+ if (has_override < 0) {
+ goto cleanup;
}
- if (num_override_args) {
+ else if (has_override) {
result = Py_NotImplemented;
Py_INCREF(Py_NotImplemented);
goto cleanup;
@@ -1019,6 +1090,13 @@ cleanup:
static PyObject *
+array_function(PyArrayObject *self, PyObject *args, PyObject *kwds)
+{
+ NPY_FORWARD_NDARRAY_METHOD("_array_function");
+}
+
+
+static PyObject *
array_copy(PyArrayObject *self, PyObject *args, PyObject *kwds)
{
NPY_ORDER order = NPY_CORDER;
@@ -1563,7 +1641,7 @@ array_reduce(PyArrayObject *self, PyObject *NPY_UNUSED(args))
if (ret == NULL) {
return NULL;
}
- mod = PyImport_ImportModule("numpy.core.multiarray");
+ mod = PyImport_ImportModule("numpy.core._multiarray_umath");
if (mod == NULL) {
Py_DECREF(ret);
return NULL;
@@ -1591,6 +1669,8 @@ array_reduce(PyArrayObject *self, PyObject *NPY_UNUSED(args))
Notice because Python does not describe a mechanism to write
raw data to the pickle, this performs a copy to a string first
+        This issue is now addressed in protocol 5, where a buffer is serialized
+        instead of a string.
*/
state = PyTuple_New(5);
@@ -1624,6 +1704,132 @@ array_reduce(PyArrayObject *self, PyObject *NPY_UNUSED(args))
}
static PyObject *
+array_reduce_ex(PyArrayObject *self, PyObject *args)
+{
+ int protocol;
+ PyObject *ret = NULL, *numeric_mod = NULL, *from_buffer_func = NULL;
+ PyObject *buffer_tuple = NULL, *pickle_module = NULL, *pickle_class = NULL;
+ PyObject *class_args = NULL, *class_args_tuple = NULL, *unused = NULL;
+ PyObject *subclass_array_reduce = NULL;
+ PyObject *buffer = NULL, *transposed_array = NULL;
+ PyArray_Descr *descr = NULL;
+ char order;
+
+ if (PyArg_ParseTuple(args, "i", &protocol)){
+ descr = PyArray_DESCR(self);
+ if ((protocol < 5) ||
+ (!PyArray_IS_C_CONTIGUOUS((PyArrayObject*)self) &&
+ !PyArray_IS_F_CONTIGUOUS((PyArrayObject*)self)) ||
+ PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT) ||
+ (PyType_IsSubtype(((PyObject*)self)->ob_type, &PyArray_Type) &&
+ ((PyObject*)self)->ob_type != &PyArray_Type) ||
+ PyDataType_ISUNSIZED(descr)) {
+ /* The PickleBuffer class from version 5 of the pickle protocol
+ * can only be used for arrays backed by a contiguous data buffer.
+             * For all other cases we fall back to the generic array_reduce
+             * method that involves using a temporary bytes allocation. However
+             * we do not call array_reduce directly but instead look up and call
+             * the __reduce__ method, to make sure that it is possible to
+             * customize pickling in sub-classes. */
+            subclass_array_reduce = PyObject_GetAttrString((PyObject *)self,
+                                                           "__reduce__");
+            if (subclass_array_reduce == NULL) {
+                return NULL;
+            }
+            return PyObject_CallObject(subclass_array_reduce, unused);
+ }
+ else if (protocol == 5){
+ ret = PyTuple_New(2);
+
+ if (ret == NULL) {
+ return NULL;
+ }
+
+            /* If the Python version is below 3.8, the pickle module does not
+             * provide built-in support for protocol 5. We try importing the
+             * pickle5 backport instead. */
+#if PY_VERSION_HEX >= 0x03080000
+ pickle_module = PyImport_ImportModule("pickle");
+#elif PY_VERSION_HEX < 0x03080000 && PY_VERSION_HEX >= 0x03060000
+ pickle_module = PyImport_ImportModule("pickle5");
+ if (pickle_module == NULL){
+ /* for protocol 5, raise a clear ImportError if pickle5 is not found
+ */
+ PyErr_SetString(PyExc_ImportError, "Using pickle protocol 5 "
+ "requires the pickle5 module for python versions >=3.6 "
+ "and <3.8");
+ return NULL;
+ }
+#else
+ PyErr_SetString(PyExc_ValueError, "pickle protocol 5 is not available "
+ "for python versions < 3.6");
+ return NULL;
+#endif
+ if (pickle_module == NULL){
+ return NULL;
+ }
+
+ pickle_class = PyObject_GetAttrString(pickle_module,
+ "PickleBuffer");
+
+ class_args_tuple = PyTuple_New(1);
+ if (!PyArray_IS_C_CONTIGUOUS((PyArrayObject*)self) &&
+ PyArray_IS_F_CONTIGUOUS((PyArrayObject*)self)){
+
+                /* If the array is Fortran-contiguous and not C-contiguous,
+                 * the PickleBuffer instance will hold a view on the transpose
+                 * of the initial array, which is C-contiguous. */
+ order = 'F';
+ transposed_array = PyArray_Transpose((PyArrayObject*)self, NULL);
+ PyTuple_SET_ITEM(class_args_tuple, 0, transposed_array);
+ }
+ else {
+ order = 'C';
+ PyTuple_SET_ITEM(class_args_tuple, 0, (PyObject *)self);
+ Py_INCREF(self);
+ }
+
+ class_args = Py_BuildValue("O", class_args_tuple);
+
+ buffer = PyObject_CallObject(pickle_class, class_args);
+
+ numeric_mod = PyImport_ImportModule("numpy.core.numeric");
+ if (numeric_mod == NULL) {
+ Py_DECREF(ret);
+ return NULL;
+ }
+ from_buffer_func = PyObject_GetAttrString(numeric_mod,
+ "_frombuffer");
+ Py_DECREF(numeric_mod);
+
+ Py_INCREF(descr);
+
+ buffer_tuple = PyTuple_New(4);
+ PyTuple_SET_ITEM(buffer_tuple, 0, buffer);
+ PyTuple_SET_ITEM(buffer_tuple, 1, (PyObject *)descr);
+ PyTuple_SET_ITEM(buffer_tuple, 2,
+ PyObject_GetAttrString((PyObject *)self,
+ "shape"));
+ PyTuple_SET_ITEM(buffer_tuple, 3,
+ PyUnicode_FromStringAndSize(&order,
+ (Py_ssize_t)1));
+
+ PyTuple_SET_ITEM(ret, 0, from_buffer_func);
+ PyTuple_SET_ITEM(ret, 1, buffer_tuple);
+
+ return ret;
+ }
+ else {
+ PyErr_Format(PyExc_ValueError,
+ "cannot call __reduce_ex__ with protocol >= %d",
+ 5);
+ return NULL;
+ }
+ }
+ else {
+ return NULL;
+ }
+
+}
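/*
 * A minimal equivalent sketch (illustrative, not part of this patch; it
 * reuses the local names above and assumes the int-length form of "s#",
 * i.e. PY_SSIZE_T_CLEAN is not defined): the protocol-5 reduce value has
 * the fixed shape (_frombuffer, (PickleBuffer(self), dtype, shape, order)),
 * which Py_BuildValue can express in one call.
 */
static PyObject *
build_protocol5_reduce_value(PyObject *from_buffer_func, PyObject *buffer,
                             PyObject *descr, PyObject *shape, char order)
{
    return Py_BuildValue("O(OOOs#)", from_buffer_func, buffer,
                         descr, shape, &order, 1);
}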
+
+static PyObject *
array_setstate(PyArrayObject *self, PyObject *args)
{
PyObject *shape;
@@ -1775,11 +1981,11 @@ array_setstate(PyArrayObject *self, PyObject *args)
fa->data = datastr;
#ifndef NPY_PY3K
/* Check that the string is not interned */
- if (!_IsAligned(self) || swap || PyString_CHECK_INTERNED(rawdata)) {
+ if (!IsAligned(self) || swap || PyString_CHECK_INTERNED(rawdata)) {
#else
/* Bytes should always be considered immutable, but we just grab the
* pointer if they are large, to save memory. */
- if (!_IsAligned(self) || swap || (len <= 1000)) {
+ if (!IsAligned(self) || swap || (len <= 1000)) {
#endif
npy_intp num = PyArray_NBYTES(self);
fa->data = PyDataMem_NEW(num);
@@ -2271,7 +2477,7 @@ array_setflags(PyArrayObject *self, PyObject *args, PyObject *kwds)
if (PyObject_Not(align_flag)) {
PyArray_CLEARFLAGS(self, NPY_ARRAY_ALIGNED);
}
- else if (_IsAligned(self)) {
+ else if (IsAligned(self)) {
PyArray_ENABLEFLAGS(self, NPY_ARRAY_ALIGNED);
}
else {
@@ -2469,6 +2675,9 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = {
{"__array_ufunc__",
(PyCFunction)array_ufunc,
METH_VARARGS | METH_KEYWORDS, NULL},
+ {"__array_function__",
+ (PyCFunction)array_function,
+ METH_VARARGS | METH_KEYWORDS, NULL},
#ifndef NPY_PY3K
{"__unicode__",
@@ -2493,6 +2702,9 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = {
{"__reduce__",
(PyCFunction) array_reduce,
METH_VARARGS, NULL},
+ {"__reduce_ex__",
+ (PyCFunction) array_reduce_ex,
+ METH_VARARGS, NULL},
{"__setstate__",
(PyCFunction) array_setstate,
METH_VARARGS, NULL},
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index fe19cc9ad..8135769d9 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -19,6 +19,7 @@
#include "structmember.h"
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+#define _UMATHMODULE
#define _MULTIARRAYMODULE
#include <numpy/npy_common.h>
#include "numpy/arrayobject.h"
@@ -54,7 +55,6 @@ NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0;
#include "ctors.h"
#include "array_assign.h"
#include "common.h"
-#include "ufunc_override.h"
#include "multiarraymodule.h"
#include "cblasfuncs.h"
#include "vdot.h"
@@ -67,6 +67,17 @@ NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0;
#include "get_attr_string.h"
/*
+ *****************************************************************************
+ ** INCLUDE GENERATED CODE **
+ *****************************************************************************
+ */
+#include "funcs.inc"
+#include "umathmodule.h"
+
+NPY_NO_EXPORT int initscalarmath(PyObject *);
+NPY_NO_EXPORT int set_matmul_flags(PyObject *d); /* in ufunc_object.c */
+
+/*
* global variable to determine if legacy printing is enabled, accessible from
* C. For simplicity the mode is encoded as an integer where '0' means no
* legacy mode, and '113' means 1.13 legacy mode. We can upgrade this if we
@@ -800,102 +811,6 @@ PyArray_CanCoerceScalar(int thistype, int neededtype,
return 0;
}
-/*
- * Make a new empty array, of the passed size, of a type that takes the
- * priority of ap1 and ap2 into account.
- *
- * If `out` is non-NULL, memory overlap is checked with ap1 and ap2, and an
- * updateifcopy temporary array may be returned. If `result` is non-NULL, the
- * output array to be returned (`out` if non-NULL and the newly allocated array
- * otherwise) is incref'd and put to *result.
- */
-static PyArrayObject *
-new_array_for_sum(PyArrayObject *ap1, PyArrayObject *ap2, PyArrayObject* out,
- int nd, npy_intp dimensions[], int typenum, PyArrayObject **result)
-{
- PyArrayObject *out_buf;
-
- if (out) {
- int d;
-
- /* verify that out is usable */
- if (PyArray_NDIM(out) != nd ||
- PyArray_TYPE(out) != typenum ||
- !PyArray_ISCARRAY(out)) {
- PyErr_SetString(PyExc_ValueError,
- "output array is not acceptable (must have the right datatype, "
- "number of dimensions, and be a C-Array)");
- return 0;
- }
- for (d = 0; d < nd; ++d) {
- if (dimensions[d] != PyArray_DIM(out, d)) {
- PyErr_SetString(PyExc_ValueError,
- "output array has wrong dimensions");
- return 0;
- }
- }
-
- /* check for memory overlap */
- if (!(solve_may_share_memory(out, ap1, 1) == 0 &&
- solve_may_share_memory(out, ap2, 1) == 0)) {
- /* allocate temporary output array */
- out_buf = (PyArrayObject *)PyArray_NewLikeArray(out, NPY_CORDER,
- NULL, 0);
- if (out_buf == NULL) {
- return NULL;
- }
-
- /* set copy-back */
- Py_INCREF(out);
- if (PyArray_SetWritebackIfCopyBase(out_buf, out) < 0) {
- Py_DECREF(out);
- Py_DECREF(out_buf);
- return NULL;
- }
- }
- else {
- Py_INCREF(out);
- out_buf = out;
- }
-
- if (result) {
- Py_INCREF(out);
- *result = out;
- }
-
- return out_buf;
- }
- else {
- PyTypeObject *subtype;
- double prior1, prior2;
- /*
- * Need to choose an output array that can hold a sum
- * -- use priority to determine which subtype.
- */
- if (Py_TYPE(ap2) != Py_TYPE(ap1)) {
- prior2 = PyArray_GetPriority((PyObject *)ap2, 0.0);
- prior1 = PyArray_GetPriority((PyObject *)ap1, 0.0);
- subtype = (prior2 > prior1 ? Py_TYPE(ap2) : Py_TYPE(ap1));
- }
- else {
- prior1 = prior2 = 0.0;
- subtype = Py_TYPE(ap1);
- }
-
- out_buf = (PyArrayObject *)PyArray_New(subtype, nd, dimensions,
- typenum, NULL, NULL, 0, 0,
- (PyObject *)
- (prior2 > prior1 ? ap2 : ap1));
-
- if (out_buf != NULL && result) {
- Py_INCREF(out_buf);
- *result = out_buf;
- }
-
- return out_buf;
- }
-}
-
/* Could perhaps be redone to not make contiguous arrays */
/*NUMPY_API
@@ -918,7 +833,10 @@ PyArray_InnerProduct(PyObject *op1, PyObject *op2)
typenum = PyArray_ObjectType(op2, typenum);
typec = PyArray_DescrFromType(typenum);
if (typec == NULL) {
- PyErr_SetString(PyExc_TypeError, "Cannot find a common data type.");
+ if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_TypeError,
+ "Cannot find a common data type.");
+ }
goto fail;
}
@@ -1004,7 +922,10 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out)
typenum = PyArray_ObjectType(op2, typenum);
typec = PyArray_DescrFromType(typenum);
if (typec == NULL) {
- PyErr_SetString(PyExc_TypeError, "Cannot find a common data type.");
+ if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_TypeError,
+ "Cannot find a common data type.");
+ }
return NULL;
}
@@ -1061,7 +982,7 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out)
for (i = 0; i < PyArray_NDIM(ap2) - 2; i++) {
dimensions[j++] = PyArray_DIMS(ap2)[i];
}
- if(PyArray_NDIM(ap2) > 1) {
+ if (PyArray_NDIM(ap2) > 1) {
dimensions[j++] = PyArray_DIMS(ap2)[PyArray_NDIM(ap2)-1];
}
@@ -1101,7 +1022,7 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out)
NPY_BEGIN_THREADS_DESCR(PyArray_DESCR(ap2));
while (it1->index < it1->size) {
while (it2->index < it2->size) {
- dot(it1->dataptr, is1, it2->dataptr, is2, op, l, out_buf);
+ dot(it1->dataptr, is1, it2->dataptr, is2, op, l, NULL);
op += os;
PyArray_ITER_NEXT(it2);
}
@@ -1397,7 +1318,7 @@ PyArray_Correlate2(PyObject *op1, PyObject *op2, int mode)
*/
if (inverted) {
st = _pyarray_revert(ret);
- if(st) {
+ if (st) {
goto clean_ret;
}
}
@@ -1444,7 +1365,7 @@ PyArray_Correlate(PyObject *op1, PyObject *op2, int mode)
}
ret = _pyarray_correlate(ap1, ap2, typenum, mode, &unused);
- if(ret == NULL) {
+ if (ret == NULL) {
goto fail;
}
Py_DECREF(ap1);
@@ -1733,7 +1654,7 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws)
}
full_path:
- if(!PyArg_ParseTupleAndKeywords(args, kws, "O|O&O&O&O&i:array", kwd,
+ if (!PyArg_ParseTupleAndKeywords(args, kws, "O|O&O&O&O&i:array", kwd,
&op,
PyArray_DescrConverter2, &type,
PyArray_BoolConverter, &copy,
@@ -2129,6 +2050,7 @@ static PyObject *
array_fromfile(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds)
{
PyObject *file = NULL, *ret;
+ PyObject *err_type = NULL, *err_value = NULL, *err_traceback = NULL;
char *sep = "";
Py_ssize_t nin = -1;
static char *kwlist[] = {"file", "dtype", "count", "sep", NULL};
@@ -2164,18 +2086,26 @@ array_fromfile(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds)
}
ret = PyArray_FromFile(fp, type, (npy_intp) nin, sep);
+    /* If an exception is raised in the call to PyArray_FromFile
+     * we need to clear it, and restore it later, to ensure that
+     * we can clean up the duplicated file descriptor properly.
+     */
+ PyErr_Fetch(&err_type, &err_value, &err_traceback);
if (npy_PyFile_DupClose2(file, fp, orig_pos) < 0) {
+ npy_PyErr_ChainExceptions(err_type, err_value, err_traceback);
goto fail;
}
if (own && npy_PyFile_CloseFile(file) < 0) {
+ npy_PyErr_ChainExceptions(err_type, err_value, err_traceback);
goto fail;
}
+ PyErr_Restore(err_type, err_value, err_traceback);
Py_DECREF(file);
return ret;
fail:
Py_DECREF(file);
- Py_DECREF(ret);
+ Py_XDECREF(ret);
return NULL;
}
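/*
 * A minimal sketch (hypothetical helper, not part of this patch) of the
 * fetch/chain/restore pattern used above: stash any pending error, run
 * cleanup that may itself fail, then either chain the stashed error onto
 * the new one or re-raise it unchanged.
 */
static int
cleanup_with_chained_error(int (*cleanup)(void *), void *arg)
{
    PyObject *t, *v, *tb;
    PyErr_Fetch(&t, &v, &tb);                 /* clears the error indicator */
    if (cleanup(arg) < 0) {
        npy_PyErr_ChainExceptions(t, v, tb);  /* steals the references */
        return -1;
    }
    PyErr_Restore(t, v, tb);                  /* re-raise original, if any */
    return 0;
}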
@@ -2388,154 +2318,6 @@ fail:
return NULL;
}
-
-
-/*
- * matmul
- *
- * Implements the protocol used by the '@' operator defined in PEP 364.
- * Not in the NUMPY API at this time, maybe later.
- *
- *
- * in1: Left hand side operand
- * in2: Right hand side operand
- * out: Either NULL, or an array into which the output should be placed.
- *
- * Returns NULL on error.
- */
-static PyObject *
-array_matmul(PyObject *NPY_UNUSED(m), PyObject *args, PyObject* kwds)
-{
- PyObject *in1, *in2, *out = NULL;
- char* kwlist[] = {"a", "b", "out", NULL };
- PyArrayObject *ap1, *ap2, *ret = NULL;
- NPY_ORDER order = NPY_KEEPORDER;
- NPY_CASTING casting = NPY_SAFE_CASTING;
- PyArray_Descr *dtype;
- int nd1, nd2, typenum;
- char *subscripts;
- PyArrayObject *ops[2];
-
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|O:matmul", kwlist,
- &in1, &in2, &out)) {
- return NULL;
- }
-
- if (out != NULL) {
- if (out == Py_None) {
- out = NULL;
- }
- else if (!PyArray_Check(out)) {
- PyErr_SetString(PyExc_TypeError, "'out' must be an array");
- return NULL;
- }
- }
-
- dtype = PyArray_DescrFromObject(in1, NULL);
- dtype = PyArray_DescrFromObject(in2, dtype);
- if (dtype == NULL) {
- PyErr_SetString(PyExc_ValueError, "Cannot find a common data type.");
- return NULL;
- }
- typenum = dtype->type_num;
-
- if (typenum == NPY_OBJECT) {
- /* matmul is not currently implemented for object arrays */
- PyErr_SetString(PyExc_TypeError,
- "Object arrays are not currently supported");
- Py_DECREF(dtype);
- return NULL;
- }
-
- ap1 = (PyArrayObject *)PyArray_FromAny(in1, dtype, 0, 0,
- NPY_ARRAY_ALIGNED, NULL);
- if (ap1 == NULL) {
- return NULL;
- }
-
- Py_INCREF(dtype);
- ap2 = (PyArrayObject *)PyArray_FromAny(in2, dtype, 0, 0,
- NPY_ARRAY_ALIGNED, NULL);
- if (ap2 == NULL) {
- Py_DECREF(ap1);
- return NULL;
- }
-
- if (PyArray_NDIM(ap1) == 0 || PyArray_NDIM(ap2) == 0) {
- /* Scalars are rejected */
- PyErr_SetString(PyExc_ValueError,
- "Scalar operands are not allowed, use '*' instead");
- return NULL;
- }
-
- nd1 = PyArray_NDIM(ap1);
- nd2 = PyArray_NDIM(ap2);
-
-#if defined(HAVE_CBLAS)
- if (nd1 <= 2 && nd2 <= 2 &&
- (NPY_DOUBLE == typenum || NPY_CDOUBLE == typenum ||
- NPY_FLOAT == typenum || NPY_CFLOAT == typenum)) {
- return cblas_matrixproduct(typenum, ap1, ap2, (PyArrayObject *)out);
- }
-#endif
-
- /*
- * Use einsum for the stacked cases. This is a quick implementation
- * to avoid setting up the proper iterators. Einsum broadcasts, so
- * we need to check dimensions before the call.
- */
- if (nd1 == 1 && nd2 == 1) {
- /* vector vector */
- if (PyArray_DIM(ap1, 0) != PyArray_DIM(ap2, 0)) {
- dot_alignment_error(ap1, 0, ap2, 0);
- goto fail;
- }
- subscripts = "i, i";
- }
- else if (nd1 == 1) {
- /* vector matrix */
- if (PyArray_DIM(ap1, 0) != PyArray_DIM(ap2, nd2 - 2)) {
- dot_alignment_error(ap1, 0, ap2, nd2 - 2);
- goto fail;
- }
- subscripts = "i, ...ij";
- }
- else if (nd2 == 1) {
- /* matrix vector */
- if (PyArray_DIM(ap1, nd1 - 1) != PyArray_DIM(ap2, 0)) {
- dot_alignment_error(ap1, nd1 - 1, ap2, 0);
- goto fail;
- }
- subscripts = "...i, i";
- }
- else {
- /* matrix * matrix */
- if (PyArray_DIM(ap1, nd1 - 1) != PyArray_DIM(ap2, nd2 - 2)) {
- dot_alignment_error(ap1, nd1 - 1, ap2, nd2 - 2);
- goto fail;
- }
- subscripts = "...ij, ...jk";
- }
- ops[0] = ap1;
- ops[1] = ap2;
- ret = PyArray_EinsteinSum(subscripts, 2, ops, NULL, order, casting,
- (PyArrayObject *)out);
- Py_DECREF(ap1);
- Py_DECREF(ap2);
-
- /* If no output was supplied, possibly convert to a scalar */
- if (ret != NULL && out == NULL) {
- return PyArray_Return((PyArrayObject *)ret);
- }
- return (PyObject *)ret;
-
-fail:
- Py_XDECREF(ap1);
- Py_XDECREF(ap2);
- return NULL;
-}
-
-
static int
einsum_sub_op_from_str(PyObject *args, PyObject **str_obj, char **subscripts,
PyArrayObject **op)
@@ -2707,7 +2489,7 @@ einsum_sub_op_from_lists(PyObject *args,
"operand and a subscripts list to einsum");
return -1;
}
- else if(nop >= NPY_MAXARGS) {
+ else if (nop >= NPY_MAXARGS) {
PyErr_SetString(PyExc_ValueError, "too many operands");
return -1;
}
@@ -2942,7 +2724,7 @@ array_arange(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws) {
static char *kwd[]= {"start", "stop", "step", "dtype", NULL};
PyArray_Descr *typecode = NULL;
- if(!PyArg_ParseTupleAndKeywords(args, kws, "O|OOO&:arange", kwd,
+ if (!PyArg_ParseTupleAndKeywords(args, kws, "O|OOO&:arange", kwd,
&o_start,
&o_stop,
&o_step,
@@ -2980,7 +2762,7 @@ array__get_ndarray_c_version(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObje
{
static char *kwlist[] = {NULL};
- if(!PyArg_ParseTupleAndKeywords(args, kwds, "", kwlist )) {
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "", kwlist )) {
return NULL;
}
return PyInt_FromLong( (long) PyArray_GetNDArrayCVersion() );
@@ -3053,7 +2835,7 @@ array_set_string_function(PyObject *NPY_UNUSED(self), PyObject *args,
int repr = 1;
static char *kwlist[] = {"f", "repr", NULL};
- if(!PyArg_ParseTupleAndKeywords(args, kwds, "|Oi:set_string_function", kwlist, &op, &repr)) {
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|Oi:set_string_function", kwlist, &op, &repr)) {
return NULL;
}
/* reset the array_repr function to built-in */
@@ -3075,7 +2857,7 @@ array_set_ops_function(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args),
{
PyObject *oldops = NULL;
- if ((oldops = PyArray_GetNumericOps()) == NULL) {
+ if ((oldops = _PyArray_GetNumericOps()) == NULL) {
return NULL;
}
/*
@@ -3085,8 +2867,10 @@ array_set_ops_function(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args),
*/
if (kwds && PyArray_SetNumericOps(kwds) == -1) {
Py_DECREF(oldops);
- PyErr_SetString(PyExc_ValueError,
+ if (PyErr_Occurred() == NULL) {
+ PyErr_SetString(PyExc_ValueError,
"one or more objects not callable");
+ }
return NULL;
}
return oldops;
@@ -3361,7 +3145,7 @@ array_promote_types(PyObject *NPY_UNUSED(dummy), PyObject *args)
PyArray_Descr *d1 = NULL;
PyArray_Descr *d2 = NULL;
PyObject *ret = NULL;
- if(!PyArg_ParseTuple(args, "O&O&:promote_types",
+ if (!PyArg_ParseTuple(args, "O&O&:promote_types",
PyArray_DescrConverter2, &d1, PyArray_DescrConverter2, &d2)) {
goto finish;
}
@@ -3387,7 +3171,7 @@ array_min_scalar_type(PyObject *NPY_UNUSED(dummy), PyObject *args)
PyArrayObject *array;
PyObject *ret = NULL;
- if(!PyArg_ParseTuple(args, "O:min_scalar_type", &array_in)) {
+ if (!PyArg_ParseTuple(args, "O:min_scalar_type", &array_in)) {
return NULL;
}
@@ -3464,7 +3248,7 @@ array_datetime_data(PyObject *NPY_UNUSED(dummy), PyObject *args)
PyArray_Descr *dtype;
PyArray_DatetimeMetaData *meta;
- if(!PyArg_ParseTuple(args, "O&:datetime_data",
+ if (!PyArg_ParseTuple(args, "O&:datetime_data",
PyArray_DescrConverter, &dtype)) {
return NULL;
}
@@ -3483,7 +3267,7 @@ new_buffer(PyObject *NPY_UNUSED(dummy), PyObject *args)
{
int size;
- if(!PyArg_ParseTuple(args, "i:buffer", &size)) {
+ if (!PyArg_ParseTuple(args, "i:buffer", &size)) {
return NULL;
}
return PyBuffer_New(size);
@@ -4350,9 +4134,6 @@ static struct PyMethodDef array_module_methods[] = {
{"vdot",
(PyCFunction)array_vdot,
METH_VARARGS | METH_KEYWORDS, NULL},
- {"matmul",
- (PyCFunction)array_matmul,
- METH_VARARGS | METH_KEYWORDS, NULL},
{"c_einsum",
(PyCFunction)array_einsum,
METH_VARARGS|METH_KEYWORDS, NULL},
@@ -4441,7 +4222,7 @@ static struct PyMethodDef array_module_methods[] = {
"indicated by mask."},
{"bincount", (PyCFunction)arr_bincount,
METH_VARARGS | METH_KEYWORDS, NULL},
- {"digitize", (PyCFunction)arr_digitize,
+ {"_monotonicity", (PyCFunction)arr__monotonicity,
METH_VARARGS | METH_KEYWORDS, NULL},
{"interp", (PyCFunction)arr_interp,
METH_VARARGS | METH_KEYWORDS, NULL},
@@ -4461,6 +4242,18 @@ static struct PyMethodDef array_module_methods[] = {
METH_VARARGS | METH_KEYWORDS, NULL},
{"set_legacy_print_mode", (PyCFunction)set_legacy_print_mode,
METH_VARARGS, NULL},
+ /* from umath */
+ {"frompyfunc",
+ (PyCFunction) ufunc_frompyfunc,
+ METH_VARARGS | METH_KEYWORDS, NULL},
+ {"seterrobj",
+ (PyCFunction) ufunc_seterr,
+ METH_VARARGS, NULL},
+ {"geterrobj",
+ (PyCFunction) ufunc_geterr,
+ METH_VARARGS, NULL},
+ {"_add_newdoc_ufunc", (PyCFunction)add_newdoc_ufunc,
+ METH_VARARGS, NULL},
{NULL, NULL, 0, NULL} /* sentinel */
};
@@ -4478,9 +4271,6 @@ static struct PyMethodDef array_module_methods[] = {
static int
setup_scalartypes(PyObject *NPY_UNUSED(dict))
{
- initialize_casting_tables();
- initialize_numeric_types();
-
if (PyType_Ready(&PyBool_Type) < 0) {
return -1;
}
@@ -4716,11 +4506,10 @@ intern_strings(void)
npy_ma_str_ndmin && npy_ma_str_axis1 && npy_ma_str_axis2;
}
-
#if defined(NPY_PY3K)
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
- "multiarray",
+ "_multiarray_umath",
NULL,
-1,
array_module_methods,
@@ -4734,10 +4523,10 @@ static struct PyModuleDef moduledef = {
/* Initialization function for the module */
#if defined(NPY_PY3K)
#define RETVAL(x) x
-PyMODINIT_FUNC PyInit_multiarray(void) {
+PyMODINIT_FUNC PyInit__multiarray_umath(void) {
#else
#define RETVAL(x)
-PyMODINIT_FUNC initmultiarray(void) {
+PyMODINIT_FUNC init_multiarray_umath(void) {
#endif
PyObject *m, *d, *s;
PyObject *c_api;
@@ -4746,7 +4535,7 @@ PyMODINIT_FUNC initmultiarray(void) {
#if defined(NPY_PY3K)
m = PyModule_Create(&moduledef);
#else
- m = Py_InitModule("multiarray", array_module_methods);
+ m = Py_InitModule("_multiarray_umath", array_module_methods);
#endif
if (!m) {
goto err;
@@ -4780,6 +4569,25 @@ PyMODINIT_FUNC initmultiarray(void) {
* static structure slots with functions from the Python C_API.
*/
PyArray_Type.tp_hash = PyObject_HashNotImplemented;
+
+ if (PyType_Ready(&PyUFunc_Type) < 0) {
+ goto err;
+ }
+
+ /* Load the ufunc operators into the array module's namespace */
+ if (InitOperators(d) < 0) {
+ goto err;
+ }
+
+ if (set_matmul_flags(d) < 0) {
+ goto err;
+ }
+ initialize_casting_tables();
+ initialize_numeric_types();
+ if (initscalarmath(m) < 0) {
+ goto err;
+ }
+
if (PyType_Ready(&PyArray_Type) < 0) {
goto err;
}
@@ -4826,6 +4634,16 @@ PyMODINIT_FUNC initmultiarray(void) {
PyDict_SetItemString(d, "_ARRAY_API", c_api);
Py_DECREF(c_api);
+ c_api = NpyCapsule_FromVoidPtr((void *)PyUFunc_API, NULL);
+ if (c_api == NULL) {
+ goto err;
+ }
+ PyDict_SetItemString(d, "_UFUNC_API", c_api);
+ Py_DECREF(c_api);
+ if (PyErr_Occurred()) {
+ goto err;
+ }
+
/*
* PyExc_Exception should catch all the standard errors that are
* now raised instead of the string exception "multiarray.error"
@@ -4902,7 +4720,9 @@ PyMODINIT_FUNC initmultiarray(void) {
if (set_typeinfo(d) != 0) {
goto err;
}
-
+ if (initumath(m) != 0) {
+ goto err;
+ }
return RETVAL(m);
err:
diff --git a/numpy/core/src/multiarray/nditer_constr.c b/numpy/core/src/multiarray/nditer_constr.c
index c56376f58..90cff4077 100644
--- a/numpy/core/src/multiarray/nditer_constr.c
+++ b/numpy/core/src/multiarray/nditer_constr.c
@@ -17,8 +17,7 @@
#include "arrayobject.h"
#include "templ_common.h"
-#include "mem_overlap.h"
-
+#include "array_assign.h"
/* Internal helper functions private to this file */
static int
@@ -1133,7 +1132,7 @@ npyiter_prepare_one_operand(PyArrayObject **op,
/* Check if the operand is aligned */
if (op_flags & NPY_ITER_ALIGNED) {
/* Check alignment */
- if (!PyArray_ISALIGNED(*op)) {
+ if (!IsAligned(*op)) {
NPY_IT_DBG_PRINT("Iterator: Setting NPY_OP_ITFLAG_CAST "
"because of NPY_ITER_ALIGNED\n");
*op_itflags |= NPY_OP_ITFLAG_CAST;
@@ -2852,8 +2851,14 @@ npyiter_allocate_arrays(NpyIter *iter,
npyiter_replace_axisdata(iter, iop, op[iop], ondim,
PyArray_DATA(op[iop]), op_axes ? op_axes[iop] : NULL);
- /* New arrays are aligned and need no cast */
- op_itflags[iop] |= NPY_OP_ITFLAG_ALIGNED;
+ /*
+ * New arrays are guaranteed true-aligned, but copy/cast code
+ * needs uint-alignment in addition.
+ */
+ if (IsUintAligned(out)) {
+ op_itflags[iop] |= NPY_OP_ITFLAG_ALIGNED;
+ }
+ /* New arrays need no cast */
op_itflags[iop] &= ~NPY_OP_ITFLAG_CAST;
}
/*
@@ -2889,11 +2894,17 @@ npyiter_allocate_arrays(NpyIter *iter,
PyArray_DATA(op[iop]), NULL);
/*
- * New arrays are aligned need no cast, and in the case
+ * New arrays are guaranteed true-aligned, but copy/cast code
+ * needs uint-alignment in addition.
+ */
+ if (IsUintAligned(temp)) {
+ op_itflags[iop] |= NPY_OP_ITFLAG_ALIGNED;
+ }
+ /*
+ * New arrays need no cast, and in the case
* of scalars, always have stride 0 so never need buffering
*/
- op_itflags[iop] |= (NPY_OP_ITFLAG_ALIGNED |
- NPY_OP_ITFLAG_BUFNEVER);
+ op_itflags[iop] |= NPY_OP_ITFLAG_BUFNEVER;
op_itflags[iop] &= ~NPY_OP_ITFLAG_CAST;
if (itflags & NPY_ITFLAG_BUFFER) {
NBF_STRIDES(bufferdata)[iop] = 0;
@@ -2954,8 +2965,14 @@ npyiter_allocate_arrays(NpyIter *iter,
npyiter_replace_axisdata(iter, iop, op[iop], ondim,
PyArray_DATA(op[iop]), op_axes ? op_axes[iop] : NULL);
- /* The temporary copy is aligned and needs no cast */
- op_itflags[iop] |= NPY_OP_ITFLAG_ALIGNED;
+ /*
+ * New arrays are guaranteed true-aligned, but copy/cast code
+             * needs uint-alignment in addition.
+ */
+ if (IsUintAligned(temp)) {
+ op_itflags[iop] |= NPY_OP_ITFLAG_ALIGNED;
+ }
+ /* The temporary copy needs no cast */
op_itflags[iop] &= ~NPY_OP_ITFLAG_CAST;
}
else {
@@ -2975,7 +2992,7 @@ npyiter_allocate_arrays(NpyIter *iter,
* If the operand is aligned, any buffering can use aligned
* optimizations.
*/
- if (PyArray_ISALIGNED(op[iop])) {
+ if (IsUintAligned(op[iop])) {
op_itflags[iop] |= NPY_OP_ITFLAG_ALIGNED;
}
}
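/*
 * A minimal sketch (helper names hypothetical, simplified from the real
 * IsAligned/IsUintAligned, which also examine strides) of the two notions
 * distinguished in the hunks above: "true" alignment is what the compiler
 * requires for the element type; "uint" alignment is what the copy/cast
 * loops require to reinterpret an element as same-size unsigned integers.
 * Since the two can differ, a freshly allocated (true-aligned) array is
 * not automatically uint-aligned.
 */
#include <stdint.h>

static int
is_true_aligned(const void *p, uintptr_t type_alignment)
{
    return type_alignment <= 1 || (uintptr_t)p % type_alignment == 0;
}

static int
is_uint_aligned(const void *p, uintptr_t itemsize)
{
    /* copy loops use at most 8-byte unsigned-integer accesses */
    uintptr_t need = itemsize < 8 ? itemsize : 8;
    return need <= 1 || (uintptr_t)p % need == 0;
}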
diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c
index 448d2d9c2..d153a8a64 100644
--- a/numpy/core/src/multiarray/number.c
+++ b/numpy/core/src/multiarray/number.c
@@ -15,6 +15,7 @@
#include "temp_elide.h"
#include "binop_override.h"
+#include "ufunc_override.h"
/*************************************************************************
**************** Implement Number Protocol ****************************
@@ -70,12 +71,8 @@ array_inplace_power(PyArrayObject *a1, PyObject *o2, PyObject *NPY_UNUSED(modulo
n_ops.op = temp; \
}
-
-/*NUMPY_API
- *Set internal structure with number functions that all arrays will use
- */
NPY_NO_EXPORT int
-PyArray_SetNumericOps(PyObject *dict)
+_PyArray_SetNumericOps(PyObject *dict)
{
PyObject *temp = NULL;
SET(add);
@@ -115,19 +112,32 @@ PyArray_SetNumericOps(PyObject *dict)
SET(minimum);
SET(rint);
SET(conjugate);
+ SET(matmul);
return 0;
}
-/* FIXME - macro contains goto */
+/*NUMPY_API
+ *Set internal structure with number functions that all arrays will use
+ */
+NPY_NO_EXPORT int
+PyArray_SetNumericOps(PyObject *dict)
+{
+ /* 2018-09-09, 1.16 */
+ if (DEPRECATE("PyArray_SetNumericOps is deprecated. Use "
+ "PyUFunc_ReplaceLoopBySignature to replace ufunc inner loop functions "
+ "instead.") < 0) {
+ return -1;
+ }
+ return _PyArray_SetNumericOps(dict);
+}
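/*
 * A minimal migration sketch (illustrative, not part of this patch; the
 * loop function and type signature are placeholders): rather than swapping
 * the Python-level callables with PyArray_SetNumericOps, replace the inner
 * loop of the ufunc itself, as the deprecation message suggests.
 */
static int
replace_double_add_loop(PyUFuncObject *add_ufunc,
                        PyUFuncGenericFunction new_loop)
{
    int signature[3] = {NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE};
    PyUFuncGenericFunction old_loop = NULL;
    return PyUFunc_ReplaceLoopBySignature(add_ufunc, new_loop,
                                          signature, &old_loop);
}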
+
+/* Note - macro contains goto */
#define GET(op) if (n_ops.op && \
(PyDict_SetItemString(dict, #op, n_ops.op)==-1)) \
goto fail;
-/*NUMPY_API
- Get dictionary showing number functions that all arrays will use
-*/
NPY_NO_EXPORT PyObject *
-PyArray_GetNumericOps(void)
+_PyArray_GetNumericOps(void)
{
PyObject *dict;
if ((dict = PyDict_New())==NULL)
@@ -168,6 +178,7 @@ PyArray_GetNumericOps(void)
GET(minimum);
GET(rint);
GET(conjugate);
+ GET(matmul);
return dict;
fail:
@@ -175,6 +186,19 @@ PyArray_GetNumericOps(void)
return NULL;
}
+/*NUMPY_API
+ Get dictionary showing number functions that all arrays will use
+*/
+NPY_NO_EXPORT PyObject *
+PyArray_GetNumericOps(void)
+{
+ /* 2018-09-09, 1.16 */
+ if (DEPRECATE("PyArray_GetNumericOps is deprecated.") < 0) {
+ return NULL;
+ }
+ return _PyArray_GetNumericOps();
+}
+
static PyObject *
_get_keywords(int rtype, PyArrayObject *out)
{
@@ -360,14 +384,8 @@ array_divmod(PyArrayObject *m1, PyObject *m2)
static PyObject *
array_matrix_multiply(PyArrayObject *m1, PyObject *m2)
{
- static PyObject *matmul = NULL;
-
- npy_cache_import("numpy.core.multiarray", "matmul", &matmul);
- if (matmul == NULL) {
- return NULL;
- }
BINOP_GIVE_UP_IF_NEEDED(m1, m2, nb_matrix_multiply, array_matrix_multiply);
- return PyArray_GenericBinaryFunction(m1, m2, matmul);
+ return PyArray_GenericBinaryFunction(m1, m2, n_ops.matmul);
}
static PyObject *
@@ -550,6 +568,50 @@ array_power(PyArrayObject *a1, PyObject *o2, PyObject *modulo)
return value;
}
+static PyObject *
+array_positive(PyArrayObject *m1)
+{
+ /*
+ * For backwards compatibility, where + just implied a copy,
+ * we cannot just call n_ops.positive. Instead, we do the following
+ * 1. Try n_ops.positive
+ * 2. If we get an exception, check whether __array_ufunc__ is
+ * overridden; if so, we live in the future and we allow the
+ * TypeError to be passed on.
+ * 3. If not, give a deprecation warning and return a copy.
+ */
+ PyObject *value;
+ if (can_elide_temp_unary(m1)) {
+ value = PyArray_GenericInplaceUnaryFunction(m1, n_ops.positive);
+ }
+ else {
+ value = PyArray_GenericUnaryFunction(m1, n_ops.positive);
+ }
+ if (value == NULL) {
+ /*
+         * We first fetch the error, as the error state must be clear
+         * before we can check for the override. When the deprecation
+         * is removed, this whole stanza can be deleted.
+ */
+ PyObject *exc, *val, *tb;
+ PyErr_Fetch(&exc, &val, &tb);
+ if (PyUFunc_HasOverride((PyObject *)m1)) {
+ PyErr_Restore(exc, val, tb);
+ return NULL;
+ }
+ /* 2018-06-28, 1.16.0 */
+ if (DEPRECATE("Applying '+' to a non-numerical array is "
+ "ill-defined. Returning a copy, but in the future "
+ "this will error.") < 0) {
+ return NULL;
+ }
+ Py_XDECREF(exc);
+ Py_XDECREF(val);
+ Py_XDECREF(tb);
+ value = PyArray_Return((PyArrayObject *)PyArray_Copy(m1));
+ }
+ return value;
+}
static PyObject *
array_negative(PyArrayObject *m1)
@@ -927,12 +989,6 @@ array_hex(PyArrayObject *v)
#endif
static PyObject *
-_array_copy_nice(PyArrayObject *self)
-{
- return PyArray_Return((PyArrayObject *) PyArray_Copy(self));
-}
-
-static PyObject *
array_index(PyArrayObject *v)
{
if (!PyArray_ISINTEGER(v) || PyArray_NDIM(v) != 0) {
@@ -955,7 +1011,7 @@ NPY_NO_EXPORT PyNumberMethods array_as_number = {
(binaryfunc)array_divmod, /*nb_divmod*/
(ternaryfunc)array_power, /*nb_power*/
(unaryfunc)array_negative, /*nb_neg*/
- (unaryfunc)_array_copy_nice, /*nb_pos*/
+ (unaryfunc)array_positive, /*nb_pos*/
(unaryfunc)array_absolute, /*(unaryfunc)array_abs,*/
(inquiry)_array_nonzero, /*nb_nonzero*/
(unaryfunc)array_invert, /*nb_invert*/
diff --git a/numpy/core/src/multiarray/number.h b/numpy/core/src/multiarray/number.h
index 99a2a722b..33a7cf872 100644
--- a/numpy/core/src/multiarray/number.h
+++ b/numpy/core/src/multiarray/number.h
@@ -39,6 +39,7 @@ typedef struct {
PyObject *minimum;
PyObject *rint;
PyObject *conjugate;
+ PyObject *matmul;
} NumericOps;
extern NPY_NO_EXPORT NumericOps n_ops;
@@ -48,10 +49,10 @@ NPY_NO_EXPORT PyObject *
array_int(PyArrayObject *v);
NPY_NO_EXPORT int
-PyArray_SetNumericOps(PyObject *dict);
+_PyArray_SetNumericOps(PyObject *dict);
NPY_NO_EXPORT PyObject *
-PyArray_GetNumericOps(void);
+_PyArray_GetNumericOps(void);
NPY_NO_EXPORT PyObject *
PyArray_GenericBinaryFunction(PyArrayObject *m1, PyObject *m2, PyObject *op);
diff --git a/numpy/core/src/multiarray/scalarapi.c b/numpy/core/src/multiarray/scalarapi.c
index 5ef6c0bbf..bc435d1ca 100644
--- a/numpy/core/src/multiarray/scalarapi.c
+++ b/numpy/core/src/multiarray/scalarapi.c
@@ -471,7 +471,7 @@ PyArray_DescrFromTypeObject(PyObject *type)
/* Do special thing for VOID sub-types */
if (PyType_IsSubtype((PyTypeObject *)type, &PyVoidArrType_Type)) {
new = PyArray_DescrNewFromType(NPY_VOID);
- conv = _arraydescr_fromobj(type);
+ conv = _arraydescr_from_dtype_attr(type);
if (conv) {
new->fields = conv->fields;
Py_INCREF(new->fields);
diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index 0d7db2d8f..2f71c8ae9 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -539,19 +539,22 @@ _void_to_hex(const char* argbuf, const Py_ssize_t arglen,
}
static PyObject *
+_void_scalar_repr(PyObject *obj) {
+ static PyObject *reprfunc = NULL;
+ npy_cache_import("numpy.core.arrayprint",
+ "_void_scalar_repr", &reprfunc);
+ if (reprfunc == NULL) {
+ return NULL;
+ }
+ return PyObject_CallFunction(reprfunc, "O", obj);
+}
+
+static PyObject *
voidtype_repr(PyObject *self)
{
PyVoidScalarObject *s = (PyVoidScalarObject*) self;
if (PyDataType_HASFIELDS(s->descr)) {
- static PyObject *reprfunc = NULL;
-
- npy_cache_import("numpy.core.arrayprint",
- "_void_scalar_repr", &reprfunc);
- if (reprfunc == NULL) {
- return NULL;
- }
-
- return PyObject_CallFunction(reprfunc, "O", self);
+ return _void_scalar_repr(self);
}
return _void_to_hex(s->obval, s->descr->elsize, "void(b'", "\\x", "')");
}
@@ -561,15 +564,7 @@ voidtype_str(PyObject *self)
{
PyVoidScalarObject *s = (PyVoidScalarObject*) self;
if (PyDataType_HASFIELDS(s->descr)) {
- static PyObject *reprfunc = NULL;
-
- npy_cache_import("numpy.core.arrayprint",
- "_void_scalar_repr", &reprfunc);
- if (reprfunc == NULL) {
- return NULL;
- }
-
- return PyObject_CallFunction(reprfunc, "O", self);
+ return _void_scalar_repr(self);
}
return _void_to_hex(s->obval, s->descr->elsize, "b'", "\\x", "'");
}
@@ -1109,8 +1104,7 @@ static PyNumberMethods gentype_as_number = {
(binaryfunc)gentype_add, /*nb_add*/
(binaryfunc)gentype_subtract, /*nb_subtract*/
(binaryfunc)gentype_multiply, /*nb_multiply*/
-#if defined(NPY_PY3K)
-#else
+#if !defined(NPY_PY3K)
(binaryfunc)gentype_divide, /*nb_divide*/
#endif
(binaryfunc)gentype_remainder, /*nb_remainder*/
@@ -1126,8 +1120,7 @@ static PyNumberMethods gentype_as_number = {
(binaryfunc)gentype_and, /*nb_and*/
(binaryfunc)gentype_xor, /*nb_xor*/
(binaryfunc)gentype_or, /*nb_or*/
-#if defined(NPY_PY3K)
-#else
+#if !defined(NPY_PY3K)
0, /*nb_coerce*/
#endif
(unaryfunc)gentype_int, /*nb_int*/
@@ -1137,16 +1130,14 @@ static PyNumberMethods gentype_as_number = {
(unaryfunc)gentype_long, /*nb_long*/
#endif
(unaryfunc)gentype_float, /*nb_float*/
-#if defined(NPY_PY3K)
-#else
+#if !defined(NPY_PY3K)
(unaryfunc)gentype_oct, /*nb_oct*/
(unaryfunc)gentype_hex, /*nb_hex*/
#endif
0, /*inplace_add*/
0, /*inplace_subtract*/
0, /*inplace_multiply*/
-#if defined(NPY_PY3K)
-#else
+#if !defined(NPY_PY3K)
0, /*inplace_divide*/
#endif
0, /*inplace_remainder*/
@@ -1161,6 +1152,10 @@ static PyNumberMethods gentype_as_number = {
0, /*nb_inplace_floor_divide*/
0, /*nb_inplace_true_divide*/
(unaryfunc)NULL, /*nb_index*/
+#if PY_VERSION_HEX >= 0x03050000
+    0,                                          /*nb_matrix_multiply*/
+    0,                                          /*nb_inplace_matrix_multiply*/
+#endif
};
@@ -1877,7 +1872,7 @@ gentype_reduce(PyObject *self, PyObject *NPY_UNUSED(args))
}
#endif
- mod = PyImport_ImportModule("numpy.core.multiarray");
+ mod = PyImport_ImportModule("numpy.core._multiarray_umath");
if (mod == NULL) {
return NULL;
}
@@ -3755,30 +3750,21 @@ static PyMappingMethods gentype_as_mapping = {
* #CNAME = FLOAT, DOUBLE, LONGDOUBLE#
*/
#if NPY_BITSOF_@CNAME@ == 16
-#define _THIS_SIZE2 "16"
-#define _THIS_SIZE1 "32"
+#define _THIS_SIZE "32"
#elif NPY_BITSOF_@CNAME@ == 32
-#define _THIS_SIZE2 "32"
-#define _THIS_SIZE1 "64"
+#define _THIS_SIZE "64"
#elif NPY_BITSOF_@CNAME@ == 64
-#define _THIS_SIZE2 "64"
-#define _THIS_SIZE1 "128"
+#define _THIS_SIZE "128"
#elif NPY_BITSOF_@CNAME@ == 80
-#define _THIS_SIZE2 "80"
-#define _THIS_SIZE1 "160"
+#define _THIS_SIZE "160"
#elif NPY_BITSOF_@CNAME@ == 96
-#define _THIS_SIZE2 "96"
-#define _THIS_SIZE1 "192"
+#define _THIS_SIZE "192"
#elif NPY_BITSOF_@CNAME@ == 128
-#define _THIS_SIZE2 "128"
-#define _THIS_SIZE1 "256"
+#define _THIS_SIZE "256"
#elif NPY_BITSOF_@CNAME@ == 256
-#define _THIS_SIZE2 "256"
-#define _THIS_SIZE1 "512"
+#define _THIS_SIZE "512"
#endif
-#define _THIS_DOC "Composed of two " _THIS_SIZE2 " bit floats"
-
NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = {
#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(0, 0)
@@ -3786,7 +3772,7 @@ NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = {
PyObject_HEAD_INIT(0)
0, /* ob_size */
#endif
- "numpy.@name@" _THIS_SIZE1, /* tp_name*/
+ "numpy.@name@" _THIS_SIZE, /* tp_name*/
sizeof(Py@NAME@ScalarObject), /* tp_basicsize*/
0, /* tp_itemsize*/
0, /* tp_dealloc*/
@@ -3809,7 +3795,7 @@ NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = {
0, /* tp_setattro*/
0, /* tp_as_buffer*/
Py_TPFLAGS_DEFAULT, /* tp_flags*/
- _THIS_DOC, /* tp_doc */
+ 0, /* tp_doc */
0, /* tp_traverse */
0, /* tp_clear */
0, /* tp_richcompare */
@@ -3837,9 +3823,7 @@ NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = {
0, /* tp_del */
0, /* tp_version_tag */
};
-#undef _THIS_SIZE1
-#undef _THIS_SIZE2
-#undef _THIS_DOC
+#undef _THIS_SIZE
/**end repeat**/
diff --git a/numpy/core/src/multiarray/shape.c b/numpy/core/src/multiarray/shape.c
index 3ac71e285..30820737e 100644
--- a/numpy/core/src/multiarray/shape.c
+++ b/numpy/core/src/multiarray/shape.c
@@ -89,11 +89,19 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck,
return NULL;
}
+ if (PyArray_BASE(self) != NULL
+ || (((PyArrayObject_fields *)self)->weakreflist != NULL)) {
+ PyErr_SetString(PyExc_ValueError,
+ "cannot resize an array that "
+ "references or is referenced\n"
+ "by another array in this way. Use the np.resize function.");
+ return NULL;
+ }
if (refcheck) {
#ifdef PYPY_VERSION
PyErr_SetString(PyExc_ValueError,
"cannot resize an array with refcheck=True on PyPy.\n"
- "Use the resize function or refcheck=False");
+ "Use the np.resize function or refcheck=False");
return NULL;
#else
refcnt = PyArray_REFCOUNT(self);
@@ -102,13 +110,12 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck,
else {
refcnt = 1;
}
- if ((refcnt > 2)
- || (PyArray_BASE(self) != NULL)
- || (((PyArrayObject_fields *)self)->weakreflist != NULL)) {
+ if (refcnt > 2) {
PyErr_SetString(PyExc_ValueError,
"cannot resize an array that "
"references or is referenced\n"
- "by another array in this way. Use the resize function");
+ "by another array in this way.\n"
+ "Use the np.resize function or refcheck=False");
return NULL;
}
diff --git a/numpy/core/src/multiarray/temp_elide.c b/numpy/core/src/multiarray/temp_elide.c
index 3d2f976f2..09b948218 100644
--- a/numpy/core/src/multiarray/temp_elide.c
+++ b/numpy/core/src/multiarray/temp_elide.c
@@ -166,7 +166,7 @@ check_callers(int * cannot)
return 0;
}
/* get multiarray base address */
- if (dladdr(&PyArray_SetNumericOps, &info)) {
+ if (dladdr(&PyArray_INCREF, &info)) {
pos_ma_start = info.dli_fbase;
pos_ma_end = info.dli_fbase;
}
diff --git a/numpy/core/src/npymath/ieee754.c.src b/numpy/core/src/npymath/ieee754.c.src
index 8b5eef87a..d960838c8 100644
--- a/numpy/core/src/npymath/ieee754.c.src
+++ b/numpy/core/src/npymath/ieee754.c.src
@@ -568,13 +568,21 @@ int npy_get_floatstatus() {
/*
* Functions to set the floating point status word.
- * keep in sync with NO_FLOATING_POINT_SUPPORT in ufuncobject.h
*/
#if (defined(__unix__) || defined(unix)) && !defined(USG)
#include <sys/param.h>
#endif
+
+/*
+ * Define floating point status functions. We must define
+ * npy_get_floatstatus_barrier, npy_clear_floatstatus_barrier,
+ * npy_set_floatstatus_{divbyzero, overflow, underflow, invalid}
+ * for all supported platforms.
+ */
+
+
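/*
 * A minimal usage sketch (illustrative, not part of this patch): callers
 * pass a pointer tied to the data they just operated on, so the volatile
 * read inside the barrier functions keeps the compiler from reordering the
 * status query relative to the arithmetic that may set the flags.
 */
static double
checked_divide(double a, double b, int *fpe_out)
{
    double r;
    npy_clear_floatstatus_barrier((char *)&b);
    r = a / b;
    *fpe_out = npy_get_floatstatus_barrier((char *)&r);
    return r;
}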
/* Solaris --------------------------------------------------------*/
/* --------ignoring SunOS ieee_flags approach, someone else can
** deal with that! */
@@ -626,117 +634,94 @@ void npy_set_floatstatus_invalid(void)
fpsetsticky(FP_X_INV);
}
+#elif defined(_AIX)
+#include <float.h>
+#include <fpxcp.h>
-#elif defined(__GLIBC__) || defined(__APPLE__) || \
- defined(__CYGWIN__) || defined(__MINGW32__) || \
- (defined(__FreeBSD__) && (__FreeBSD_version >= 502114))
-# include <fenv.h>
-
-int npy_get_floatstatus_barrier(char* param)
+int npy_get_floatstatus_barrier(char *param)
{
- int fpstatus = fetestexcept(FE_DIVBYZERO | FE_OVERFLOW |
- FE_UNDERFLOW | FE_INVALID);
+ int fpstatus = fp_read_flag();
/*
* By using a volatile, the compiler cannot reorder this call
*/
if (param != NULL) {
volatile char NPY_UNUSED(c) = *(char*)param;
}
-
- return ((FE_DIVBYZERO & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) |
- ((FE_OVERFLOW & fpstatus) ? NPY_FPE_OVERFLOW : 0) |
- ((FE_UNDERFLOW & fpstatus) ? NPY_FPE_UNDERFLOW : 0) |
- ((FE_INVALID & fpstatus) ? NPY_FPE_INVALID : 0);
+ return ((FP_DIV_BY_ZERO & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) |
+ ((FP_OVERFLOW & fpstatus) ? NPY_FPE_OVERFLOW : 0) |
+ ((FP_UNDERFLOW & fpstatus) ? NPY_FPE_UNDERFLOW : 0) |
+ ((FP_INVALID & fpstatus) ? NPY_FPE_INVALID : 0);
}
int npy_clear_floatstatus_barrier(char * param)
{
- /* testing float status is 50-100 times faster than clearing on x86 */
int fpstatus = npy_get_floatstatus_barrier(param);
- if (fpstatus != 0) {
- feclearexcept(FE_DIVBYZERO | FE_OVERFLOW |
- FE_UNDERFLOW | FE_INVALID);
- }
+ fp_swap_flag(0);
return fpstatus;
}
-
void npy_set_floatstatus_divbyzero(void)
{
- feraiseexcept(FE_DIVBYZERO);
+ fp_raise_xcp(FP_DIV_BY_ZERO);
}
void npy_set_floatstatus_overflow(void)
{
- feraiseexcept(FE_OVERFLOW);
+ fp_raise_xcp(FP_OVERFLOW);
}
void npy_set_floatstatus_underflow(void)
{
- feraiseexcept(FE_UNDERFLOW);
+ fp_raise_xcp(FP_UNDERFLOW);
}
void npy_set_floatstatus_invalid(void)
{
- feraiseexcept(FE_INVALID);
-}
-
-#elif defined(_AIX)
-#include <float.h>
-#include <fpxcp.h>
-
-int npy_get_floatstatus_barrier(char *param)
-{
- int fpstatus = fp_read_flag();
- /*
- * By using a volatile, the compiler cannot reorder this call
- */
- if (param != NULL) {
- volatile char NPY_UNUSED(c) = *(char*)param;
- }
- return ((FP_DIV_BY_ZERO & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) |
- ((FP_OVERFLOW & fpstatus) ? NPY_FPE_OVERFLOW : 0) |
- ((FP_UNDERFLOW & fpstatus) ? NPY_FPE_UNDERFLOW : 0) |
- ((FP_INVALID & fpstatus) ? NPY_FPE_INVALID : 0);
+ fp_raise_xcp(FP_INVALID);
}
-int npy_clear_floatstatus_barrier(char * param)
-{
- int fpstatus = npy_get_floatstatus_barrier(param);
- fp_swap_flag(0);
+#elif defined(_MSC_VER) || (defined(__osf__) && defined(__alpha))
- return fpstatus;
-}
+/*
+ * By using a volatile floating point value,
+ * the compiler is forced to actually do the requested
+ * operations because of potential concurrency.
+ *
+ * We shouldn't write multiple values to a single
+ * global here, because that would cause
+ * a race condition.
+ */
+static volatile double _npy_floatstatus_x,
+ _npy_floatstatus_zero = 0.0, _npy_floatstatus_big = 1e300,
+ _npy_floatstatus_small = 1e-300, _npy_floatstatus_inf;
void npy_set_floatstatus_divbyzero(void)
{
- fp_raise_xcp(FP_DIV_BY_ZERO);
+ _npy_floatstatus_x = 1.0 / _npy_floatstatus_zero;
}
void npy_set_floatstatus_overflow(void)
{
- fp_raise_xcp(FP_OVERFLOW);
+ _npy_floatstatus_x = _npy_floatstatus_big * 1e300;
}
void npy_set_floatstatus_underflow(void)
{
- fp_raise_xcp(FP_UNDERFLOW);
+ _npy_floatstatus_x = _npy_floatstatus_small * 1e-300;
}
void npy_set_floatstatus_invalid(void)
{
- fp_raise_xcp(FP_INVALID);
+ _npy_floatstatus_inf = NPY_INFINITY;
+ _npy_floatstatus_x = _npy_floatstatus_inf - NPY_INFINITY;
}
-#else
-
/* MS Windows -----------------------------------------------------*/
#if defined(_MSC_VER)
#include <float.h>
-
int npy_get_floatstatus_barrier(char *param)
{
/*
@@ -796,53 +781,61 @@ int npy_clear_floatstatus_barrier(char *param)
return fpstatus;
}
+#endif
+/* End of defined(_MSC_VER) || (defined(__osf__) && defined(__alpha)) */
+
#else
+/* General GCC code, should work on most platforms */
+# include <fenv.h>
-int npy_get_floatstatus_barrier(char *NPY_UNUSED(param))
+int npy_get_floatstatus_barrier(char* param)
{
- return 0;
+ int fpstatus = fetestexcept(FE_DIVBYZERO | FE_OVERFLOW |
+ FE_UNDERFLOW | FE_INVALID);
+ /*
+ * By using a volatile, the compiler cannot reorder this call
+ */
+ if (param != NULL) {
+ volatile char NPY_UNUSED(c) = *(char*)param;
+ }
+
+ return ((FE_DIVBYZERO & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) |
+ ((FE_OVERFLOW & fpstatus) ? NPY_FPE_OVERFLOW : 0) |
+ ((FE_UNDERFLOW & fpstatus) ? NPY_FPE_UNDERFLOW : 0) |
+ ((FE_INVALID & fpstatus) ? NPY_FPE_INVALID : 0);
}
-int npy_clear_floatstatus_barrier(char *param)
+int npy_clear_floatstatus_barrier(char * param)
{
+ /* testing float status is 50-100 times faster than clearing on x86 */
int fpstatus = npy_get_floatstatus_barrier(param);
- return 0;
-}
+ if (fpstatus != 0) {
+ feclearexcept(FE_DIVBYZERO | FE_OVERFLOW |
+ FE_UNDERFLOW | FE_INVALID);
+ }
-#endif
+ return fpstatus;
+}
-/*
- * By using a volatile floating point value,
- * the compiler is forced to actually do the requested
- * operations because of potential concurrency.
- *
- * We shouldn't write multiple values to a single
- * global here, because that would cause
- * a race condition.
- */
-static volatile double _npy_floatstatus_x,
- _npy_floatstatus_zero = 0.0, _npy_floatstatus_big = 1e300,
- _npy_floatstatus_small = 1e-300, _npy_floatstatus_inf;
void npy_set_floatstatus_divbyzero(void)
{
- _npy_floatstatus_x = 1.0 / _npy_floatstatus_zero;
+ feraiseexcept(FE_DIVBYZERO);
}
void npy_set_floatstatus_overflow(void)
{
- _npy_floatstatus_x = _npy_floatstatus_big * 1e300;
+ feraiseexcept(FE_OVERFLOW);
}
void npy_set_floatstatus_underflow(void)
{
- _npy_floatstatus_x = _npy_floatstatus_small * 1e-300;
+ feraiseexcept(FE_UNDERFLOW);
}
void npy_set_floatstatus_invalid(void)
{
- _npy_floatstatus_inf = NPY_INFINITY;
- _npy_floatstatus_x = _npy_floatstatus_inf - NPY_INFINITY;
+ feraiseexcept(FE_INVALID);
}
#endif
diff --git a/numpy/core/src/private/npy_sort.h b/numpy/core/src/private/npy_sort.h
deleted file mode 100644
index 8c6f05623..000000000
--- a/numpy/core/src/private/npy_sort.h
+++ /dev/null
@@ -1,204 +0,0 @@
-#ifndef __NPY_SORT_H__
-#define __NPY_SORT_H__
-
-/* Python include is for future object sorts */
-#include <Python.h>
-#include <numpy/npy_common.h>
-#include <numpy/ndarraytypes.h>
-
-#define NPY_ENOMEM 1
-#define NPY_ECOMP 2
-
-static NPY_INLINE int npy_get_msb(npy_uintp unum)
-{
- int depth_limit = 0;
- while (unum >>= 1) {
- depth_limit++;
- }
- return depth_limit;
-}
-
-int quicksort_bool(void *vec, npy_intp cnt, void *null);
-int heapsort_bool(void *vec, npy_intp cnt, void *null);
-int mergesort_bool(void *vec, npy_intp cnt, void *null);
-int aquicksort_bool(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_bool(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_bool(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_byte(void *vec, npy_intp cnt, void *null);
-int heapsort_byte(void *vec, npy_intp cnt, void *null);
-int mergesort_byte(void *vec, npy_intp cnt, void *null);
-int aquicksort_byte(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_byte(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_byte(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_ubyte(void *vec, npy_intp cnt, void *null);
-int heapsort_ubyte(void *vec, npy_intp cnt, void *null);
-int mergesort_ubyte(void *vec, npy_intp cnt, void *null);
-int aquicksort_ubyte(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_ubyte(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_ubyte(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_short(void *vec, npy_intp cnt, void *null);
-int heapsort_short(void *vec, npy_intp cnt, void *null);
-int mergesort_short(void *vec, npy_intp cnt, void *null);
-int aquicksort_short(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_short(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_short(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_ushort(void *vec, npy_intp cnt, void *null);
-int heapsort_ushort(void *vec, npy_intp cnt, void *null);
-int mergesort_ushort(void *vec, npy_intp cnt, void *null);
-int aquicksort_ushort(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_ushort(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_ushort(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_int(void *vec, npy_intp cnt, void *null);
-int heapsort_int(void *vec, npy_intp cnt, void *null);
-int mergesort_int(void *vec, npy_intp cnt, void *null);
-int aquicksort_int(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_int(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_int(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_uint(void *vec, npy_intp cnt, void *null);
-int heapsort_uint(void *vec, npy_intp cnt, void *null);
-int mergesort_uint(void *vec, npy_intp cnt, void *null);
-int aquicksort_uint(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_uint(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_uint(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_long(void *vec, npy_intp cnt, void *null);
-int heapsort_long(void *vec, npy_intp cnt, void *null);
-int mergesort_long(void *vec, npy_intp cnt, void *null);
-int aquicksort_long(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_long(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_long(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_ulong(void *vec, npy_intp cnt, void *null);
-int heapsort_ulong(void *vec, npy_intp cnt, void *null);
-int mergesort_ulong(void *vec, npy_intp cnt, void *null);
-int aquicksort_ulong(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_ulong(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_ulong(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_longlong(void *vec, npy_intp cnt, void *null);
-int heapsort_longlong(void *vec, npy_intp cnt, void *null);
-int mergesort_longlong(void *vec, npy_intp cnt, void *null);
-int aquicksort_longlong(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_longlong(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_longlong(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_ulonglong(void *vec, npy_intp cnt, void *null);
-int heapsort_ulonglong(void *vec, npy_intp cnt, void *null);
-int mergesort_ulonglong(void *vec, npy_intp cnt, void *null);
-int aquicksort_ulonglong(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_ulonglong(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_ulonglong(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_half(void *vec, npy_intp cnt, void *null);
-int heapsort_half(void *vec, npy_intp cnt, void *null);
-int mergesort_half(void *vec, npy_intp cnt, void *null);
-int aquicksort_half(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_half(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_half(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_float(void *vec, npy_intp cnt, void *null);
-int heapsort_float(void *vec, npy_intp cnt, void *null);
-int mergesort_float(void *vec, npy_intp cnt, void *null);
-int aquicksort_float(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_float(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_float(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_double(void *vec, npy_intp cnt, void *null);
-int heapsort_double(void *vec, npy_intp cnt, void *null);
-int mergesort_double(void *vec, npy_intp cnt, void *null);
-int aquicksort_double(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_double(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_double(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_longdouble(void *vec, npy_intp cnt, void *null);
-int heapsort_longdouble(void *vec, npy_intp cnt, void *null);
-int mergesort_longdouble(void *vec, npy_intp cnt, void *null);
-int aquicksort_longdouble(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_longdouble(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_longdouble(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_cfloat(void *vec, npy_intp cnt, void *null);
-int heapsort_cfloat(void *vec, npy_intp cnt, void *null);
-int mergesort_cfloat(void *vec, npy_intp cnt, void *null);
-int aquicksort_cfloat(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_cfloat(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_cfloat(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_cdouble(void *vec, npy_intp cnt, void *null);
-int heapsort_cdouble(void *vec, npy_intp cnt, void *null);
-int mergesort_cdouble(void *vec, npy_intp cnt, void *null);
-int aquicksort_cdouble(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_cdouble(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_cdouble(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_clongdouble(void *vec, npy_intp cnt, void *null);
-int heapsort_clongdouble(void *vec, npy_intp cnt, void *null);
-int mergesort_clongdouble(void *vec, npy_intp cnt, void *null);
-int aquicksort_clongdouble(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_clongdouble(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_clongdouble(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_string(void *vec, npy_intp cnt, void *arr);
-int heapsort_string(void *vec, npy_intp cnt, void *arr);
-int mergesort_string(void *vec, npy_intp cnt, void *arr);
-int aquicksort_string(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
-int aheapsort_string(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
-int amergesort_string(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
-
-
-int quicksort_unicode(void *vec, npy_intp cnt, void *arr);
-int heapsort_unicode(void *vec, npy_intp cnt, void *arr);
-int mergesort_unicode(void *vec, npy_intp cnt, void *arr);
-int aquicksort_unicode(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
-int aheapsort_unicode(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
-int amergesort_unicode(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
-
-
-int quicksort_datetime(void *vec, npy_intp cnt, void *null);
-int heapsort_datetime(void *vec, npy_intp cnt, void *null);
-int mergesort_datetime(void *vec, npy_intp cnt, void *null);
-int aquicksort_datetime(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_datetime(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_datetime(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_timedelta(void *vec, npy_intp cnt, void *null);
-int heapsort_timedelta(void *vec, npy_intp cnt, void *null);
-int mergesort_timedelta(void *vec, npy_intp cnt, void *null);
-int aquicksort_timedelta(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_timedelta(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_timedelta(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int npy_quicksort(void *vec, npy_intp cnt, void *arr);
-int npy_heapsort(void *vec, npy_intp cnt, void *arr);
-int npy_mergesort(void *vec, npy_intp cnt, void *arr);
-int npy_aquicksort(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
-int npy_aheapsort(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
-int npy_amergesort(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
-
-#endif
diff --git a/numpy/core/src/private/ufunc_override.c b/numpy/core/src/private/ufunc_override.c
deleted file mode 100644
index 116da3267..000000000
--- a/numpy/core/src/private/ufunc_override.c
+++ /dev/null
@@ -1,163 +0,0 @@
-#define NPY_NO_DEPRECATED_API NPY_API_VERSION
-#define NO_IMPORT_ARRAY
-
-#include "npy_pycompat.h"
-#include "get_attr_string.h"
-#include "npy_import.h"
-
-#include "ufunc_override.h"
-
-/*
- * Check whether an object has __array_ufunc__ defined on its class and it
- * is not the default, i.e., the object is not an ndarray, and its
- * __array_ufunc__ is not the same as that of ndarray.
- *
- * Returns a new reference, the value of type(obj).__array_ufunc__
- *
- * If the __array_ufunc__ matches that of ndarray, or does not exist, return
- * NULL.
- *
- * Note that since this module is used with both multiarray and umath, we do
- * not have access to PyArray_Type and therewith neither to PyArray_CheckExact
- * nor to the default __array_ufunc__ method, so instead we import locally.
- * TODO: Can this really not be done more smartly?
- */
-static PyObject *
-get_non_default_array_ufunc(PyObject *obj)
-{
- static PyObject *ndarray = NULL;
- static PyObject *ndarray_array_ufunc = NULL;
- PyObject *cls_array_ufunc;
-
- /* on first entry, import and cache ndarray and its __array_ufunc__ */
- if (ndarray == NULL) {
- npy_cache_import("numpy.core.multiarray", "ndarray", &ndarray);
- ndarray_array_ufunc = PyObject_GetAttrString(ndarray,
- "__array_ufunc__");
- }
-
- /* Fast return for ndarray */
- if ((PyObject *)Py_TYPE(obj) == ndarray) {
- return NULL;
- }
- /* does the class define __array_ufunc__? */
- cls_array_ufunc = PyArray_LookupSpecial(obj, "__array_ufunc__");
- if (cls_array_ufunc == NULL) {
- return NULL;
- }
- /* is it different from ndarray.__array_ufunc__? */
- if (cls_array_ufunc != ndarray_array_ufunc) {
- return cls_array_ufunc;
- }
- Py_DECREF(cls_array_ufunc);
- return NULL;
-}
-
-/*
- * Check whether a set of input and output args have a non-default
- * `__array_ufunc__` method. Return the number of overrides, setting
- * corresponding objects in PyObject array with_override and the corresponding
- * __array_ufunc__ methods in methods (both only if not NULL, and both using
- * new references).
- *
- * returns -1 on failure.
- */
-NPY_NO_EXPORT int
-PyUFunc_WithOverride(PyObject *args, PyObject *kwds,
- PyObject **with_override, PyObject **methods)
-{
- int i;
-
- int nargs;
- int nout_kwd = 0;
- int out_kwd_is_tuple = 0;
- int num_override_args = 0;
-
- PyObject *obj;
- PyObject *out_kwd_obj = NULL;
- /*
- * Check inputs
- */
- if (!PyTuple_Check(args)) {
- PyErr_SetString(PyExc_TypeError,
- "Internal Numpy error: call to PyUFunc_HasOverride "
- "with non-tuple");
- goto fail;
- }
- nargs = PyTuple_GET_SIZE(args);
- if (nargs > NPY_MAXARGS) {
- PyErr_SetString(PyExc_TypeError,
- "Internal Numpy error: too many arguments in call "
- "to PyUFunc_HasOverride");
- goto fail;
- }
- /* be sure to include possible 'out' keyword argument. */
- if (kwds && PyDict_CheckExact(kwds)) {
- out_kwd_obj = PyDict_GetItemString(kwds, "out");
- if (out_kwd_obj != NULL) {
- out_kwd_is_tuple = PyTuple_CheckExact(out_kwd_obj);
- if (out_kwd_is_tuple) {
- nout_kwd = PyTuple_GET_SIZE(out_kwd_obj);
- }
- else {
- nout_kwd = 1;
- }
- }
- }
-
- for (i = 0; i < nargs + nout_kwd; ++i) {
- PyObject *method;
- if (i < nargs) {
- obj = PyTuple_GET_ITEM(args, i);
- }
- else {
- if (out_kwd_is_tuple) {
- obj = PyTuple_GET_ITEM(out_kwd_obj, i - nargs);
- }
- else {
- obj = out_kwd_obj;
- }
- }
- /*
- * Now see if the object provides an __array_ufunc__. However, we should
- * ignore the base ndarray.__ufunc__, so we skip any ndarray as well as
- * any ndarray subclass instances that did not override __array_ufunc__.
- */
- method = get_non_default_array_ufunc(obj);
- if (method != NULL) {
- if (method == Py_None) {
- PyErr_Format(PyExc_TypeError,
- "operand '%.200s' does not support ufuncs "
- "(__array_ufunc__=None)",
- obj->ob_type->tp_name);
- Py_DECREF(method);
- goto fail;
- }
- if (with_override != NULL) {
- Py_INCREF(obj);
- with_override[num_override_args] = obj;
- }
- if (methods != NULL) {
- methods[num_override_args] = method;
- }
- else {
- Py_DECREF(method);
- }
- ++num_override_args;
- }
- }
- return num_override_args;
-
-fail:
- if (methods != NULL) {
- for (i = 0; i < num_override_args; i++) {
- Py_DECREF(methods[i]);
- }
- }
- if (with_override != NULL) {
- for (i = 0; i < num_override_args; i++) {
- Py_DECREF(with_override[i]);
- }
- }
- return -1;
-}
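
The core test in the deleted helper -- does type(obj) define an __array_ufunc__ different from ndarray's default -- reappears in umath/override.c further below. A minimal, self-contained sketch of that test, for orientation only (the real code uses PyArray_LookupSpecial and caches the imported objects, as above):

#include <Python.h>

/* Returns 1 if type(obj) overrides __array_ufunc__, 0 otherwise.
 * `ndarray` and `ndarray_array_ufunc` are assumed to be the cached
 * objects set up as in the code above. */
static int
has_non_default_array_ufunc(PyObject *obj, PyObject *ndarray,
                            PyObject *ndarray_array_ufunc)
{
    PyObject *attr;
    int override;

    if ((PyObject *)Py_TYPE(obj) == ndarray) {
        return 0;                      /* exact ndarray: never an override */
    }
    attr = PyObject_GetAttrString((PyObject *)Py_TYPE(obj),
                                  "__array_ufunc__");
    if (attr == NULL) {
        PyErr_Clear();                 /* the class does not define it at all */
        return 0;
    }
    override = (attr != ndarray_array_ufunc);
    Py_DECREF(attr);
    return override;
}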
diff --git a/numpy/core/src/private/ufunc_override.h b/numpy/core/src/private/ufunc_override.h
deleted file mode 100644
index 2ed1c626f..000000000
--- a/numpy/core/src/private/ufunc_override.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef __UFUNC_OVERRIDE_H
-#define __UFUNC_OVERRIDE_H
-
-#include "npy_config.h"
-
-/*
- * Check whether a set of input and output args have a non-default
- * `__array_ufunc__` method. Returns the number of overrides, setting
- * corresponding objects in PyObject array with_override (if not NULL).
- * returns -1 on failure.
- */
-NPY_NO_EXPORT int
-PyUFunc_WithOverride(PyObject *args, PyObject *kwds,
- PyObject **with_override, PyObject **methods);
-#endif
diff --git a/numpy/core/src/umath/_umath_tests.c.src b/numpy/core/src/umath/_umath_tests.c.src
index fcbdbe330..8cb74f177 100644
--- a/numpy/core/src/umath/_umath_tests.c.src
+++ b/numpy/core/src/umath/_umath_tests.c.src
@@ -128,6 +128,8 @@ static void
/**end repeat**/
char *matrix_multiply_signature = "(m,n),(n,p)->(m,p)";
+/* reuses the matrix_multiply code, but with a different signature */
+char *matmul_signature = "(m?,n),(n,p?)->(m?,p?)";
/**begin repeat
@@ -195,6 +197,45 @@ static void
/**end repeat**/
+char *cross1d_signature = "(3),(3)->(3)";
+
+/**begin repeat
+
+ #TYPE=LONG,DOUBLE#
+ #typ=npy_long, npy_double#
+*/
+
+/*
+ * This implements the cross product:
+ * out[n, 0] = in1[n, 1]*in2[n, 2] - in1[n, 2]*in2[n, 1]
+ * out[n, 1] = in1[n, 2]*in2[n, 0] - in1[n, 0]*in2[n, 2]
+ * out[n, 2] = in1[n, 0]*in2[n, 1] - in1[n, 1]*in2[n, 0]
+ */
+static void
+@TYPE@_cross1d(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+{
+ INIT_OUTER_LOOP_3
+ npy_intp is1=steps[0], is2=steps[1], os = steps[2];
+ BEGIN_OUTER_LOOP_3
+ @typ@ i1_x = *(@typ@ *)(args[0] + 0*is1);
+ @typ@ i1_y = *(@typ@ *)(args[0] + 1*is1);
+ @typ@ i1_z = *(@typ@ *)(args[0] + 2*is1);
+
+ @typ@ i2_x = *(@typ@ *)(args[1] + 0*is2);
+ @typ@ i2_y = *(@typ@ *)(args[1] + 1*is2);
+ @typ@ i2_z = *(@typ@ *)(args[1] + 2*is2);
+ char *op = args[2];
+
+ *(@typ@ *)op = i1_y * i2_z - i1_z * i2_y;
+ op += os;
+ *(@typ@ *)op = i1_z * i2_x - i1_x * i2_z;
+ op += os;
+ *(@typ@ *)op = i1_x * i2_y - i1_y * i2_x;
+ END_OUTER_LOOP
+}
+
+/**end repeat**/
+
char *euclidean_pdist_signature = "(n,d)->(p)";
/**begin repeat
@@ -285,17 +326,39 @@ static void
/**end repeat**/
+/* The following lines were generated using a slightly modified
+   version of code_generators/generate_umath.py after adding these
+   entries to defdict:
+
+defdict = {
+'inner1d' :
+ Ufunc(2, 1, None_,
+ r'''inner on the last dimension and broadcast on the rest \n"
+ " \"(i),(i)->()\" \n''',
+ TD('ld'),
+ ),
+'innerwt' :
+ Ufunc(3, 1, None_,
+ r'''inner1d with a weight argument \n"
+ " \"(i),(i),(i)->()\" \n''',
+ TD('ld'),
+ ),
+}
+
+*/
static PyUFuncGenericFunction inner1d_functions[] = { LONG_inner1d, DOUBLE_inner1d };
-static void * inner1d_data[] = { (void *)NULL, (void *)NULL };
+static void *inner1d_data[] = { (void *)NULL, (void *)NULL };
static char inner1d_signatures[] = { NPY_LONG, NPY_LONG, NPY_LONG, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE };
static PyUFuncGenericFunction innerwt_functions[] = { LONG_innerwt, DOUBLE_innerwt };
-static void * innerwt_data[] = { (void *)NULL, (void *)NULL };
+static void *innerwt_data[] = { (void *)NULL, (void *)NULL };
static char innerwt_signatures[] = { NPY_LONG, NPY_LONG, NPY_LONG, NPY_LONG, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE };
static PyUFuncGenericFunction matrix_multiply_functions[] = { LONG_matrix_multiply, FLOAT_matrix_multiply, DOUBLE_matrix_multiply };
static void *matrix_multiply_data[] = { (void *)NULL, (void *)NULL, (void *)NULL };
static char matrix_multiply_signatures[] = { NPY_LONG, NPY_LONG, NPY_LONG, NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE };
-
+static PyUFuncGenericFunction cross1d_functions[] = { LONG_cross1d, DOUBLE_cross1d };
+static void *cross1d_data[] = { (void *)NULL, (void *)NULL };
+static char cross1d_signatures[] = { NPY_LONG, NPY_LONG, NPY_LONG, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE };
static PyUFuncGenericFunction euclidean_pdist_functions[] =
{ FLOAT_euclidean_pdist, DOUBLE_euclidean_pdist };
static void *euclidean_pdist_data[] = { (void *)NULL, (void *)NULL };
@@ -303,7 +366,7 @@ static char euclidean_pdist_signatures[] = { NPY_FLOAT, NPY_FLOAT,
NPY_DOUBLE, NPY_DOUBLE };
static PyUFuncGenericFunction cumsum_functions[] = { LONG_cumsum, DOUBLE_cumsum };
-static void * cumsum_data[] = { (void *)NULL, (void *)NULL };
+static void *cumsum_data[] = { (void *)NULL, (void *)NULL };
static char cumsum_signatures[] = { NPY_LONG, NPY_LONG, NPY_DOUBLE, NPY_DOUBLE };
@@ -346,6 +409,17 @@ addUfuncs(PyObject *dictionary) {
}
PyDict_SetItemString(dictionary, "matrix_multiply", f);
Py_DECREF(f);
+ f = PyUFunc_FromFuncAndDataAndSignature(matrix_multiply_functions,
+ matrix_multiply_data, matrix_multiply_signatures,
+ 3, 2, 1, PyUFunc_None, "matmul",
+ "matmul on last two dimensions, with some being optional\n"
+ " \"(m?,n),(n,p?)->(m?,p?)\" \n",
+ 0, matmul_signature);
+ if (f == NULL) {
+ return -1;
+ }
+ PyDict_SetItemString(dictionary, "matmul", f);
+ Py_DECREF(f);
f = PyUFunc_FromFuncAndDataAndSignature(euclidean_pdist_functions,
                          euclidean_pdist_data, euclidean_pdist_signatures,
2, 1, 1, PyUFunc_None, "euclidean_pdist",
@@ -376,6 +450,16 @@ addUfuncs(PyObject *dictionary) {
}
PyDict_SetItemString(dictionary, "inner1d_no_doc", f);
Py_DECREF(f);
+ f = PyUFunc_FromFuncAndDataAndSignature(cross1d_functions, cross1d_data,
+ cross1d_signatures, 2, 2, 1, PyUFunc_None, "cross1d",
+            "cross product on the last dimension and broadcast on the rest \n"
+ " \"(3),(3)->(3)\" \n",
+ 0, cross1d_signature);
+ if (f == NULL) {
+ return -1;
+ }
+ PyDict_SetItemString(dictionary, "cross1d", f);
+ Py_DECREF(f);
return 0;
}
@@ -385,9 +469,10 @@ static PyObject *
UMath_Tests_test_signature(PyObject *NPY_UNUSED(dummy), PyObject *args)
{
int nin, nout, i;
- PyObject *signature, *sig_str;
- PyUFuncObject *f = NULL;
- PyObject *core_num_dims = NULL, *core_dim_ixs = NULL;
+ PyObject *signature=NULL, *sig_str=NULL;
+ PyUFuncObject *f=NULL;
+ PyObject *core_num_dims=NULL, *core_dim_ixs=NULL;
+ PyObject *core_dim_flags=NULL, *core_dim_sizes=NULL;
int core_enabled;
int core_num_ixs = 0;
@@ -442,7 +527,7 @@ UMath_Tests_test_signature(PyObject *NPY_UNUSED(dummy), PyObject *args)
goto fail;
}
for (i = 0; i < core_num_ixs; i++) {
- PyObject * val = PyLong_FromLong(f->core_dim_ixs[i]);
+ PyObject *val = PyLong_FromLong(f->core_dim_ixs[i]);
PyTuple_SET_ITEM(core_dim_ixs, i, val);
}
}
@@ -450,13 +535,44 @@ UMath_Tests_test_signature(PyObject *NPY_UNUSED(dummy), PyObject *args)
Py_INCREF(Py_None);
core_dim_ixs = Py_None;
}
+ if (f->core_dim_flags != NULL) {
+ core_dim_flags = PyTuple_New(f->core_num_dim_ix);
+ if (core_dim_flags == NULL) {
+ goto fail;
+ }
+ for (i = 0; i < f->core_num_dim_ix; i++) {
+ PyObject *val = PyLong_FromLong(f->core_dim_flags[i]);
+ PyTuple_SET_ITEM(core_dim_flags, i, val);
+ }
+ }
+ else {
+ Py_INCREF(Py_None);
+ core_dim_flags = Py_None;
+ }
+ if (f->core_dim_sizes != NULL) {
+ core_dim_sizes = PyTuple_New(f->core_num_dim_ix);
+ if (core_dim_sizes == NULL) {
+ goto fail;
+ }
+ for (i = 0; i < f->core_num_dim_ix; i++) {
+ PyObject *val = PyLong_FromLong(f->core_dim_sizes[i]);
+ PyTuple_SET_ITEM(core_dim_sizes, i, val);
+ }
+ }
+ else {
+ Py_INCREF(Py_None);
+ core_dim_sizes = Py_None;
+ }
Py_DECREF(f);
- return Py_BuildValue("iOO", core_enabled, core_num_dims, core_dim_ixs);
+ return Py_BuildValue("iOOOO", core_enabled, core_num_dims,
+ core_dim_ixs, core_dim_flags, core_dim_sizes);
fail:
Py_XDECREF(f);
Py_XDECREF(core_num_dims);
Py_XDECREF(core_dim_ixs);
+ Py_XDECREF(core_dim_flags);
+ Py_XDECREF(core_dim_sizes);
return NULL;
}
@@ -464,8 +580,8 @@ static PyMethodDef UMath_TestsMethods[] = {
{"test_signature", UMath_Tests_test_signature, METH_VARARGS,
"Test signature parsing of ufunc. \n"
"Arguments: nin nout signature \n"
- "If fails, it returns NULL. Otherwise it will returns 0 for scalar ufunc "
- "and 1 for generalized ufunc. \n",
+     "If it fails, it returns NULL. Otherwise it returns a tuple of ufunc "
+ "internals. \n",
},
{NULL, NULL, 0, NULL} /* Sentinel */
};
@@ -504,6 +620,7 @@ PyMODINIT_FUNC init_umath_tests(void) {
if (m == NULL) {
return RETVAL(NULL);
}
+
import_array();
import_ufunc();
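
The `?` markers in the new matmul test signature use the gufunc flexible-dimension extension (NEP 20): a core dimension flagged with `?` may be missing from an operand, and is then also dropped from the output. Concretely, "(m?,n),(n,p?)->(m?,p?)" accepts the following shape combinations (outer broadcast dimensions aside):

    (m, n) @ (n, p) -> (m, p)    full matrix product
    (n,)   @ (n, p) -> (p,)      m omitted: vector @ matrix
    (m, n) @ (n,)   -> (m,)      p omitted: matrix @ vector
    (n,)   @ (n,)   -> ()        both omitted: vector inner product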
diff --git a/numpy/core/src/umath/cpuid.c b/numpy/core/src/umath/cpuid.c
index 912d51eeb..6744ceb05 100644
--- a/numpy/core/src/umath/cpuid.c
+++ b/numpy/core/src/umath/cpuid.c
@@ -1,13 +1,11 @@
#define _UMATHMODULE
+#define _MULTIARRAYMODULE
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#include <Python.h>
#include "npy_config.h"
-#define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API
-#define NO_IMPORT_ARRAY
-
#include "cpuid.h"
#define XCR_XFEATURE_ENABLED_MASK 0x0
diff --git a/numpy/core/src/umath/extobj.c b/numpy/core/src/umath/extobj.c
index 188054e22..aea1815e8 100644
--- a/numpy/core/src/umath/extobj.c
+++ b/numpy/core/src/umath/extobj.c
@@ -1,13 +1,11 @@
#define _UMATHMODULE
+#define _MULTIARRAYMODULE
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#include <Python.h>
#include "npy_config.h"
-#define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API
-#define NO_IMPORT_ARRAY
-
#include "npy_pycompat.h"
#include "extobj.h"
diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index 0b02031a7..ae3ece77b 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -1,14 +1,12 @@
/* -*- c -*- */
#define _UMATHMODULE
+#define _MULTIARRAYMODULE
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#include "Python.h"
#include "npy_config.h"
-#define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API
-#define NO_IMPORT_ARRAY
-
#include "numpy/npy_common.h"
#include "numpy/arrayobject.h"
#include "numpy/ufuncobject.h"
@@ -1329,27 +1327,12 @@ NPY_NO_EXPORT void
NPY_NO_EXPORT void
@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
- npy_bool give_future_warning = 0;
BINARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
const @type@ in2 = *(@type@ *)ip2;
- const npy_bool res = in1 @OP@ in2;
- *((npy_bool *)op1) = res;
-
- if ((in1 == NPY_DATETIME_NAT || in2 == NPY_DATETIME_NAT) && res) {
- give_future_warning = 1;
- }
- }
- if (give_future_warning) {
- NPY_ALLOW_C_API_DEF
- NPY_ALLOW_C_API;
- /* 2016-01-18, 1.11 */
- if (DEPRECATE_FUTUREWARNING(
- "In the future, 'NAT @OP@ x' and 'x @OP@ NAT' "
- "will always be False.") < 0) {
- /* nothing to do, we return anyway */
- }
- NPY_DISABLE_C_API;
+ *((npy_bool *)op1) = (in1 @OP@ in2 &&
+ in1 != NPY_DATETIME_NAT &&
+ in2 != NPY_DATETIME_NAT);
}
}
/**end repeat1**/
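
These rewritten loop bodies make the 1.11-era FutureWarning semantics permanent: any ordered comparison or equality involving NAT is now False, and NAT != anything (including NAT) is True, mirroring floating-point NaN. A standalone illustration of the equality rule, with LLONG_MIN standing in for the NPY_DATETIME_NAT sentinel (NumPy's most-negative-int64 value):

#include <limits.h>
#include <stdio.h>

/* mirrors the new datetime equality loop body above */
static int
dt_equal(long long a, long long b)
{
    return a == b && a != LLONG_MIN && b != LLONG_MIN;
}

int
main(void)
{
    printf("%d\n", dt_equal(LLONG_MIN, LLONG_MIN));  /* 0: NAT == NAT is False */
    printf("%d\n", dt_equal(5, 5));                  /* 1 */
    return 0;
}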
@@ -1357,26 +1340,12 @@ NPY_NO_EXPORT void
NPY_NO_EXPORT void
@TYPE@_not_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
- npy_bool give_future_warning = 0;
BINARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
const @type@ in2 = *(@type@ *)ip2;
- *((npy_bool *)op1) = in1 != in2;
-
- if (in1 == NPY_DATETIME_NAT && in2 == NPY_DATETIME_NAT) {
- give_future_warning = 1;
- }
- }
- if (give_future_warning) {
- NPY_ALLOW_C_API_DEF
- NPY_ALLOW_C_API;
- /* 2016-01-18, 1.11 */
- if (DEPRECATE_FUTUREWARNING(
- "In the future, NAT != NAT will be True "
- "rather than False.") < 0) {
- /* nothing to do, we return anyway */
- }
- NPY_DISABLE_C_API;
+ *((npy_bool *)op1) = (in1 != in2 ||
+ in1 == NPY_DATETIME_NAT ||
+ in2 == NPY_DATETIME_NAT);
}
}
@@ -1622,6 +1591,34 @@ TIMEDELTA_mm_d_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *
}
}
+NPY_NO_EXPORT void
+TIMEDELTA_mm_m_remainder(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+{
+ BINARY_LOOP {
+ const npy_timedelta in1 = *(npy_timedelta *)ip1;
+ const npy_timedelta in2 = *(npy_timedelta *)ip2;
+ if (in1 == NPY_DATETIME_NAT || in2 == NPY_DATETIME_NAT) {
+ *((npy_timedelta *)op1) = NPY_DATETIME_NAT;
+ }
+ else {
+ if (in2 == 0) {
+ npy_set_floatstatus_divbyzero();
+ *((npy_timedelta *)op1) = 0;
+ }
+ else {
+ /* handle mixed case the way Python does */
+ const npy_timedelta rem = in1 % in2;
+ if ((in1 > 0) == (in2 > 0) || rem == 0) {
+ *((npy_timedelta *)op1) = rem;
+ }
+ else {
+ *((npy_timedelta *)op1) = rem + in2;
+ }
+ }
+ }
+ }
+}
+
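
The mixed-sign branch above gives timedelta remainder Python's floored semantics, in which a nonzero result takes the sign of the divisor, instead of C's truncation toward zero. A standalone sketch of the same rule:

#include <stdio.h>

/* floored remainder, as Python's % operator computes it */
static long long
floored_mod(long long a, long long b)
{
    long long r = a % b;                  /* C truncated remainder */
    if (r != 0 && ((a > 0) != (b > 0))) {
        r += b;                           /* shift into the divisor's sign */
    }
    return r;
}

int
main(void)
{
    printf("%lld\n", floored_mod(-7, 3));  /* 2, matching Python's -7 % 3 */
    printf("%lld\n", floored_mod(7, -3));  /* -2, matching Python's 7 % -3 */
    return 0;
}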
/*
*****************************************************************************
** FLOAT LOOPS **
@@ -1864,11 +1861,9 @@ NPY_NO_EXPORT void
if (!run_unary_reduce_simd_@kind@_@TYPE@(args, dimensions, steps)) {
BINARY_REDUCE_LOOP(@type@) {
const @type@ in2 = *(@type@ *)ip2;
+ /* Order of operations important for MSVC 2015 */
io1 = (io1 @OP@ in2 || npy_isnan(io1)) ? io1 : in2;
}
- if (npy_isnan(io1)) {
- npy_set_floatstatus_invalid();
- }
*((@type@ *)iop1) = io1;
}
}
@@ -1876,13 +1871,12 @@ NPY_NO_EXPORT void
BINARY_LOOP {
@type@ in1 = *(@type@ *)ip1;
const @type@ in2 = *(@type@ *)ip2;
+ /* Order of operations important for MSVC 2015 */
in1 = (in1 @OP@ in2 || npy_isnan(in1)) ? in1 : in2;
- if (npy_isnan(in1)) {
- npy_set_floatstatus_invalid();
- }
*((@type@ *)op1) = in1;
}
}
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
/**end repeat1**/
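
Dropping the separate isnan bookkeeping and folding it into the selection makes NaN sticky in the maximum/minimum loops regardless of how MSVC 2015 orders the operations. A minimal demo of the idiom:

#include <math.h>
#include <stdio.h>

int
main(void)
{
    double io1 = NAN, in2 = 3.0;
    /* same shape as the reduce loop body above, with OP '>' */
    io1 = (io1 > in2 || isnan(io1)) ? io1 : in2;
    printf("%f\n", io1);   /* nan: once NaN, the running value stays NaN */
    return 0;
}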
@@ -1897,6 +1891,7 @@ NPY_NO_EXPORT void
if (IS_BINARY_REDUCE) {
BINARY_REDUCE_LOOP(@type@) {
const @type@ in2 = *(@type@ *)ip2;
+ /* Order of operations important for MSVC 2015 */
io1 = (io1 @OP@ in2 || npy_isnan(in2)) ? io1 : in2;
}
*((@type@ *)iop1) = io1;
@@ -1905,6 +1900,7 @@ NPY_NO_EXPORT void
BINARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
const @type@ in2 = *(@type@ *)ip2;
+ /* Order of operations important for MSVC 2015 */
*((@type@ *)op1) = (in1 @OP@ in2 || npy_isnan(in2)) ? in1 : in2;
}
}
@@ -2230,6 +2226,7 @@ HALF_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED
const npy_half in2 = *(npy_half *)ip2;
*((npy_half *)op1) = (@OP@(in1, in2) || npy_half_isnan(in1)) ? in1 : in2;
}
+ /* npy_half_isnan will never set floatstatus_invalid, so do not clear */
}
/**end repeat**/
@@ -2246,7 +2243,7 @@ HALF_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED
const npy_half in2 = *(npy_half *)ip2;
*((npy_half *)op1) = (@OP@(in1, in2) || npy_half_isnan(in2)) ? in1 : in2;
}
- npy_clear_floatstatus_barrier((char*)dimensions);
+ /* npy_half_isnan will never set floatstatus_invalid, so do not clear */
}
/**end repeat**/
@@ -2784,18 +2781,16 @@ NPY_NO_EXPORT void
@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
- const @ftype@ in1r = ((@ftype@ *)ip1)[0];
- const @ftype@ in1i = ((@ftype@ *)ip1)[1];
+ @ftype@ in1r = ((@ftype@ *)ip1)[0];
+ @ftype@ in1i = ((@ftype@ *)ip1)[1];
const @ftype@ in2r = ((@ftype@ *)ip2)[0];
const @ftype@ in2i = ((@ftype@ *)ip2)[1];
- if (@OP@(in1r, in1i, in2r, in2i) || npy_isnan(in1r) || npy_isnan(in1i)) {
- ((@ftype@ *)op1)[0] = in1r;
- ((@ftype@ *)op1)[1] = in1i;
- }
- else {
- ((@ftype@ *)op1)[0] = in2r;
- ((@ftype@ *)op1)[1] = in2i;
+ if ( !(npy_isnan(in1r) || npy_isnan(in1i) || @OP@(in1r, in1i, in2r, in2i))) {
+ in1r = in2r;
+ in1i = in2i;
}
+ ((@ftype@ *)op1)[0] = in1r;
+ ((@ftype@ *)op1)[1] = in1i;
}
npy_clear_floatstatus_barrier((char*)dimensions);
}
@@ -2813,7 +2808,7 @@ NPY_NO_EXPORT void
const @ftype@ in1i = ((@ftype@ *)ip1)[1];
const @ftype@ in2r = ((@ftype@ *)ip2)[0];
const @ftype@ in2i = ((@ftype@ *)ip2)[1];
- if (@OP@(in1r, in1i, in2r, in2i) || npy_isnan(in2r) || npy_isnan(in2i)) {
+ if (npy_isnan(in2r) || npy_isnan(in2i) || @OP@(in1r, in1i, in2r, in2i)) {
((@ftype@ *)op1)[0] = in1r;
((@ftype@ *)op1)[1] = in1i;
}
@@ -2822,6 +2817,7 @@ NPY_NO_EXPORT void
((@ftype@ *)op1)[1] = in2i;
}
}
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
/**end repeat1**/
diff --git a/numpy/core/src/umath/loops.h.src b/numpy/core/src/umath/loops.h.src
index 5c2b2c22c..9b6327308 100644
--- a/numpy/core/src/umath/loops.h.src
+++ b/numpy/core/src/umath/loops.h.src
@@ -473,6 +473,9 @@ TIMEDELTA_md_m_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *
NPY_NO_EXPORT void
TIMEDELTA_mm_d_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+NPY_NO_EXPORT void
+TIMEDELTA_mm_m_remainder(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+
/* Special case equivalents to above functions */
#define TIMEDELTA_mq_m_true_divide TIMEDELTA_mq_m_divide
diff --git a/numpy/core/src/umath/matmul.c.src b/numpy/core/src/umath/matmul.c.src
new file mode 100644
index 000000000..0cb3c82ad
--- /dev/null
+++ b/numpy/core/src/umath/matmul.c.src
@@ -0,0 +1,402 @@
+/* -*- c -*- */
+
+#define _UMATHMODULE
+#define _MULTIARRAYMODULE
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+
+#include "Python.h"
+
+#include "npy_config.h"
+#include "numpy/npy_common.h"
+#include "numpy/arrayobject.h"
+#include "numpy/ufuncobject.h"
+#include "numpy/npy_math.h"
+#include "numpy/halffloat.h"
+#include "lowlevel_strided_loops.h"
+
+#include "npy_pycompat.h"
+
+#include "npy_cblas.h"
+#include "arraytypes.h" /* For TYPE_dot functions */
+
+#include <assert.h>
+
+/*
+ *****************************************************************************
+ ** BASICS **
+ *****************************************************************************
+ */
+
+/*
+ * -1 to be conservative, in case blas internally uses a for loop with an
+ * inclusive upper bound
+ */
+#define BLAS_MAXSIZE (NPY_MAX_INT - 1)
+
+/*
+ * Determine if a 2d matrix can be used by BLAS
+ * 1. Strides must not alias or overlap
+ * 2. The faster (second) axis must be contiguous
+ * 3. The slower (first) axis stride, in unit steps, must be at least as
+ *    large as the faster axis dimension
+ */
+static NPY_INLINE npy_bool
+is_blasable2d(npy_intp byte_stride1, npy_intp byte_stride2,
+ npy_intp d1, npy_intp d2, npy_intp itemsize)
+{
+ npy_intp unit_stride1 = byte_stride1 / itemsize;
+ if (byte_stride2 != itemsize) {
+ return NPY_FALSE;
+ }
+    if ((byte_stride1 % itemsize == 0) &&
+ (unit_stride1 >= d2) &&
+ (unit_stride1 <= BLAS_MAXSIZE))
+ {
+ return NPY_TRUE;
+ }
+ return NPY_FALSE;
+}
+
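
To make the three conditions concrete: a C-contiguous 3x4 double matrix has byte strides (32, 8), so the fast axis equals the itemsize and the slow axis spans 4 unit steps, at least the fast-axis length. A sketch exercising the helper above (assumes the definitions above are in scope):

#include <stdio.h>

int
main(void)
{
    /* 3x4 C-contiguous doubles, byte strides (32, 8): usable by BLAS */
    printf("%d\n", (int)is_blasable2d(32, 8, 3, 4, 8));   /* 1 */
    /* every other column kept: inner byte stride 16 != itemsize, not usable */
    printf("%d\n", (int)is_blasable2d(32, 16, 3, 2, 8));  /* 0 */
    return 0;
}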
+#if defined(HAVE_CBLAS)
+static const npy_cdouble oneD = {1.0, 0.0}, zeroD = {0.0, 0.0};
+static const npy_cfloat oneF = {1.0, 0.0}, zeroF = {0.0, 0.0};
+
+/**begin repeat
+ *
+ * #name = FLOAT, DOUBLE, CFLOAT, CDOUBLE#
+ * #ctype = npy_float, npy_double, npy_cfloat, npy_cdouble#
+ * #typ = npy_float, npy_double, npy_cfloat, npy_cdouble#
+ * #prefix = s, d, c, z#
+ * #step1 = 1.F, 1., &oneF, &oneD#
+ * #step0 = 0.F, 0., &zeroF, &zeroD#
+ */
+NPY_NO_EXPORT void
+@name@_gemv(void *ip1, npy_intp is1_m, npy_intp is1_n,
+ void *ip2, npy_intp is2_n, npy_intp NPY_UNUSED(is2_p),
+ void *op, npy_intp op_m, npy_intp NPY_UNUSED(op_p),
+ npy_intp m, npy_intp n, npy_intp NPY_UNUSED(p))
+{
+ /*
+ * Vector matrix multiplication -- Level 2 BLAS
+ * arguments
+ * ip1: contiguous data, m*n shape
+ * ip2: data in c order, n*1 shape
+ * op: data in c order, m shape
+ */
+ enum CBLAS_ORDER order;
+ int M, N, lda;
+
+ assert(m <= BLAS_MAXSIZE && n <= BLAS_MAXSIZE);
+ assert (is_blasable2d(is2_n, sizeof(@typ@), n, 1, sizeof(@typ@)));
+ M = (int)m;
+ N = (int)n;
+
+ if (is_blasable2d(is1_m, is1_n, m, n, sizeof(@typ@))) {
+ order = CblasColMajor;
+ lda = (int)(is1_m / sizeof(@typ@));
+ }
+ else {
+ /* If not ColMajor, caller should have ensured we are RowMajor */
+ /* will not assert in release mode */
+ order = CblasRowMajor;
+ assert(is_blasable2d(is1_n, is1_m, n, m, sizeof(@typ@)));
+ lda = (int)(is1_n / sizeof(@typ@));
+ }
+ cblas_@prefix@gemv(order, CblasTrans, N, M, @step1@, ip1, lda, ip2,
+ is2_n / sizeof(@typ@), @step0@, op, op_m / sizeof(@typ@));
+}
+
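
The Level 2 call above is the standard BLAS gemv kernel; with the @step1@/@step0@ constants it degenerates to a plain matrix-vector product. As a reminder of the BLAS definition (not new behaviour):

    y \leftarrow \alpha \, \mathrm{op}(A) \, x + \beta y, \qquad \alpha = 1, \; \beta = 0,

so the call computes y = op(A) x, with op(A) = A^{\mathsf{T}} because CblasTrans is passed to reconcile the row-major strides with BLAS's column-major convention.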
+NPY_NO_EXPORT void
+@name@_matmul_matrixmatrix(void *ip1, npy_intp is1_m, npy_intp is1_n,
+ void *ip2, npy_intp is2_n, npy_intp is2_p,
+ void *op, npy_intp os_m, npy_intp os_p,
+ npy_intp m, npy_intp n, npy_intp p)
+{
+ /*
+ * matrix matrix multiplication -- Level 3 BLAS
+ */
+ enum CBLAS_ORDER order = CblasRowMajor;
+ enum CBLAS_TRANSPOSE trans1, trans2;
+ int M, N, P, lda, ldb, ldc;
+ assert(m <= BLAS_MAXSIZE && n <= BLAS_MAXSIZE && p <= BLAS_MAXSIZE);
+ M = (int)m;
+ N = (int)n;
+ P = (int)p;
+
+ assert(is_blasable2d(os_m, os_p, m, p, sizeof(@typ@)));
+ ldc = (int)(os_m / sizeof(@typ@));
+
+ if (is_blasable2d(is1_m, is1_n, m, n, sizeof(@typ@))) {
+ trans1 = CblasNoTrans;
+ lda = (int)(is1_m / sizeof(@typ@));
+ }
+ else {
+ /* If not ColMajor, caller should have ensured we are RowMajor */
+ /* will not assert in release mode */
+ assert(is_blasable2d(is1_n, is1_m, n, m, sizeof(@typ@)));
+ trans1 = CblasTrans;
+ lda = (int)(is1_n / sizeof(@typ@));
+ }
+
+ if (is_blasable2d(is2_n, is2_p, n, p, sizeof(@typ@))) {
+ trans2 = CblasNoTrans;
+ ldb = (int)(is2_n / sizeof(@typ@));
+ }
+ else {
+ /* If not ColMajor, caller should have ensured we are RowMajor */
+ /* will not assert in release mode */
+ assert(is_blasable2d(is2_p, is2_n, p, n, sizeof(@typ@)));
+ trans2 = CblasTrans;
+ ldb = (int)(is2_p / sizeof(@typ@));
+ }
+ /*
+ * Use syrk if we have a case of a matrix times its transpose.
+ * Otherwise, use gemm for all other cases.
+ */
+ if (
+ (ip1 == ip2) &&
+ (m == p) &&
+ (is1_m == is2_p) &&
+ (is1_n == is2_n) &&
+ (trans1 != trans2)
+ ) {
+ npy_intp i,j;
+ if (trans1 == CblasNoTrans) {
+ cblas_@prefix@syrk(order, CblasUpper, trans1, P, N, @step1@,
+ ip1, lda, @step0@, op, ldc);
+ }
+ else {
+ cblas_@prefix@syrk(order, CblasUpper, trans1, P, N, @step1@,
+ ip1, ldb, @step0@, op, ldc);
+ }
+ /* Copy the triangle */
+ for (i = 0; i < P; i++) {
+ for (j = i + 1; j < P; j++) {
+ ((@typ@*)op)[j * ldc + i] = ((@typ@*)op)[i * ldc + j];
+ }
+ }
+
+ }
+ else {
+ cblas_@prefix@gemm(order, trans1, trans2, M, P, N, @step1@, ip1, lda,
+ ip2, ldb, @step0@, op, ldc);
+ }
+}
+
+/**end repeat**/
+#endif
+
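
The syrk fast path rests on the symmetry of a matrix multiplied by its own transpose:

    C = A A^{\mathsf{T}} \implies C_{ij} = \sum_k A_{ik} A_{jk} = C_{ji},

which is why computing a single triangle and mirroring it across the diagonal (the copy loop above) reconstructs the full product.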
+/*
+ * matmul loops
+ * signature is (m?,n),(n,p?)->(m?,p?)
+ */
+
+/**begin repeat
+ * #TYPE = LONGDOUBLE,
+ * FLOAT, DOUBLE, HALF,
+ * CFLOAT, CDOUBLE, CLONGDOUBLE,
+ * UBYTE, USHORT, UINT, ULONG, ULONGLONG,
+ * BYTE, SHORT, INT, LONG, LONGLONG,
+ * BOOL#
+ * #typ = npy_longdouble,
+ * npy_float,npy_double,npy_half,
+ * npy_cfloat, npy_cdouble, npy_clongdouble,
+ * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong,
+ * npy_byte, npy_short, npy_int, npy_long, npy_longlong,
+ * npy_bool#
+ * #IS_COMPLEX = 0, 0, 0, 0, 1, 1, 1, 0*11#
+ * #IS_HALF = 0, 0, 0, 1, 0*14#
+ */
+
+NPY_NO_EXPORT void
+@TYPE@_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n,
+ void *_ip2, npy_intp is2_n, npy_intp is2_p,
+ void *_op, npy_intp os_m, npy_intp os_p,
+ npy_intp dm, npy_intp dn, npy_intp dp)
+
+{
+ npy_intp m, n, p;
+ npy_intp ib1_n, ib2_n, ib2_p, ob_p;
+ char *ip1 = (char *)_ip1, *ip2 = (char *)_ip2, *op = (char *)_op;
+
+ ib1_n = is1_n * dn;
+ ib2_n = is2_n * dn;
+ ib2_p = is2_p * dp;
+ ob_p = os_p * dp;
+
+ for (m = 0; m < dm; m++) {
+ for (p = 0; p < dp; p++) {
+#if @IS_COMPLEX@ == 1
+ (*(@typ@ *)op).real = 0;
+ (*(@typ@ *)op).imag = 0;
+#elif @IS_HALF@
+ float sum = 0;
+#else
+ *(@typ@ *)op = 0;
+#endif
+ for (n = 0; n < dn; n++) {
+ @typ@ val1 = (*(@typ@ *)ip1);
+ @typ@ val2 = (*(@typ@ *)ip2);
+#if @IS_HALF@
+ sum += npy_half_to_float(val1) * npy_half_to_float(val2);
+#elif @IS_COMPLEX@ == 1
+ (*(@typ@ *)op).real += (val1.real * val2.real) -
+ (val1.imag * val2.imag);
+ (*(@typ@ *)op).imag += (val1.real * val2.imag) +
+ (val1.imag * val2.real);
+#else
+ *(@typ@ *)op += val1 * val2;
+#endif
+ ip2 += is2_n;
+ ip1 += is1_n;
+ }
+#if @IS_HALF@
+ *(@typ@ *)op = npy_float_to_half(sum);
+#endif
+ ip1 -= ib1_n;
+ ip2 -= ib2_n;
+ op += os_p;
+ ip2 += is2_p;
+ }
+ op -= ob_p;
+ ip2 -= ib2_p;
+ ip1 += is1_m;
+ op += os_m;
+ }
+}
+
+/**end repeat**/
+
+/**begin repeat
+ * #TYPE = FLOAT, DOUBLE, LONGDOUBLE, HALF,
+ * CFLOAT, CDOUBLE, CLONGDOUBLE,
+ * UBYTE, USHORT, UINT, ULONG, ULONGLONG,
+ * BYTE, SHORT, INT, LONG, LONGLONG,
+ * BOOL#
+ * #typ = npy_float,npy_double,npy_longdouble, npy_half,
+ * npy_cfloat, npy_cdouble, npy_clongdouble,
+ * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong,
+ * npy_byte, npy_short, npy_int, npy_long, npy_longlong,
+ * npy_bool#
+ * #IS_COMPLEX = 0, 0, 0, 0, 1, 1, 1, 0*11#
+ * #USEBLAS = 1, 1, 0, 0, 1, 1, 0*12#
+ */
+
+
+NPY_NO_EXPORT void
+@TYPE@_matmul(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+{
+ npy_intp dOuter = *dimensions++;
+ npy_intp iOuter;
+ npy_intp s0 = *steps++;
+ npy_intp s1 = *steps++;
+ npy_intp s2 = *steps++;
+ npy_intp dm = dimensions[0];
+ npy_intp dn = dimensions[1];
+ npy_intp dp = dimensions[2];
+ npy_intp is1_m=steps[0], is1_n=steps[1], is2_n=steps[2], is2_p=steps[3],
+ os_m=steps[4], os_p=steps[5];
+#if @USEBLAS@ && defined(HAVE_CBLAS)
+ npy_intp sz = sizeof(@typ@);
+ npy_bool special_case = (dm == 1 || dn == 1 || dp == 1);
+ npy_bool any_zero_dim = (dm == 0 || dn == 0 || dp == 0);
+ npy_bool scalar_out = (dm == 1 && dp == 1);
+ npy_bool scalar_vec = (dn == 1 && (dp == 1 || dm == 1));
+ npy_bool too_big_for_blas = (dm > BLAS_MAXSIZE || dn > BLAS_MAXSIZE ||
+ dp > BLAS_MAXSIZE);
+ npy_bool i1_c_blasable = is_blasable2d(is1_m, is1_n, dm, dn, sz);
+ npy_bool i2_c_blasable = is_blasable2d(is2_n, is2_p, dn, dp, sz);
+ npy_bool i1_f_blasable = is_blasable2d(is1_n, is1_m, dn, dm, sz);
+ npy_bool i2_f_blasable = is_blasable2d(is2_p, is2_n, dp, dn, sz);
+ npy_bool i1blasable = i1_c_blasable || i1_f_blasable;
+ npy_bool i2blasable = i2_c_blasable || i2_f_blasable;
+ npy_bool o_c_blasable = is_blasable2d(os_m, os_p, dm, dp, sz);
+ npy_bool o_f_blasable = is_blasable2d(os_p, os_m, dp, dm, sz);
+ npy_bool vector_matrix = ((dm == 1) && i2blasable &&
+ is_blasable2d(is1_n, sz, dn, 1, sz));
+ npy_bool matrix_vector = ((dp == 1) && i1blasable &&
+ is_blasable2d(is2_n, sz, dn, 1, sz));
+#endif
+
+ for (iOuter = 0; iOuter < dOuter; iOuter++,
+ args[0] += s0, args[1] += s1, args[2] += s2) {
+ void *ip1=args[0], *ip2=args[1], *op=args[2];
+#if @USEBLAS@ && defined(HAVE_CBLAS)
+ /*
+     * TODO: refactor this out to an inner_loop_selector, in
+ * PyUFunc_MatmulLoopSelector. But that call does not have access to
+ * n, m, p and strides.
+ */
+ if (too_big_for_blas || any_zero_dim) {
+ @TYPE@_matmul_inner_noblas(ip1, is1_m, is1_n,
+ ip2, is2_n, is2_p,
+ op, os_m, os_p, dm, dn, dp);
+ }
+ else if (special_case) {
+ /* Special case variants that have a 1 in the core dimensions */
+ if (scalar_out) {
+ /* row @ column, 1,1 output */
+ @TYPE@_dot(ip1, is1_n, ip2, is2_n, op, dn, NULL);
+        } else if (scalar_vec) {
+            /*
+             * 1,1d @ vector or vector @ 1,1d
+             * could use cblas_Xaxpy, but that requires zeroing the output
+             * and would not be faster (XXX prove it)
+             */
+ @TYPE@_matmul_inner_noblas(ip1, is1_m, is1_n,
+ ip2, is2_n, is2_p,
+ op, os_m, os_p, dm, dn, dp);
+ } else if (vector_matrix) {
+ /* vector @ matrix, switch ip1, ip2, p and m */
+ @TYPE@_gemv(ip2, is2_p, is2_n, ip1, is1_n, is1_m,
+ op, os_p, os_m, dp, dn, dm);
+ } else if (matrix_vector) {
+ /* matrix @ vector */
+            @TYPE@_gemv(ip1, is1_m, is1_n, ip2, is2_n, is2_p,
+                        op, os_m, os_p, dm, dn, dp);
+ } else {
+            /* column @ row: 2d output; BLAS not needed, or input not blasable */
+ @TYPE@_matmul_inner_noblas(ip1, is1_m, is1_n,
+ ip2, is2_n, is2_p,
+ op, os_m, os_p, dm, dn, dp);
+ }
+ } else {
+ /* matrix @ matrix */
+ if (i1blasable && i2blasable && o_c_blasable) {
+ @TYPE@_matmul_matrixmatrix(ip1, is1_m, is1_n,
+ ip2, is2_n, is2_p,
+ op, os_m, os_p,
+ dm, dn, dp);
+ } else if (i1blasable && i2blasable && o_f_blasable) {
+ /*
+ * Use transpose equivalence:
+ * matmul(a, b, o) == matmul(b.T, a.T, o.T)
+ */
+ @TYPE@_matmul_matrixmatrix(ip2, is2_p, is2_n,
+ ip1, is1_n, is1_m,
+ op, os_p, os_m,
+ dp, dn, dm);
+ } else {
+                /*
+                 * If the parameters are castable to int and we copy the
+                 * non-blasable input (or the non-C-contiguous output),
+                 * we could still use BLAS, see gh-12365.
+                 */
+ @TYPE@_matmul_inner_noblas(ip1, is1_m, is1_n,
+ ip2, is2_n, is2_p,
+ op, os_m, os_p, dm, dn, dp);
+ }
+ }
+#else
+ @TYPE@_matmul_inner_noblas(ip1, is1_m, is1_n,
+ ip2, is2_n, is2_p,
+ op, os_m, os_p, dm, dn, dp);
+
+#endif
+ }
+}
+
+/**end repeat**/
+
+
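
The "transpose equivalence" branch in @TYPE@_matmul uses the identity

    O = A B \iff O^{\mathsf{T}} = B^{\mathsf{T}} A^{\mathsf{T}},

so when only the transposed output is BLAS-friendly (o_f_blasable), swapping the operands and exchanging each stride pair yields the same result through column-major writes, exactly as the argument shuffle in the second matrixmatrix call does.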
diff --git a/numpy/core/src/umath/matmul.h.src b/numpy/core/src/umath/matmul.h.src
new file mode 100644
index 000000000..16be7675b
--- /dev/null
+++ b/numpy/core/src/umath/matmul.h.src
@@ -0,0 +1,12 @@
+/**begin repeat
+ * #TYPE = FLOAT, DOUBLE, LONGDOUBLE, HALF,
+ * CFLOAT, CDOUBLE, CLONGDOUBLE,
+ * UBYTE, USHORT, UINT, ULONG, ULONGLONG,
+ * BYTE, SHORT, INT, LONG, LONGLONG,
+ * BOOL#
+ **/
+NPY_NO_EXPORT void
+@TYPE@_matmul(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+/**end repeat**/
+
+
diff --git a/numpy/core/src/umath/override.c b/numpy/core/src/umath/override.c
index 4a381ba12..2ea23311b 100644
--- a/numpy/core/src/umath/override.c
+++ b/numpy/core/src/umath/override.c
@@ -5,8 +5,98 @@
#include "numpy/ufuncobject.h"
#include "npy_import.h"
-#include "ufunc_override.h"
#include "override.h"
+#include "ufunc_override.h"
+
+/*
+ * For each positional argument and each argument in a possible "out"
+ * keyword, look for overrides of the standard ufunc behaviour, i.e.,
+ * non-default __array_ufunc__ methods.
+ *
+ * Returns the number of overrides, setting corresponding objects
+ * in PyObject array ``with_override`` and the corresponding
+ * __array_ufunc__ methods in ``methods`` (both using new references).
+ *
+ * Only the first override for a given class is returned.
+ *
+ * Returns -1 on failure.
+ */
+static int
+get_array_ufunc_overrides(PyObject *args, PyObject *kwds,
+ PyObject **with_override, PyObject **methods)
+{
+ int i;
+ int num_override_args = 0;
+ int narg, nout = 0;
+ PyObject *out_kwd_obj;
+ PyObject **arg_objs, **out_objs;
+
+ narg = PyTuple_Size(args);
+ if (narg < 0) {
+ return -1;
+ }
+ arg_objs = PySequence_Fast_ITEMS(args);
+
+ nout = PyUFuncOverride_GetOutObjects(kwds, &out_kwd_obj, &out_objs);
+ if (nout < 0) {
+ return -1;
+ }
+
+ for (i = 0; i < narg + nout; ++i) {
+ PyObject *obj;
+ int j;
+ int new_class = 1;
+
+ if (i < narg) {
+ obj = arg_objs[i];
+ }
+ else {
+ obj = out_objs[i - narg];
+ }
+ /*
+ * Have we seen this class before? If so, ignore.
+ */
+ for (j = 0; j < num_override_args; j++) {
+ new_class = (Py_TYPE(obj) != Py_TYPE(with_override[j]));
+ if (!new_class) {
+ break;
+ }
+ }
+ if (new_class) {
+ /*
+ * Now see if the object provides an __array_ufunc__. However, we should
+         * ignore the base ndarray.__array_ufunc__, so we skip any ndarray as well as
+ * any ndarray subclass instances that did not override __array_ufunc__.
+ */
+ PyObject *method = PyUFuncOverride_GetNonDefaultArrayUfunc(obj);
+ if (method == NULL) {
+ continue;
+ }
+ if (method == Py_None) {
+ PyErr_Format(PyExc_TypeError,
+ "operand '%.200s' does not support ufuncs "
+ "(__array_ufunc__=None)",
+ obj->ob_type->tp_name);
+ Py_DECREF(method);
+ goto fail;
+ }
+ Py_INCREF(obj);
+ with_override[num_override_args] = obj;
+ methods[num_override_args] = method;
+ ++num_override_args;
+ }
+ }
+ Py_DECREF(out_kwd_obj);
+ return num_override_args;
+
+fail:
+ for (i = 0; i < num_override_args; i++) {
+ Py_DECREF(with_override[i]);
+ Py_DECREF(methods[i]);
+ }
+ Py_DECREF(out_kwd_obj);
+ return -1;
+}
/*
* The following functions normalize ufunc arguments. The work done is similar
@@ -359,7 +449,7 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method,
/*
* Check inputs for overrides
*/
- num_override_args = PyUFunc_WithOverride(
+ num_override_args = get_array_ufunc_overrides(
args, kwds, with_override, array_ufunc_methods);
if (num_override_args == -1) {
goto fail;
diff --git a/numpy/core/src/umath/reduction.c b/numpy/core/src/umath/reduction.c
index 8136d7b3f..6d04ce372 100644
--- a/numpy/core/src/umath/reduction.c
+++ b/numpy/core/src/umath/reduction.c
@@ -7,15 +7,13 @@
* See LICENSE.txt for the license.
*/
#define _UMATHMODULE
+#define _MULTIARRAYMODULE
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#define PY_SSIZE_T_CLEAN
#include <Python.h>
#include "npy_config.h"
-#define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API
-#define NO_IMPORT_ARRAY
-
#include <numpy/arrayobject.h>
#include "npy_config.h"
diff --git a/numpy/core/src/umath/scalarmath.c.src b/numpy/core/src/umath/scalarmath.c.src
index 3e29c4b4e..a7987acda 100644
--- a/numpy/core/src/umath/scalarmath.c.src
+++ b/numpy/core/src/umath/scalarmath.c.src
@@ -7,13 +7,11 @@
*/
#define _UMATHMODULE
+#define _MULTIARRAYMODULE
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#include "Python.h"
#include "npy_config.h"
-#define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API
-#define NO_IMPORT_ARRAY
-
#include "numpy/arrayobject.h"
#include "numpy/ufuncobject.h"
#include "numpy/arrayscalars.h"
@@ -1566,7 +1564,6 @@ static PyObject*
}
/**end repeat**/
-
/**begin repeat
* #name = byte, ubyte, short, ushort, int, uint,
* long, ulong, longlong, ulonglong,
@@ -1577,8 +1574,7 @@ static PyNumberMethods @name@_as_number = {
(binaryfunc)@name@_add, /*nb_add*/
(binaryfunc)@name@_subtract, /*nb_subtract*/
(binaryfunc)@name@_multiply, /*nb_multiply*/
-#if defined(NPY_PY3K)
-#else
+#if !defined(NPY_PY3K)
(binaryfunc)@name@_divide, /*nb_divide*/
#endif
(binaryfunc)@name@_remainder, /*nb_remainder*/
@@ -1598,8 +1594,7 @@ static PyNumberMethods @name@_as_number = {
(binaryfunc)@name@_and, /*nb_and*/
(binaryfunc)@name@_xor, /*nb_xor*/
(binaryfunc)@name@_or, /*nb_or*/
-#if defined(NPY_PY3K)
-#else
+#if !defined(NPY_PY3K)
0, /*nb_coerce*/
#endif
(unaryfunc)@name@_int, /*nb_int*/
@@ -1609,16 +1604,14 @@ static PyNumberMethods @name@_as_number = {
(unaryfunc)@name@_long, /*nb_long*/
#endif
(unaryfunc)@name@_float, /*nb_float*/
-#if defined(NPY_PY3K)
-#else
+#if !defined(NPY_PY3K)
(unaryfunc)@name@_oct, /*nb_oct*/
(unaryfunc)@name@_hex, /*nb_hex*/
#endif
0, /*inplace_add*/
0, /*inplace_subtract*/
0, /*inplace_multiply*/
-#if defined(NPY_PY3K)
-#else
+#if !defined(NPY_PY3K)
0, /*inplace_divide*/
#endif
0, /*inplace_remainder*/
@@ -1633,6 +1626,10 @@ static PyNumberMethods @name@_as_number = {
0, /*nb_inplace_floor_divide*/
0, /*nb_inplace_true_divide*/
(unaryfunc)NULL, /*nb_index*/
+#if PY_VERSION_HEX >= 0x03050000
+ 0, /*nb_matrix_multiply*/
+ 0, /*nb_inplace_matrix_multiply*/
+#endif
};
/**end repeat**/
diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src
index 5c0568c12..4bb8569be 100644
--- a/numpy/core/src/umath/simd.inc.src
+++ b/numpy/core/src/umath/simd.inc.src
@@ -17,8 +17,6 @@
#include "lowlevel_strided_loops.h"
#include "numpy/npy_common.h"
-/* for NO_FLOATING_POINT_SUPPORT */
-#include "numpy/ufuncobject.h"
#include "numpy/npy_math.h"
#ifdef NPY_HAVE_SSE2_INTRINSICS
#include <emmintrin.h>
@@ -34,6 +32,8 @@
#include <float.h>
#include <string.h> /* for memcpy */
+#define VECTOR_SIZE_BYTES 16
+
static NPY_INLINE npy_uintp
abs_ptrdiff(char *a, char *b)
{
@@ -132,7 +132,6 @@ abs_ptrdiff(char *a, char *b)
* #func = sqrt, absolute, negative, minimum, maximum#
* #check = IS_BLOCKABLE_UNARY*3, IS_BLOCKABLE_REDUCE*2 #
* #name = unary*3, unary_reduce*2#
- * #minmax = 0*3, 1*2#
*/
#if @vector@ && defined NPY_HAVE_SSE2_INTRINSICS
@@ -146,17 +145,13 @@ sse2_@func@_@TYPE@(@type@ *, @type@ *, const npy_intp n);
static NPY_INLINE int
run_@name@_simd_@func@_@TYPE@(char **args, npy_intp *dimensions, npy_intp *steps)
{
-#if @minmax@ && (defined NO_FLOATING_POINT_SUPPORT)
- return 0;
-#else
#if @vector@ && defined NPY_HAVE_SSE2_INTRINSICS
- if (@check@(sizeof(@type@), 16)) {
+ if (@check@(sizeof(@type@), VECTOR_SIZE_BYTES)) {
sse2_@func@_@TYPE@((@type@*)args[1], (@type@*)args[0], dimensions[0]);
return 1;
}
#endif
return 0;
-#endif
}
/**end repeat1**/
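
The LOOP_BLOCK_ALIGN_VAR / LOOP_BLOCKED macros used throughout this file expand to a peel/vector/tail pattern. A macro-free, hypothetical simplification, with scalar statements standing in for the SSE2 intrinsics:

#include <stddef.h>
#include <stdint.h>

void
add_one_blocked(float *op, const float *ip, size_t n)
{
    size_t i = 0;
    /* peel: scalar iterations until the output reaches 16-byte alignment */
    while (i < n && ((uintptr_t)&op[i] % 16) != 0) {
        op[i] = ip[i] + 1.0f;
        i++;
    }
    /* blocked body: one 16-byte vector (4 floats) per iteration;
     * _mm_load_ps / _mm_add_ps / _mm_store_ps would replace these lines */
    for (; i + 4 <= n; i += 4) {
        op[i]     = ip[i]     + 1.0f;
        op[i + 1] = ip[i + 1] + 1.0f;
        op[i + 2] = ip[i + 2] + 1.0f;
        op[i + 3] = ip[i + 3] + 1.0f;
    }
    /* tail: remaining elements, again scalar */
    for (; i < n; i++) {
        op[i] = ip[i] + 1.0f;
    }
}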
@@ -189,17 +184,24 @@ run_binary_simd_@kind@_@TYPE@(char **args, npy_intp *dimensions, npy_intp *steps
@type@ * ip2 = (@type@ *)args[1];
@type@ * op = (@type@ *)args[2];
npy_intp n = dimensions[0];
+#if defined __AVX512F__
+ const npy_intp vector_size_bytes = 64;
+#elif defined __AVX2__
+ const npy_intp vector_size_bytes = 32;
+#else
+    const npy_intp vector_size_bytes = VECTOR_SIZE_BYTES;
+#endif
/* argument one scalar */
- if (IS_BLOCKABLE_BINARY_SCALAR1(sizeof(@type@), 16)) {
+ if (IS_BLOCKABLE_BINARY_SCALAR1(sizeof(@type@), vector_size_bytes)) {
sse2_binary_scalar1_@kind@_@TYPE@(op, ip1, ip2, n);
return 1;
}
/* argument two scalar */
- else if (IS_BLOCKABLE_BINARY_SCALAR2(sizeof(@type@), 16)) {
+ else if (IS_BLOCKABLE_BINARY_SCALAR2(sizeof(@type@), vector_size_bytes)) {
sse2_binary_scalar2_@kind@_@TYPE@(op, ip1, ip2, n);
return 1;
}
- else if (IS_BLOCKABLE_BINARY(sizeof(@type@), 16)) {
+ else if (IS_BLOCKABLE_BINARY(sizeof(@type@), vector_size_bytes)) {
sse2_binary_@kind@_@TYPE@(op, ip1, ip2, n);
return 1;
}
@@ -239,16 +241,16 @@ run_binary_simd_@kind@_@TYPE@(char **args, npy_intp *dimensions, npy_intp *steps
npy_bool * op = (npy_bool *)args[2];
npy_intp n = dimensions[0];
/* argument one scalar */
- if (IS_BLOCKABLE_BINARY_SCALAR1_BOOL(sizeof(@type@), 16)) {
+ if (IS_BLOCKABLE_BINARY_SCALAR1_BOOL(sizeof(@type@), VECTOR_SIZE_BYTES)) {
sse2_binary_scalar1_@kind@_@TYPE@(op, ip1, ip2, n);
return 1;
}
/* argument two scalar */
- else if (IS_BLOCKABLE_BINARY_SCALAR2_BOOL(sizeof(@type@), 16)) {
+ else if (IS_BLOCKABLE_BINARY_SCALAR2_BOOL(sizeof(@type@), VECTOR_SIZE_BYTES)) {
sse2_binary_scalar2_@kind@_@TYPE@(op, ip1, ip2, n);
return 1;
}
- else if (IS_BLOCKABLE_BINARY_BOOL(sizeof(@type@), 16)) {
+ else if (IS_BLOCKABLE_BINARY_BOOL(sizeof(@type@), VECTOR_SIZE_BYTES)) {
sse2_binary_@kind@_@TYPE@(op, ip1, ip2, n);
return 1;
}
@@ -309,7 +311,8 @@ static NPY_INLINE int
run_binary_simd_@kind@_BOOL(char **args, npy_intp *dimensions, npy_intp *steps)
{
#if defined NPY_HAVE_SSE2_INTRINSICS
- if (sizeof(npy_bool) == 1 && IS_BLOCKABLE_BINARY(sizeof(npy_bool), 16)) {
+ if (sizeof(npy_bool) == 1 &&
+ IS_BLOCKABLE_BINARY(sizeof(npy_bool), VECTOR_SIZE_BYTES)) {
sse2_binary_@kind@_BOOL((npy_bool*)args[2], (npy_bool*)args[0],
(npy_bool*)args[1], dimensions[0]);
return 1;
@@ -323,7 +326,8 @@ static NPY_INLINE int
run_reduce_simd_@kind@_BOOL(char **args, npy_intp *dimensions, npy_intp *steps)
{
#if defined NPY_HAVE_SSE2_INTRINSICS
- if (sizeof(npy_bool) == 1 && IS_BLOCKABLE_REDUCE(sizeof(npy_bool), 16)) {
+ if (sizeof(npy_bool) == 1 &&
+ IS_BLOCKABLE_REDUCE(sizeof(npy_bool), VECTOR_SIZE_BYTES)) {
sse2_reduce_@kind@_BOOL((npy_bool*)args[0], (npy_bool*)args[1],
dimensions[0]);
return 1;
@@ -347,7 +351,8 @@ static NPY_INLINE int
run_unary_simd_@kind@_BOOL(char **args, npy_intp *dimensions, npy_intp *steps)
{
#if defined NPY_HAVE_SSE2_INTRINSICS
- if (sizeof(npy_bool) == 1 && IS_BLOCKABLE_UNARY(sizeof(npy_bool), 16)) {
+ if (sizeof(npy_bool) == 1 &&
+ IS_BLOCKABLE_UNARY(sizeof(npy_bool), VECTOR_SIZE_BYTES)) {
sse2_@kind@_BOOL((npy_bool*)args[1], (npy_bool*)args[0], dimensions[0]);
return 1;
}
@@ -423,19 +428,20 @@ static void
sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n)
{
#ifdef __AVX512F__
- LOOP_BLOCK_ALIGN_VAR(op, @type@, 64)
+ const npy_intp vector_size_bytes = 64;
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes)
op[i] = ip1[i] @OP@ ip2[i];
/* lots of specializations, to squeeze out max performance */
- if (npy_is_aligned(&ip1[i], 64) && npy_is_aligned(&ip2[i], 64)) {
+ if (npy_is_aligned(&ip1[i], vector_size_bytes) && npy_is_aligned(&ip2[i], vector_size_bytes)) {
if (ip1 == ip2) {
- LOOP_BLOCKED(@type@, 64) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype512@ a = @vpre512@_load_@vsuf@(&ip1[i]);
@vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, a);
@vpre512@_store_@vsuf@(&op[i], c);
}
}
else {
- LOOP_BLOCKED(@type@, 64) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype512@ a = @vpre512@_load_@vsuf@(&ip1[i]);
@vtype512@ b = @vpre512@_load_@vsuf@(&ip2[i]);
@vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b);
@@ -443,16 +449,16 @@ sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n)
}
}
}
- else if (npy_is_aligned(&ip1[i], 64)) {
- LOOP_BLOCKED(@type@, 64) {
+ else if (npy_is_aligned(&ip1[i], vector_size_bytes)) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype512@ a = @vpre512@_load_@vsuf@(&ip1[i]);
@vtype512@ b = @vpre512@_loadu_@vsuf@(&ip2[i]);
@vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b);
@vpre512@_store_@vsuf@(&op[i], c);
}
}
- else if (npy_is_aligned(&ip2[i], 64)) {
- LOOP_BLOCKED(@type@, 64) {
+ else if (npy_is_aligned(&ip2[i], vector_size_bytes)) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype512@ a = @vpre512@_loadu_@vsuf@(&ip1[i]);
@vtype512@ b = @vpre512@_load_@vsuf@(&ip2[i]);
@vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b);
@@ -461,14 +467,14 @@ sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n)
}
else {
if (ip1 == ip2) {
- LOOP_BLOCKED(@type@, 64) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype512@ a = @vpre512@_loadu_@vsuf@(&ip1[i]);
@vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, a);
@vpre512@_store_@vsuf@(&op[i], c);
}
}
else {
- LOOP_BLOCKED(@type@, 64) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype512@ a = @vpre512@_loadu_@vsuf@(&ip1[i]);
@vtype512@ b = @vpre512@_loadu_@vsuf@(&ip2[i]);
@vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b);
@@ -477,19 +483,21 @@ sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n)
}
}
#elif __AVX2__
- LOOP_BLOCK_ALIGN_VAR(op, @type@, 32)
+ const npy_intp vector_size_bytes = 32;
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes)
op[i] = ip1[i] @OP@ ip2[i];
/* lots of specializations, to squeeze out max performance */
- if (npy_is_aligned(&ip1[i], 32) && npy_is_aligned(&ip2[i], 32)) {
+ if (npy_is_aligned(&ip1[i], vector_size_bytes) &&
+ npy_is_aligned(&ip2[i], vector_size_bytes)) {
if (ip1 == ip2) {
- LOOP_BLOCKED(@type@, 32) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype256@ a = @vpre256@_load_@vsuf@(&ip1[i]);
@vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, a);
@vpre256@_store_@vsuf@(&op[i], c);
}
}
else {
- LOOP_BLOCKED(@type@, 32) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype256@ a = @vpre256@_load_@vsuf@(&ip1[i]);
@vtype256@ b = @vpre256@_load_@vsuf@(&ip2[i]);
@vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b);
@@ -497,16 +505,16 @@ sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n)
}
}
}
- else if (npy_is_aligned(&ip1[i], 32)) {
- LOOP_BLOCKED(@type@, 32) {
+ else if (npy_is_aligned(&ip1[i], vector_size_bytes)) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype256@ a = @vpre256@_load_@vsuf@(&ip1[i]);
@vtype256@ b = @vpre256@_loadu_@vsuf@(&ip2[i]);
@vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b);
@vpre256@_store_@vsuf@(&op[i], c);
}
}
- else if (npy_is_aligned(&ip2[i], 32)) {
- LOOP_BLOCKED(@type@, 32) {
+ else if (npy_is_aligned(&ip2[i], vector_size_bytes)) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype256@ a = @vpre256@_loadu_@vsuf@(&ip1[i]);
@vtype256@ b = @vpre256@_load_@vsuf@(&ip2[i]);
@vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b);
@@ -515,14 +523,14 @@ sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n)
}
else {
if (ip1 == ip2) {
- LOOP_BLOCKED(@type@, 32) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype256@ a = @vpre256@_loadu_@vsuf@(&ip1[i]);
@vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, a);
@vpre256@_store_@vsuf@(&op[i], c);
}
}
else {
- LOOP_BLOCKED(@type@, 32) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype256@ a = @vpre256@_loadu_@vsuf@(&ip1[i]);
@vtype256@ b = @vpre256@_loadu_@vsuf@(&ip2[i]);
@vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b);
@@ -531,19 +539,20 @@ sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n)
}
}
#else
- LOOP_BLOCK_ALIGN_VAR(op, @type@, 16)
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES)
op[i] = ip1[i] @OP@ ip2[i];
/* lots of specializations, to squeeze out max performance */
- if (npy_is_aligned(&ip1[i], 16) && npy_is_aligned(&ip2[i], 16)) {
+ if (npy_is_aligned(&ip1[i], VECTOR_SIZE_BYTES) &&
+ npy_is_aligned(&ip2[i], VECTOR_SIZE_BYTES)) {
if (ip1 == ip2) {
- LOOP_BLOCKED(@type@, 16) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ a = @vpre@_load_@vsuf@(&ip1[i]);
@vtype@ c = @vpre@_@VOP@_@vsuf@(a, a);
@vpre@_store_@vsuf@(&op[i], c);
}
}
else {
- LOOP_BLOCKED(@type@, 16) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ a = @vpre@_load_@vsuf@(&ip1[i]);
@vtype@ b = @vpre@_load_@vsuf@(&ip2[i]);
@vtype@ c = @vpre@_@VOP@_@vsuf@(a, b);
@@ -551,16 +560,16 @@ sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n)
}
}
}
- else if (npy_is_aligned(&ip1[i], 16)) {
- LOOP_BLOCKED(@type@, 16) {
+ else if (npy_is_aligned(&ip1[i], VECTOR_SIZE_BYTES)) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ a = @vpre@_load_@vsuf@(&ip1[i]);
@vtype@ b = @vpre@_loadu_@vsuf@(&ip2[i]);
@vtype@ c = @vpre@_@VOP@_@vsuf@(a, b);
@vpre@_store_@vsuf@(&op[i], c);
}
}
- else if (npy_is_aligned(&ip2[i], 16)) {
- LOOP_BLOCKED(@type@, 16) {
+ else if (npy_is_aligned(&ip2[i], VECTOR_SIZE_BYTES)) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ a = @vpre@_loadu_@vsuf@(&ip1[i]);
@vtype@ b = @vpre@_load_@vsuf@(&ip2[i]);
@vtype@ c = @vpre@_@VOP@_@vsuf@(a, b);
@@ -569,14 +578,14 @@ sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n)
}
else {
if (ip1 == ip2) {
- LOOP_BLOCKED(@type@, 16) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ a = @vpre@_loadu_@vsuf@(&ip1[i]);
@vtype@ c = @vpre@_@VOP@_@vsuf@(a, a);
@vpre@_store_@vsuf@(&op[i], c);
}
}
else {
- LOOP_BLOCKED(@type@, 16) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ a = @vpre@_loadu_@vsuf@(&ip1[i]);
@vtype@ b = @vpre@_loadu_@vsuf@(&ip2[i]);
@vtype@ c = @vpre@_@VOP@_@vsuf@(a, b);
@@ -595,18 +604,19 @@ static void
sse2_binary_scalar1_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n)
{
#ifdef __AVX512F__
+ const npy_intp vector_size_bytes = 64;
const @vtype512@ a = @vpre512@_set1_@vsuf@(ip1[0]);
- LOOP_BLOCK_ALIGN_VAR(op, @type@, 64)
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes)
op[i] = ip1[0] @OP@ ip2[i];
- if (npy_is_aligned(&ip2[i], 64)) {
- LOOP_BLOCKED(@type@, 64) {
+ if (npy_is_aligned(&ip2[i], vector_size_bytes)) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype512@ b = @vpre512@_load_@vsuf@(&ip2[i]);
@vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b);
@vpre512@_store_@vsuf@(&op[i], c);
}
}
else {
- LOOP_BLOCKED(@type@, 64) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype512@ b = @vpre512@_loadu_@vsuf@(&ip2[i]);
@vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b);
@vpre512@_store_@vsuf@(&op[i], c);
@@ -615,18 +625,19 @@ sse2_binary_scalar1_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_i
#elif __AVX2__
+ const npy_intp vector_size_bytes = 32;
const @vtype256@ a = @vpre256@_set1_@vsuf@(ip1[0]);
- LOOP_BLOCK_ALIGN_VAR(op, @type@, 32)
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes)
op[i] = ip1[0] @OP@ ip2[i];
- if (npy_is_aligned(&ip2[i], 32)) {
- LOOP_BLOCKED(@type@, 32) {
+ if (npy_is_aligned(&ip2[i], vector_size_bytes)) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype256@ b = @vpre256@_load_@vsuf@(&ip2[i]);
@vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b);
@vpre256@_store_@vsuf@(&op[i], c);
}
}
else {
- LOOP_BLOCKED(@type@, 32) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype256@ b = @vpre256@_loadu_@vsuf@(&ip2[i]);
@vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b);
@vpre256@_store_@vsuf@(&op[i], c);
@@ -634,17 +645,17 @@ sse2_binary_scalar1_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_i
}
#else
const @vtype@ a = @vpre@_set1_@vsuf@(ip1[0]);
- LOOP_BLOCK_ALIGN_VAR(op, @type@, 16)
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES)
op[i] = ip1[0] @OP@ ip2[i];
- if (npy_is_aligned(&ip2[i], 16)) {
- LOOP_BLOCKED(@type@, 16) {
+ if (npy_is_aligned(&ip2[i], VECTOR_SIZE_BYTES)) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ b = @vpre@_load_@vsuf@(&ip2[i]);
@vtype@ c = @vpre@_@VOP@_@vsuf@(a, b);
@vpre@_store_@vsuf@(&op[i], c);
}
}
else {
- LOOP_BLOCKED(@type@, 16) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ b = @vpre@_loadu_@vsuf@(&ip2[i]);
@vtype@ c = @vpre@_@VOP@_@vsuf@(a, b);
@vpre@_store_@vsuf@(&op[i], c);
@@ -661,18 +672,19 @@ static void
sse2_binary_scalar2_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n)
{
#ifdef __AVX512F__
+ const npy_intp vector_size_bytes = 64;
const @vtype512@ b = @vpre512@_set1_@vsuf@(ip2[0]);
- LOOP_BLOCK_ALIGN_VAR(op, @type@, 64)
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes)
op[i] = ip1[i] @OP@ ip2[0];
- if (npy_is_aligned(&ip1[i], 64)) {
- LOOP_BLOCKED(@type@, 64) {
+ if (npy_is_aligned(&ip1[i], vector_size_bytes)) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype512@ a = @vpre512@_load_@vsuf@(&ip1[i]);
@vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b);
@vpre512@_store_@vsuf@(&op[i], c);
}
}
else {
- LOOP_BLOCKED(@type@, 64) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype512@ a = @vpre512@_loadu_@vsuf@(&ip1[i]);
@vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b);
@vpre512@_store_@vsuf@(&op[i], c);
@@ -680,18 +692,19 @@ sse2_binary_scalar2_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_i
}
#elif __AVX2__
+ const npy_intp vector_size_bytes = 32;
const @vtype256@ b = @vpre256@_set1_@vsuf@(ip2[0]);
- LOOP_BLOCK_ALIGN_VAR(op, @type@, 32)
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes)
op[i] = ip1[i] @OP@ ip2[0];
- if (npy_is_aligned(&ip1[i], 32)) {
- LOOP_BLOCKED(@type@, 32) {
+ if (npy_is_aligned(&ip1[i], vector_size_bytes)) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype256@ a = @vpre256@_load_@vsuf@(&ip1[i]);
@vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b);
@vpre256@_store_@vsuf@(&op[i], c);
}
}
else {
- LOOP_BLOCKED(@type@, 32) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype256@ a = @vpre256@_loadu_@vsuf@(&ip1[i]);
@vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b);
@vpre256@_store_@vsuf@(&op[i], c);
@@ -699,17 +712,17 @@ sse2_binary_scalar2_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_i
}
#else
const @vtype@ b = @vpre@_set1_@vsuf@(ip2[0]);
- LOOP_BLOCK_ALIGN_VAR(op, @type@, 16)
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES)
op[i] = ip1[i] @OP@ ip2[0];
- if (npy_is_aligned(&ip1[i], 16)) {
- LOOP_BLOCKED(@type@, 16) {
+ if (npy_is_aligned(&ip1[i], VECTOR_SIZE_BYTES)) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ a = @vpre@_load_@vsuf@(&ip1[i]);
@vtype@ c = @vpre@_@VOP@_@vsuf@(a, b);
@vpre@_store_@vsuf@(&op[i], c);
}
}
else {
- LOOP_BLOCKED(@type@, 16) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ a = @vpre@_loadu_@vsuf@(&ip1[i]);
@vtype@ c = @vpre@_@VOP@_@vsuf@(a, b);
@vpre@_store_@vsuf@(&op[i], c);
@@ -749,10 +762,10 @@ sse2_compress4_to_byte_@TYPE@(@vtype@ r1, @vtype@ r2, @vtype@ r3, @vtype@ * r4,
static void
sse2_signbit_@TYPE@(npy_bool * op, @type@ * ip1, npy_intp n)
{
- LOOP_BLOCK_ALIGN_VAR(ip1, @type@, 16) {
+ LOOP_BLOCK_ALIGN_VAR(ip1, @type@, VECTOR_SIZE_BYTES) {
op[i] = npy_signbit(ip1[i]) != 0;
}
- LOOP_BLOCKED(@type@, 16) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ a = @vpre@_load_@vsuf@(&ip1[i]);
int r = @vpre@_movemask_@vsuf@(a);
if (sizeof(@type@) == 8) {
@@ -790,14 +803,14 @@ sse2_@kind@_@TYPE@(npy_bool * op, @type@ * ip1, npy_intp n)
const @vtype@ fltmax = @vpre@_set1_@vsuf@(FLT_MAX);
#endif
#endif
- LOOP_BLOCK_ALIGN_VAR(ip1, @type@, 16) {
+ LOOP_BLOCK_ALIGN_VAR(ip1, @type@, VECTOR_SIZE_BYTES) {
op[i] = npy_@kind@(ip1[i]) != 0;
}
- LOOP_BLOCKED(@type@, 64) {
- @vtype@ a = @vpre@_load_@vsuf@(&ip1[i + 0 * 16 / sizeof(@type@)]);
- @vtype@ b = @vpre@_load_@vsuf@(&ip1[i + 1 * 16 / sizeof(@type@)]);
- @vtype@ c = @vpre@_load_@vsuf@(&ip1[i + 2 * 16 / sizeof(@type@)]);
- @vtype@ d = @vpre@_load_@vsuf@(&ip1[i + 3 * 16 / sizeof(@type@)]);
+ LOOP_BLOCKED(@type@, 4 * VECTOR_SIZE_BYTES) {
+ @vtype@ a = @vpre@_load_@vsuf@(&ip1[i + 0 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
+ @vtype@ b = @vpre@_load_@vsuf@(&ip1[i + 1 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
+ @vtype@ c = @vpre@_load_@vsuf@(&ip1[i + 2 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
+ @vtype@ d = @vpre@_load_@vsuf@(&ip1[i + 3 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
@vtype@ r1, r2, r3, r4;
#if @var@ != 0 /* isinf/isfinite */
/* fabs via masking of sign bit */
@@ -860,18 +873,18 @@ sse2_ordered_cmp_@kind@_@TYPE@(const @type@ a, const @type@ b)
static void
sse2_binary_@kind@_@TYPE@(npy_bool * op, @type@ * ip1, @type@ * ip2, npy_intp n)
{
- LOOP_BLOCK_ALIGN_VAR(ip1, @type@, 16) {
+ LOOP_BLOCK_ALIGN_VAR(ip1, @type@, VECTOR_SIZE_BYTES) {
op[i] = sse2_ordered_cmp_@kind@_@TYPE@(ip1[i], ip2[i]);
}
- LOOP_BLOCKED(@type@, 64) {
- @vtype@ a1 = @vpre@_load_@vsuf@(&ip1[i + 0 * 16 / sizeof(@type@)]);
- @vtype@ b1 = @vpre@_load_@vsuf@(&ip1[i + 1 * 16 / sizeof(@type@)]);
- @vtype@ c1 = @vpre@_load_@vsuf@(&ip1[i + 2 * 16 / sizeof(@type@)]);
- @vtype@ d1 = @vpre@_load_@vsuf@(&ip1[i + 3 * 16 / sizeof(@type@)]);
- @vtype@ a2 = @vpre@_loadu_@vsuf@(&ip2[i + 0 * 16 / sizeof(@type@)]);
- @vtype@ b2 = @vpre@_loadu_@vsuf@(&ip2[i + 1 * 16 / sizeof(@type@)]);
- @vtype@ c2 = @vpre@_loadu_@vsuf@(&ip2[i + 2 * 16 / sizeof(@type@)]);
- @vtype@ d2 = @vpre@_loadu_@vsuf@(&ip2[i + 3 * 16 / sizeof(@type@)]);
+ LOOP_BLOCKED(@type@, 4 * VECTOR_SIZE_BYTES) {
+ @vtype@ a1 = @vpre@_load_@vsuf@(&ip1[i + 0 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
+ @vtype@ b1 = @vpre@_load_@vsuf@(&ip1[i + 1 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
+ @vtype@ c1 = @vpre@_load_@vsuf@(&ip1[i + 2 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
+ @vtype@ d1 = @vpre@_load_@vsuf@(&ip1[i + 3 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
+ @vtype@ a2 = @vpre@_loadu_@vsuf@(&ip2[i + 0 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
+ @vtype@ b2 = @vpre@_loadu_@vsuf@(&ip2[i + 1 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
+ @vtype@ c2 = @vpre@_loadu_@vsuf@(&ip2[i + 2 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
+ @vtype@ d2 = @vpre@_loadu_@vsuf@(&ip2[i + 3 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
@vtype@ r1 = @vpre@_@VOP@_@vsuf@(a1, a2);
@vtype@ r2 = @vpre@_@VOP@_@vsuf@(b1, b2);
@vtype@ r3 = @vpre@_@VOP@_@vsuf@(c1, c2);
@@ -888,14 +901,14 @@ static void
sse2_binary_scalar1_@kind@_@TYPE@(npy_bool * op, @type@ * ip1, @type@ * ip2, npy_intp n)
{
@vtype@ s = @vpre@_set1_@vsuf@(ip1[0]);
- LOOP_BLOCK_ALIGN_VAR(ip2, @type@, 16) {
+ LOOP_BLOCK_ALIGN_VAR(ip2, @type@, VECTOR_SIZE_BYTES) {
op[i] = sse2_ordered_cmp_@kind@_@TYPE@(ip1[0], ip2[i]);
}
- LOOP_BLOCKED(@type@, 64) {
- @vtype@ a = @vpre@_load_@vsuf@(&ip2[i + 0 * 16 / sizeof(@type@)]);
- @vtype@ b = @vpre@_load_@vsuf@(&ip2[i + 1 * 16 / sizeof(@type@)]);
- @vtype@ c = @vpre@_load_@vsuf@(&ip2[i + 2 * 16 / sizeof(@type@)]);
- @vtype@ d = @vpre@_load_@vsuf@(&ip2[i + 3 * 16 / sizeof(@type@)]);
+ LOOP_BLOCKED(@type@, 4 * VECTOR_SIZE_BYTES) {
+ @vtype@ a = @vpre@_load_@vsuf@(&ip2[i + 0 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
+ @vtype@ b = @vpre@_load_@vsuf@(&ip2[i + 1 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
+ @vtype@ c = @vpre@_load_@vsuf@(&ip2[i + 2 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
+ @vtype@ d = @vpre@_load_@vsuf@(&ip2[i + 3 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
@vtype@ r1 = @vpre@_@VOP@_@vsuf@(s, a);
@vtype@ r2 = @vpre@_@VOP@_@vsuf@(s, b);
@vtype@ r3 = @vpre@_@VOP@_@vsuf@(s, c);
@@ -912,14 +925,14 @@ static void
sse2_binary_scalar2_@kind@_@TYPE@(npy_bool * op, @type@ * ip1, @type@ * ip2, npy_intp n)
{
@vtype@ s = @vpre@_set1_@vsuf@(ip2[0]);
- LOOP_BLOCK_ALIGN_VAR(ip1, @type@, 16) {
+ LOOP_BLOCK_ALIGN_VAR(ip1, @type@, VECTOR_SIZE_BYTES) {
op[i] = sse2_ordered_cmp_@kind@_@TYPE@(ip1[i], ip2[0]);
}
- LOOP_BLOCKED(@type@, 64) {
- @vtype@ a = @vpre@_load_@vsuf@(&ip1[i + 0 * 16 / sizeof(@type@)]);
- @vtype@ b = @vpre@_load_@vsuf@(&ip1[i + 1 * 16 / sizeof(@type@)]);
- @vtype@ c = @vpre@_load_@vsuf@(&ip1[i + 2 * 16 / sizeof(@type@)]);
- @vtype@ d = @vpre@_load_@vsuf@(&ip1[i + 3 * 16 / sizeof(@type@)]);
+ LOOP_BLOCKED(@type@, 4 * VECTOR_SIZE_BYTES) {
+ @vtype@ a = @vpre@_load_@vsuf@(&ip1[i + 0 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
+ @vtype@ b = @vpre@_load_@vsuf@(&ip1[i + 1 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
+ @vtype@ c = @vpre@_load_@vsuf@(&ip1[i + 2 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
+ @vtype@ d = @vpre@_load_@vsuf@(&ip1[i + 3 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
@vtype@ r1 = @vpre@_@VOP@_@vsuf@(a, s);
@vtype@ r2 = @vpre@_@VOP@_@vsuf@(b, s);
@vtype@ r3 = @vpre@_@VOP@_@vsuf@(c, s);
@@ -935,19 +948,20 @@ sse2_binary_scalar2_@kind@_@TYPE@(npy_bool * op, @type@ * ip1, @type@ * ip2, npy
static void
sse2_sqrt_@TYPE@(@type@ * op, @type@ * ip, const npy_intp n)
{
- /* align output to 16 bytes */
- LOOP_BLOCK_ALIGN_VAR(op, @type@, 16) {
+ /* align output to VECTOR_SIZE_BYTES */
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES) {
op[i] = @scalarf@(ip[i]);
}
- assert(n < (16 / sizeof(@type@)) || npy_is_aligned(&op[i], 16));
- if (npy_is_aligned(&ip[i], 16)) {
- LOOP_BLOCKED(@type@, 16) {
+ assert(n < (VECTOR_SIZE_BYTES / sizeof(@type@)) ||
+ npy_is_aligned(&op[i], VECTOR_SIZE_BYTES));
+ if (npy_is_aligned(&ip[i], VECTOR_SIZE_BYTES)) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ d = @vpre@_load_@vsuf@(&ip[i]);
@vpre@_store_@vsuf@(&op[i], @vpre@_sqrt_@vsuf@(d));
}
}
else {
- LOOP_BLOCKED(@type@, 16) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ d = @vpre@_loadu_@vsuf@(&ip[i]);
@vpre@_store_@vsuf@(&op[i], @vpre@_sqrt_@vsuf@(d));
}
@@ -986,19 +1000,20 @@ sse2_@kind@_@TYPE@(@type@ * op, @type@ * ip, const npy_intp n)
*/
const @vtype@ mask = @vpre@_set1_@vsuf@(-0.@c@);
- /* align output to 16 bytes */
- LOOP_BLOCK_ALIGN_VAR(op, @type@, 16) {
+ /* align output to VECTOR_SIZE_BYTES */
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES) {
op[i] = @scalar@_@type@(ip[i]);
}
- assert(n < (16 / sizeof(@type@)) || npy_is_aligned(&op[i], 16));
- if (npy_is_aligned(&ip[i], 16)) {
- LOOP_BLOCKED(@type@, 16) {
+ assert(n < (VECTOR_SIZE_BYTES / sizeof(@type@)) ||
+ npy_is_aligned(&op[i], VECTOR_SIZE_BYTES));
+ if (npy_is_aligned(&ip[i], VECTOR_SIZE_BYTES)) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ a = @vpre@_load_@vsuf@(&ip[i]);
@vpre@_store_@vsuf@(&op[i], @vpre@_@VOP@_@vsuf@(mask, a));
}
}
else {
- LOOP_BLOCKED(@type@, 16) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ a = @vpre@_loadu_@vsuf@(&ip[i]);
@vpre@_store_@vsuf@(&op[i], @vpre@_@VOP@_@vsuf@(mask, a));
}
@@ -1019,11 +1034,12 @@ sse2_@kind@_@TYPE@(@type@ * op, @type@ * ip, const npy_intp n)
static void
sse2_@kind@_@TYPE@(@type@ * ip, @type@ * op, const npy_intp n)
{
- const npy_intp stride = 16 / (npy_intp)sizeof(@type@);
- LOOP_BLOCK_ALIGN_VAR(ip, @type@, 16) {
+ const npy_intp stride = VECTOR_SIZE_BYTES / (npy_intp)sizeof(@type@);
+ LOOP_BLOCK_ALIGN_VAR(ip, @type@, VECTOR_SIZE_BYTES) {
+ /* Order of operations important for MSVC 2015 */
*op = (*op @OP@ ip[i] || npy_isnan(*op)) ? *op : ip[i];
}
- assert(n < (stride) || npy_is_aligned(&ip[i], 16));
+ assert(n < (stride) || npy_is_aligned(&ip[i], VECTOR_SIZE_BYTES));
if (i + 3 * stride <= n) {
/* load the first elements */
@vtype@ c1 = @vpre@_load_@vsuf@((@type@*)&ip[i]);
@@ -1032,7 +1048,7 @@ sse2_@kind@_@TYPE@(@type@ * ip, @type@ * op, const npy_intp n)
/* minps/minpd will set invalid flag if nan is encountered */
npy_clear_floatstatus_barrier((char*)&c1);
- LOOP_BLOCKED(@type@, 32) {
+ LOOP_BLOCKED(@type@, 2 * VECTOR_SIZE_BYTES) {
@vtype@ v1 = @vpre@_load_@vsuf@((@type@*)&ip[i]);
@vtype@ v2 = @vpre@_load_@vsuf@((@type@*)&ip[i + stride]);
c1 = @vpre@_@VOP@_@vsuf@(c1, v1);
@@ -1045,15 +1061,15 @@ sse2_@kind@_@TYPE@(@type@ * ip, @type@ * op, const npy_intp n)
}
else {
@type@ tmp = sse2_horizontal_@VOP@_@vtype@(c1);
+ /* Order of operations important for MSVC 2015 */
*op = (*op @OP@ tmp || npy_isnan(*op)) ? *op : tmp;
}
}
LOOP_BLOCKED_END {
+ /* Order of operations important for MSVC 2015 */
*op = (*op @OP@ ip[i] || npy_isnan(*op)) ? *op : ip[i];
}
- if (npy_isnan(*op)) {
- npy_set_floatstatus_invalid();
- }
+ npy_clear_floatstatus_barrier((char*)op);
}
/**end repeat1**/
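
The minimum/maximum reduction above hinges on the expression `(*op @OP@ ip[i] || npy_isnan(*op)) ? *op : ip[i]`: once the accumulator is NaN it is never replaced, so NaN propagates through the reduction, and the stated operand order keeps MSVC 2015 from miscompiling it. A minimal standalone sketch of the same idiom in plain C (no NumPy macros; `reduce_max` is an illustrative name):

    #include <math.h>
    #include <stdio.h>

    /* NaN-propagating max reduction: once acc is NaN it stays NaN,
     * mirroring the (acc OP x || isnan(acc)) ? acc : x pattern. */
    static double reduce_max(const double *x, int n)
    {
        double acc = x[0];
        for (int i = 1; i < n; i++) {
            acc = (acc >= x[i] || isnan(acc)) ? acc : x[i];
        }
        return acc;
    }

    int main(void)
    {
        double a[] = {1.0, 3.0, 2.0};
        double b[] = {1.0, NAN, 2.0};
        printf("%g %g\n", reduce_max(a, 3), reduce_max(b, 3));  /* 3 nan */
        return 0;
    }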
@@ -1099,9 +1115,9 @@ static NPY_INLINE @vtype@ byte_to_true(@vtype@ v)
static void
sse2_binary_@kind@_BOOL(npy_bool * op, npy_bool * ip1, npy_bool * ip2, npy_intp n)
{
- LOOP_BLOCK_ALIGN_VAR(op, @type@, 16)
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES)
op[i] = ip1[i] @op@ ip2[i];
- LOOP_BLOCKED(@type@, 16) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ a = @vloadu@((@vtype@*)&ip1[i]);
@vtype@ b = @vloadu@((@vtype@*)&ip2[i]);
#if @and@
@@ -1126,16 +1142,16 @@ static void
sse2_reduce_@kind@_BOOL(npy_bool * op, npy_bool * ip, const npy_intp n)
{
const @vtype@ zero = @vpre@_setzero_@vsuf@();
- LOOP_BLOCK_ALIGN_VAR(ip, npy_bool, 16) {
+ LOOP_BLOCK_ALIGN_VAR(ip, npy_bool, VECTOR_SIZE_BYTES) {
*op = *op @op@ ip[i];
if (*op @sc@ 0) {
return;
}
}
/* unrolled once to replace a slow movmsk with a fast pmaxb */
- LOOP_BLOCKED(npy_bool, 32) {
+ LOOP_BLOCKED(npy_bool, 2 * VECTOR_SIZE_BYTES) {
@vtype@ v = @vload@((@vtype@*)&ip[i]);
- @vtype@ v2 = @vload@((@vtype@*)&ip[i + 16]);
+ @vtype@ v2 = @vload@((@vtype@*)&ip[i + VECTOR_SIZE_BYTES]);
v = @vpre@_cmpeq_epi8(v, zero);
v2 = @vpre@_cmpeq_epi8(v2, zero);
#if @and@
@@ -1173,9 +1189,9 @@ sse2_reduce_@kind@_BOOL(npy_bool * op, npy_bool * ip, const npy_intp n)
static void
sse2_@kind@_BOOL(@type@ * op, @type@ * ip, const npy_intp n)
{
- LOOP_BLOCK_ALIGN_VAR(op, @type@, 16)
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES)
op[i] = (ip[i] @op@ 0);
- LOOP_BLOCKED(@type@, 16) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ a = @vloadu@((@vtype@*)&ip[i]);
#if @not@
const @vtype@ zero = @vpre@_setzero_@vsuf@();
@@ -1196,6 +1212,8 @@ sse2_@kind@_BOOL(@type@ * op, @type@ * ip, const npy_intp n)
/**end repeat**/
+#undef VECTOR_SIZE_BYTES
+
#endif /* NPY_HAVE_SSE2_INTRINSICS */
#endif
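
The repeated change in this file replaces the literal 16 with a named VECTOR_SIZE_BYTES, so the block size is spelled once per ISA branch. The LOOP_BLOCK_ALIGN_VAR / LOOP_BLOCKED / LOOP_BLOCKED_END macros expand to a peel/main/tail structure; below is a standalone sketch of that structure with scalar code standing in for the SSE2 intrinsics (illustrative only, not the macro expansion itself):

    #include <stdint.h>
    #include <stdio.h>

    #define VECTOR_SIZE_BYTES 16   /* one SSE2 register */

    /* Peel/main/tail shape of the sse2_* loops. */
    static void add_one(float *op, const float *ip, intptr_t n)
    {
        const intptr_t per_block = VECTOR_SIZE_BYTES / (intptr_t)sizeof(float);
        intptr_t i = 0;
        /* peel: scalar ops until the output pointer is aligned */
        while (i < n && ((uintptr_t)&op[i] % VECTOR_SIZE_BYTES) != 0) {
            op[i] = ip[i] + 1.0f;
            i++;
        }
        /* main: whole VECTOR_SIZE_BYTES blocks (4 floats per block) */
        for (; i + per_block <= n; i += per_block) {
            for (intptr_t j = 0; j < per_block; j++) {
                op[i + j] = ip[i + j] + 1.0f;  /* one vector op in the real loop */
            }
        }
        /* tail: scalar remainder, LOOP_BLOCKED_END in the real code */
        for (; i < n; i++) {
            op[i] = ip[i] + 1.0f;
        }
    }

    int main(void)
    {
        float buf[10] = {0};
        add_one(buf, buf, 10);
        printf("%g %g\n", buf[0], buf[9]);  /* 1 1 */
        return 0;
    }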
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index 59fc5aa20..a2df58698 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -24,15 +24,13 @@
*
*/
#define _UMATHMODULE
+#define _MULTIARRAYMODULE
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#include "Python.h"
#include "npy_config.h"
-#define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API
-#define NO_IMPORT_ARRAY
-
#include "npy_pycompat.h"
#include "numpy/arrayobject.h"
@@ -48,6 +46,7 @@
#include "npy_import.h"
#include "extobj.h"
#include "common.h"
+#include "numpyos.h"
/********** PRINTF DEBUG TRACING **************/
#define NPY_UF_DBG_TRACING 0
@@ -71,6 +70,13 @@ typedef struct {
provided, then this is NULL. */
} ufunc_full_args;
+/* C representation of the context argument to __array_wrap__ */
+typedef struct {
+ PyUFuncObject *ufunc;
+ ufunc_full_args args;
+ int out_i;
+} _ufunc_context;
+
/* Get the arg tuple to pass in the context argument to __array_wrap__ and
* __array_prepare__.
*
@@ -302,6 +308,213 @@ _find_array_prepare(ufunc_full_args args,
return;
}
+#define NPY_UFUNC_DEFAULT_INPUT_FLAGS \
+ NPY_ITER_READONLY | \
+ NPY_ITER_ALIGNED | \
+ NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE
+
+#define NPY_UFUNC_DEFAULT_OUTPUT_FLAGS \
+ NPY_ITER_ALIGNED | \
+ NPY_ITER_ALLOCATE | \
+ NPY_ITER_NO_BROADCAST | \
+ NPY_ITER_NO_SUBTYPE | \
+ NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE
+
+/* Called at module initialization to set the matmul ufunc output flags */
+NPY_NO_EXPORT int
+set_matmul_flags(PyObject *d)
+{
+ PyObject *matmul = PyDict_GetItemString(d, "matmul");
+ if (matmul == NULL) {
+ return -1;
+ }
+ /*
+ * The default output flag NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE allows
+ * perfectly overlapping input and output (in-place operations). While
+ * correct for the common mathematical operations, this assumption is
+ * incorrect in the general case and specifically in the case of matmul.
+ *
+ * NPY_ITER_UPDATEIFCOPY is added by default in
+ * PyUFunc_GeneralizedFunction, which is the variant called for gufuncs
+ * with a signature.
+ *
+ * Enabling NPY_ITER_WRITEONLY can prevent a copy in some cases.
+ */
+ ((PyUFuncObject *)matmul)->op_flags[2] = (NPY_ITER_WRITEONLY |
+ NPY_ITER_UPDATEIFCOPY |
+ NPY_UFUNC_DEFAULT_OUTPUT_FLAGS) &
+ ~NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE;
+ return 0;
+}
+
+
+/*
+ * Set per-operand flags according to desired input or output flags.
+ * op_flags[i] for i in input (as determined by ufunc->nin) will be
+ * merged with op_in_flags, perhaps overriding per-operand flags set
+ * in previous stages.
+ * op_flags[i] for i in output will be set to op_out_flags only if previously
+ * unset.
+ * The input flag behavior preserves backward compatibility, while the
+ * output flag behavior is the "correct" one for maximum flexibility.
+ */
+NPY_NO_EXPORT void
+_ufunc_setup_flags(PyUFuncObject *ufunc, npy_uint32 op_in_flags,
+ npy_uint32 op_out_flags, npy_uint32 *op_flags)
+{
+ int nin = ufunc->nin;
+ int nout = ufunc->nout;
+ int nop = nin + nout, i;
+ /* Set up the flags */
+ for (i = 0; i < nin; ++i) {
+ op_flags[i] = ufunc->op_flags[i] | op_in_flags;
+ /*
+ * If READWRITE flag has been set for this operand,
+ * then clear default READONLY flag
+ */
+ if (op_flags[i] & (NPY_ITER_READWRITE | NPY_ITER_WRITEONLY)) {
+ op_flags[i] &= ~NPY_ITER_READONLY;
+ }
+ }
+ for (i = nin; i < nop; ++i) {
+ op_flags[i] = ufunc->op_flags[i] ? ufunc->op_flags[i] : op_out_flags;
+ }
+}
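
_ufunc_setup_flags centralizes the per-operand flag logic that later hunks delete from iterator_loop and execute_fancy_ufunc_loop. A reduced, runnable sketch of the merge rules with plain integers (the ITER_* bit values here are made up for illustration; the real NPY_ITER_* constants differ):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative flag bits; the real NPY_ITER_* values differ. */
    enum {
        ITER_READONLY  = 1u << 0,
        ITER_READWRITE = 1u << 1,
        ITER_WRITEONLY = 1u << 2,
        ITER_ALIGNED   = 1u << 3
    };

    /* Inputs: merge the defaults in, dropping READONLY when the ufunc asked
     * for write access. Outputs: per-operand flags win if set, else defaults. */
    static void setup_flags(const uint32_t *ufunc_op_flags, int nin, int nop,
                            uint32_t in_defaults, uint32_t out_defaults,
                            uint32_t *op_flags)
    {
        int i;
        for (i = 0; i < nin; i++) {
            op_flags[i] = ufunc_op_flags[i] | in_defaults;
            if (op_flags[i] & (ITER_READWRITE | ITER_WRITEONLY)) {
                op_flags[i] &= ~(uint32_t)ITER_READONLY;
            }
        }
        for (i = nin; i < nop; i++) {
            op_flags[i] = ufunc_op_flags[i] ? ufunc_op_flags[i] : out_defaults;
        }
    }

    int main(void)
    {
        uint32_t ufunc_flags[3] = {0, ITER_READWRITE, 0};  /* nin = 2, nout = 1 */
        uint32_t op_flags[3];
        setup_flags(ufunc_flags, 2, 3, ITER_READONLY | ITER_ALIGNED,
                    ITER_WRITEONLY | ITER_ALIGNED, op_flags);
        printf("%x %x %x\n", (unsigned)op_flags[0], (unsigned)op_flags[1],
               (unsigned)op_flags[2]);  /* 9 a c */
        return 0;
    }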
+
+/*
+ * This function analyzes the input arguments
+ * and determines an appropriate __array_wrap__ function to call
+ * for the outputs.
+ *
+ * If an output argument is provided, then it is wrapped
+ * with its own __array_wrap__, not with the one determined by
+ * the input arguments.
+ *
+ * If the provided output argument is already an array,
+ * the wrapping function is None (which means no wrapping will
+ * be done --- not even PyArray_Return).
+ *
+ * A NULL is placed in output_wrap for outputs that
+ * should just have PyArray_Return called.
+ */
+static void
+_find_array_wrap(ufunc_full_args args, PyObject *kwds,
+ PyObject **output_wrap, int nin, int nout)
+{
+ int i;
+ PyObject *obj;
+ PyObject *wrap = NULL;
+
+ /*
+ * If a 'subok' parameter is passed and isn't True, don't wrap; put None
+ * into slots with out arguments, which means those out arguments are
+ * returned unchanged
+ */
+ if (kwds != NULL && (obj = PyDict_GetItem(kwds,
+ npy_um_str_subok)) != NULL) {
+ if (obj != Py_True) {
+ /* skip search for wrap members */
+ goto handle_out;
+ }
+ }
+
+ /*
+ * Determine the wrapping function given by the input arrays
+ * (could be NULL).
+ */
+ wrap = _find_array_method(args.in, npy_um_str_array_wrap);
+
+ /*
+ * For all the output arrays decide what to do.
+ *
+ * 1) Use the wrap function determined from the input arrays
+ * This is the default if the output array is not
+ * passed in.
+ *
+ * 2) Use the __array_wrap__ method of the output object
+ * passed in. -- this is special cased for
+ * exact ndarray so that no PyArray_Return is
+ * done in that case.
+ */
+handle_out:
+ if (args.out == NULL) {
+ for (i = 0; i < nout; i++) {
+ Py_XINCREF(wrap);
+ output_wrap[i] = wrap;
+ }
+ }
+ else {
+ for (i = 0; i < nout; i++) {
+ output_wrap[i] = _get_output_array_method(
+ PyTuple_GET_ITEM(args.out, i), npy_um_str_array_wrap, wrap);
+ }
+ }
+
+ Py_XDECREF(wrap);
+ return;
+}
+
+
+/*
+ * Apply the __array_wrap__ function with the given array and content.
+ *
+ * Interprets wrap=None and wrap=NULL as intended by _find_array_wrap
+ *
+ * Steals a reference to obj and wrap.
+ * Pass context=NULL to indicate there is no context.
+ */
+static PyObject *
+_apply_array_wrap(
+ PyObject *wrap, PyArrayObject *obj, _ufunc_context const *context) {
+ if (wrap == NULL) {
+ /* default behavior */
+ return PyArray_Return(obj);
+ }
+ else if (wrap == Py_None) {
+ Py_DECREF(wrap);
+ return (PyObject *)obj;
+ }
+ else {
+ PyObject *res;
+ PyObject *py_context = NULL;
+
+ /* Convert the context object to a tuple, if present */
+ if (context == NULL) {
+ py_context = Py_None;
+ Py_INCREF(py_context);
+ }
+ else {
+ PyObject *args_tup;
+ /* Call the method with appropriate context */
+ args_tup = _get_wrap_prepare_args(context->args);
+ if (args_tup == NULL) {
+ goto fail;
+ }
+ py_context = Py_BuildValue("OOi",
+ context->ufunc, args_tup, context->out_i);
+ Py_DECREF(args_tup);
+ if (py_context == NULL) {
+ goto fail;
+ }
+ }
+ /* try __array_wrap__(obj, context) */
+ res = PyObject_CallFunctionObjArgs(wrap, obj, py_context, NULL);
+ Py_DECREF(py_context);
+
+ /* try __array_wrap__(obj) if the context argument is not accepted */
+ if (res == NULL && PyErr_ExceptionMatches(PyExc_TypeError)) {
+ PyErr_Clear();
+ res = PyObject_CallFunctionObjArgs(wrap, obj, NULL);
+ }
+ Py_DECREF(wrap);
+ Py_DECREF(obj);
+ return res;
+ fail:
+ Py_DECREF(wrap);
+ Py_DECREF(obj);
+ return NULL;
+ }
+}
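
The context fallback in _apply_array_wrap is the standard CPython retry idiom: call wrap(obj, context) and, if the wrapper rejects the extra argument with a TypeError, clear the error and call wrap(obj). Reduced to its core (reference-count handling trimmed; `call_wrap` is an illustrative name, but the API calls are the real ones used above):

    #include <Python.h>

    /* Try wrap(obj, context); if the wrapper rejects the second argument
     * with a TypeError, clear it and retry as wrap(obj). */
    static PyObject *
    call_wrap(PyObject *wrap, PyObject *obj, PyObject *context)
    {
        PyObject *res = PyObject_CallFunctionObjArgs(wrap, obj, context, NULL);
        if (res == NULL && PyErr_ExceptionMatches(PyExc_TypeError)) {
            PyErr_Clear();
            res = PyObject_CallFunctionObjArgs(wrap, obj, NULL);
        }
        return res;
    }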
+
/*UFUNC_API
*
@@ -340,7 +553,27 @@ _is_alnum_underscore(char ch)
}
/*
- * Return the ending position of a variable name
+ * Convert a string into a number
+ */
+static npy_intp
+_get_size(const char* str)
+{
+ char *stop;
+ npy_longlong size = NumPyOS_strtoll(str, &stop, 10);
+
+ if (stop == str || _is_alpha_underscore(*stop)) {
+ /* not a well-formed number */
+ return -1;
+ }
+ if (size >= NPY_MAX_INTP || size <= NPY_MIN_INTP) {
+ /* parsed value out of range for npy_intp */
+ return -1;
+ }
+ return size;
+}
+
+/*
+ * Return the ending position of a variable name, including an optional '?' modifier
*/
static int
_get_end_of_name(const char* str, int offset)
@@ -349,6 +582,9 @@ _get_end_of_name(const char* str, int offset)
while (_is_alnum_underscore(str[ret])) {
ret++;
}
+ if (str[ret] == '?') {
+ ret++;
+ }
return ret;
}
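
Together, _get_size and the '?' handling in _get_end_of_name tokenize one core-dimension entry: a name or a positive frozen size, optionally suffixed with '?'. A standalone sketch using the C library's strtoll, which NumPyOS_strtoll mirrors for this purpose (`scan_dim` is an illustrative name):

    #include <ctype.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Scan one core-dimension token starting at str[0].
     * Returns chars consumed; *size is -1 for a named dimension,
     * *can_ignore is 1 when the token carries a '?' modifier. */
    static int scan_dim(const char *str, long long *size, int *can_ignore)
    {
        int i = 0;
        if (isalpha((unsigned char)str[0]) || str[0] == '_') {
            *size = -1;
            while (isalnum((unsigned char)str[i]) || str[i] == '_') {
                i++;
            }
        }
        else {
            char *stop;
            *size = strtoll(str, &stop, 10);
            i = (int)(stop - str);
        }
        *can_ignore = (str[i] == '?');
        if (*can_ignore) {
            i++;
        }
        return i;
    }

    int main(void)
    {
        long long size; int ign;
        int n = scan_dim("m?,k", &size, &ign);
        printf("%d %lld %d\n", n, size, ign);  /* 2 -1 1 */
        n = scan_dim("3)", &size, &ign);
        printf("%d %lld %d\n", n, size, ign);  /* 1 3 0 */
        return 0;
    }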
@@ -390,7 +626,6 @@ _parse_signature(PyUFuncObject *ufunc, const char *signature)
"_parse_signature with NULL signature");
return -1;
}
-
len = strlen(signature);
ufunc->core_signature = PyArray_malloc(sizeof(char) * (len+1));
if (ufunc->core_signature) {
@@ -406,13 +641,22 @@ _parse_signature(PyUFuncObject *ufunc, const char *signature)
ufunc->core_enabled = 1;
ufunc->core_num_dim_ix = 0;
ufunc->core_num_dims = PyArray_malloc(sizeof(int) * ufunc->nargs);
- ufunc->core_dim_ixs = PyArray_malloc(sizeof(int) * len); /* shrink this later */
ufunc->core_offsets = PyArray_malloc(sizeof(int) * ufunc->nargs);
- if (ufunc->core_num_dims == NULL || ufunc->core_dim_ixs == NULL
- || ufunc->core_offsets == NULL) {
+ /* The next three items will be shrunk later */
+ ufunc->core_dim_ixs = PyArray_malloc(sizeof(int) * len);
+ ufunc->core_dim_sizes = PyArray_malloc(sizeof(npy_intp) * len);
+ ufunc->core_dim_flags = PyArray_malloc(sizeof(npy_uint32) * len);
+
+ if (ufunc->core_num_dims == NULL || ufunc->core_dim_ixs == NULL ||
+ ufunc->core_offsets == NULL ||
+ ufunc->core_dim_sizes == NULL ||
+ ufunc->core_dim_flags == NULL) {
PyErr_NoMemory();
goto fail;
}
+ for (i = 0; i < len; i++) {
+ ufunc->core_dim_flags[i] = 0;
+ }
i = _next_non_white_space(signature, 0);
while (signature[i] != '\0') {
@@ -437,26 +681,70 @@ _parse_signature(PyUFuncObject *ufunc, const char *signature)
i = _next_non_white_space(signature, i + 1);
while (signature[i] != ')') {
/* loop over core dimensions */
- int j = 0;
- if (!_is_alpha_underscore(signature[i])) {
- parse_error = "expect dimension name";
+ int ix, i_end;
+ npy_intp frozen_size;
+ npy_bool can_ignore;
+
+ if (signature[i] == '\0') {
+ parse_error = "unexpected end of signature string";
goto fail;
}
- while (j < ufunc->core_num_dim_ix) {
- if (_is_same_name(signature+i, var_names[j])) {
+ /*
+ * Is this a variable or a fixed size dimension?
+ */
+ if (_is_alpha_underscore(signature[i])) {
+ frozen_size = -1;
+ }
+ else {
+ frozen_size = (npy_intp)_get_size(signature + i);
+ if (frozen_size <= 0) {
+ parse_error = "expect dimension name or non-zero frozen size";
+ goto fail;
+ }
+ }
+ /* Is this dimension flexible? */
+ i_end = _get_end_of_name(signature, i);
+ can_ignore = (i_end > 0 && signature[i_end - 1] == '?');
+ /*
+ * Determine whether we already saw this dimension name,
+ * get its index, and set its properties
+ */
+ for (ix = 0; ix < ufunc->core_num_dim_ix; ix++) {
+ if (frozen_size > 0 ?
+ frozen_size == ufunc->core_dim_sizes[ix] :
+ _is_same_name(signature + i, var_names[ix])) {
break;
}
- j++;
}
- if (j >= ufunc->core_num_dim_ix) {
- var_names[j] = signature+i;
+ /*
+ * If a new dimension, store its properties; if old, check consistency.
+ */
+ if (ix == ufunc->core_num_dim_ix) {
ufunc->core_num_dim_ix++;
+ var_names[ix] = signature + i;
+ ufunc->core_dim_sizes[ix] = frozen_size;
+ if (frozen_size < 0) {
+ ufunc->core_dim_flags[ix] |= UFUNC_CORE_DIM_SIZE_INFERRED;
+ }
+ if (can_ignore) {
+ ufunc->core_dim_flags[ix] |= UFUNC_CORE_DIM_CAN_IGNORE;
+ }
+ } else {
+ if (can_ignore && !(ufunc->core_dim_flags[ix] &
+ UFUNC_CORE_DIM_CAN_IGNORE)) {
+ parse_error = "? cannot be used, name already seen without ?";
+ goto fail;
+ }
+ if (!can_ignore && (ufunc->core_dim_flags[ix] &
+ UFUNC_CORE_DIM_CAN_IGNORE)) {
+ parse_error = "? must be used, name already seen with ?";
+ goto fail;
+ }
}
- ufunc->core_dim_ixs[cur_core_dim] = j;
+ ufunc->core_dim_ixs[cur_core_dim] = ix;
cur_core_dim++;
nd++;
- i = _get_end_of_name(signature, i);
- i = _next_non_white_space(signature, i);
+ i = _next_non_white_space(signature, i_end);
if (signature[i] != ',' && signature[i] != ')') {
parse_error = "expect ',' or ')'";
goto fail;
@@ -493,7 +781,14 @@ _parse_signature(PyUFuncObject *ufunc, const char *signature)
goto fail;
}
ufunc->core_dim_ixs = PyArray_realloc(ufunc->core_dim_ixs,
- sizeof(int)*cur_core_dim);
+ sizeof(int) * cur_core_dim);
+ ufunc->core_dim_sizes = PyArray_realloc(
+ ufunc->core_dim_sizes,
+ sizeof(npy_intp) * ufunc->core_num_dim_ix);
+ ufunc->core_dim_flags = PyArray_realloc(
+ ufunc->core_dim_flags,
+ sizeof(npy_uint32) * ufunc->core_num_dim_ix);
+
/* check for trivial core-signature, e.g. "(),()->()" */
if (cur_core_dim == 0) {
ufunc->core_enabled = 0;
@@ -524,7 +819,7 @@ _set_out_array(PyObject *obj, PyArrayObject **store)
/* Translate None to NULL */
return 0;
}
- if PyArray_Check(obj) {
+ if (PyArray_Check(obj)) {
/* If it's an array, store it */
if (PyArray_FailUnlessWriteable((PyArrayObject *)obj,
"output array") < 0) {
@@ -552,6 +847,181 @@ ufunc_get_name_cstr(PyUFuncObject *ufunc) {
}
/*
+ * Helpers for keyword parsing
+ */
+
+/*
+ * Find key in a list of pointers to keyword names.
+ * The list should end with NULL.
+ *
+ * Returns either the index into the list (pointing at the terminating NULL
+ * entry if no match was found), or -1 on failure.
+ */
+static npy_intp
+locate_key(PyObject **kwnames, PyObject *key)
+{
+ PyObject **kwname = kwnames;
+ while (*kwname != NULL && *kwname != key) {
+ kwname++;
+ }
+ /* Slow fallback, just in case */
+ if (*kwname == NULL) {
+ int cmp = 0;
+ kwname = kwnames;
+ while (*kwname != NULL &&
+ (cmp = PyObject_RichCompareBool(key, *kwname,
+ Py_EQ)) == 0) {
+ kwname++;
+ }
+ if (cmp < 0) {
+ return -1;
+ }
+ }
+ return kwname - kwnames;
+}
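
locate_key counts on the keyword names being interned, so pointer identity finds the match on the fast path; only a miss falls back to value comparison. The same two-phase lookup, with const char * and strcmp standing in for PyObject * and PyObject_RichCompareBool (illustrative sketch):

    #include <stdio.h>
    #include <string.h>

    /* Fast path: pointer identity. Slow path: value comparison.
     * Returns the index of the match, or the index of the NULL
     * sentinel when nothing matched. */
    static int locate(const char **names, const char *key)
    {
        int i = 0;
        while (names[i] != NULL && names[i] != key) {
            i++;
        }
        if (names[i] == NULL) {
            for (i = 0; names[i] != NULL && strcmp(names[i], key) != 0; i++) {
            }
        }
        return i;
    }

    int main(void)
    {
        const char *names[] = {"out", "where", "axes", NULL};
        char key[] = "where";               /* distinct pointer, equal value */
        printf("%d\n", locate(names, key));  /* 1, found via the slow path */
        return 0;
    }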
+
+/*
+ * Parse keyword arguments, matching against kwnames
+ *
+ * Arguments beyond kwnames (the va_list) should contain converters and outputs
+ * for each keyword name (where an output can be NULL to indicate the particular
+ * keyword should be ignored).
+ *
+ * Returns 0 on success, -1 on failure with an error set.
+ *
+ * Note that the parser does not clean up on failure, i.e., already parsed keyword
+ * values may hold new references, which the caller has to remove.
+ *
+ * TODO: ufunc is only used for the name in error messages; passing on the
+ * name instead might be an option.
+ *
+ * TODO: instead of having this function ignore keywords for which the
+ * corresponding output is NULL, the calling routine should prepare the
+ * correct list.
+ */
+static int
+parse_ufunc_keywords(PyUFuncObject *ufunc, PyObject *kwds, PyObject **kwnames, ...)
+{
+ va_list va;
+ PyObject *key, *value;
+ Py_ssize_t pos = 0;
+ typedef int converter(PyObject *, void *);
+
+ while (PyDict_Next(kwds, &pos, &key, &value)) {
+ int i;
+ converter *convert;
+ void *output = NULL;
+ npy_intp index = locate_key(kwnames, key);
+ if (index < 0) {
+ return -1;
+ }
+ if (kwnames[index]) {
+ va_start(va, kwnames);
+ for (i = 0; i <= index; i++) {
+ convert = va_arg(va, converter *);
+ output = va_arg(va, void *);
+ }
+ va_end(va);
+ }
+ if (output) {
+ if (!convert(value, output)) {
+ return -1;
+ }
+ }
+ else {
+#if PY_VERSION_HEX >= 0x03000000
+ PyErr_Format(PyExc_TypeError,
+ "'%S' is an invalid keyword to ufunc '%s'",
+ key, ufunc_get_name_cstr(ufunc));
+#else
+ char *str = PyString_AsString(key);
+ if (str == NULL) {
+ PyErr_Clear();
+ PyErr_SetString(PyExc_TypeError, "invalid keyword argument");
+ }
+ else {
+ PyErr_Format(PyExc_TypeError,
+ "'%s' is an invalid keyword to ufunc '%s'",
+ str, ufunc_get_name_cstr(ufunc));
+ }
+#endif
+ return -1;
+ }
+ }
+ return 0;
+}
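
The va_list is restarted for each keyword, consuming (converter, output) pairs positionally until the matched index is reached. A runnable reduction of that dispatch pattern (string-to-int converters only; `dispatch` and `to_int` are illustrative names):

    #include <stdarg.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef int converter(const char *value, void *output);

    static int to_int(const char *value, void *output)
    {
        *(int *)output = atoi(value);
        return 1;
    }

    /* Find key among names (NULL-terminated), then walk the va_list to the
     * matching (converter, output) pair, as parse_ufunc_keywords does. */
    static int dispatch(const char *key, const char *value,
                        const char **names, ...)
    {
        va_list va;
        int i, index = 0;
        converter *convert = NULL;
        void *output = NULL;
        while (names[index] != NULL && strcmp(names[index], key) != 0) {
            index++;
        }
        if (names[index] == NULL) {
            return -1;  /* unknown keyword */
        }
        va_start(va, names);
        for (i = 0; i <= index; i++) {
            convert = va_arg(va, converter *);
            output = va_arg(va, void *);
        }
        va_end(va);
        return (output != NULL && convert(value, output)) ? 0 : -1;
    }

    int main(void)
    {
        const char *names[] = {"width", "height", NULL};
        int width = 0, height = 0;
        dispatch("height", "7", names, to_int, &width, to_int, &height);
        printf("%d %d\n", width, height);  /* 0 7 */
        return 0;
    }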
+
+/*
+ * Converters for use in parsing keyword arguments.
+ */
+NPY_NO_EXPORT int
+_subok_converter(PyObject *obj, int *subok)
+{
+ if (PyBool_Check(obj)) {
+ *subok = (obj == Py_True);
+ return NPY_SUCCEED;
+ }
+ else {
+ PyErr_SetString(PyExc_TypeError,
+ "'subok' must be a boolean");
+ return NPY_FAIL;
+ }
+}
+
+NPY_NO_EXPORT int
+_keepdims_converter(PyObject *obj, int *keepdims)
+{
+ if (PyBool_Check(obj)) {
+ *keepdims = (obj == Py_True);
+ return NPY_SUCCEED;
+ }
+ else {
+ PyErr_SetString(PyExc_TypeError,
+ "'keepdims' must be a boolean");
+ return NPY_FAIL;
+ }
+}
+
+NPY_NO_EXPORT int
+_wheremask_converter(PyObject *obj, PyArrayObject **wheremask)
+{
+ /*
+ * Optimization: where=True is the same as no where argument.
+ * This lets us document True as the default.
+ */
+ if (obj == Py_True) {
+ return NPY_SUCCEED;
+ }
+ else {
+ PyArray_Descr *dtype = PyArray_DescrFromType(NPY_BOOL);
+ if (dtype == NULL) {
+ return NPY_FAIL;
+ }
+ /* PyArray_FromAny steals reference to dtype, even on failure */
+ *wheremask = (PyArrayObject *)PyArray_FromAny(obj, dtype, 0, 0, 0, NULL);
+ if ((*wheremask) == NULL) {
+ return NPY_FAIL;
+ }
+ return NPY_SUCCEED;
+ }
+}
+
+NPY_NO_EXPORT int
+_new_reference(PyObject *obj, PyObject **out)
+{
+ Py_INCREF(obj);
+ *out = obj;
+ return NPY_SUCCEED;
+}
+
+NPY_NO_EXPORT int
+_borrowed_reference(PyObject *obj, PyObject **out)
+{
+ *out = obj;
+ return NPY_SUCCEED;
+}
+
+/*
* Parses the positional and keyword arguments for a generic ufunc call.
* All returned arguments are new references (with optional ones NULL
* if not present)
@@ -575,15 +1045,9 @@ get_ufunc_arguments(PyUFuncObject *ufunc,
int nout = ufunc->nout;
int nop = ufunc->nargs;
PyObject *obj, *context;
- PyObject *str_key_obj = NULL;
- const char *ufunc_name = ufunc_get_name_cstr(ufunc);
- int type_num;
-
- int any_flexible = 0, any_object = 0, any_flexible_userloops = 0;
- int has_sig = 0;
-
+ PyArray_Descr *dtype = NULL;
/*
- * Initialize objects so caller knows when outputs and other optional
+ * Initialize output objects so caller knows when outputs and optional
* arguments are set (also means we can safely XDECREF on failure).
*/
for (i = 0; i < nop; i++) {
@@ -638,166 +1102,6 @@ get_ufunc_arguments(PyUFuncObject *ufunc,
if (out_op[i] == NULL) {
goto fail;
}
-
- type_num = PyArray_DESCR(out_op[i])->type_num;
- if (!any_flexible &&
- PyTypeNum_ISFLEXIBLE(type_num)) {
- any_flexible = 1;
- }
- if (!any_object &&
- PyTypeNum_ISOBJECT(type_num)) {
- any_object = 1;
- }
-
- /*
- * If any operand is a flexible dtype, check to see if any
- * struct dtype ufuncs are registered. A ufunc has been registered
- * for a struct dtype if ufunc's arg_dtypes array is not NULL.
- */
- if (PyTypeNum_ISFLEXIBLE(type_num) &&
- !any_flexible_userloops &&
- ufunc->userloops != NULL) {
- PyUFunc_Loop1d *funcdata;
- PyObject *key, *obj;
- key = PyInt_FromLong(type_num);
- if (key == NULL) {
- continue;
- }
- obj = PyDict_GetItem(ufunc->userloops, key);
- Py_DECREF(key);
- if (obj == NULL) {
- continue;
- }
- funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj);
- while (funcdata != NULL) {
- if (funcdata->arg_dtypes != NULL) {
- any_flexible_userloops = 1;
- break;
- }
- funcdata = funcdata->next;
- }
- }
- }
-
- if (any_flexible && !any_flexible_userloops && !any_object && nin == 2) {
- /* Traditionally, we return -2 here (meaning "NotImplemented") anytime
- * we hit the above condition.
- *
- * This condition basically means "we are doomed", b/c the "flexible"
- * dtypes -- strings and void -- cannot have their own ufunc loops
- * registered (except via the special "flexible userloops" mechanism),
- * and they can't be cast to anything except object (and we only cast
- * to object if any_object is true). So really we should do nothing
- * here and continue and let the proper error be raised. But, we can't
- * quite yet, b/c of backcompat.
- *
- * Most of the time, this NotImplemented either got returned directly
- * to the user (who can't do anything useful with it), or got passed
- * back out of a special function like __mul__. And fortunately, for
- * almost all special functions, the end result of this was a
- * TypeError. Which is also what we get if we just continue without
- * this special case, so this special case is unnecessary.
- *
- * The only thing that actually depended on the NotImplemented is
- * array_richcompare, which did two things with it. First, it needed
- * to see this NotImplemented in order to implement the special-case
- * comparisons for
- *
- * string < <= == != >= > string
- * void == != void
- *
- * Now it checks for those cases first, before trying to call the
- * ufunc, so that's no problem. What it doesn't handle, though, is
- * cases like
- *
- * float < string
- *
- * or
- *
- * float == void
- *
- * For those, it just let the NotImplemented bubble out, and accepted
- * Python's default handling. And unfortunately, for comparisons,
- * Python's default is *not* to raise an error. Instead, it returns
- * something that depends on the operator:
- *
- * == return False
- * != return True
- * < <= >= > Python 2: use "fallback" (= weird and broken) ordering
- * Python 3: raise TypeError (hallelujah)
- *
- * In most cases this is straightforwardly broken, because comparison
- * of two arrays should always return an array, and here we end up
- * returning a scalar. However, there is an exception: if we are
- * comparing two scalars for equality, then it actually is correct to
- * return a scalar bool instead of raising an error. If we just
- * removed this special check entirely, then "np.float64(1) == 'foo'"
- * would raise an error instead of returning False, which is genuinely
- * wrong.
- *
- * The proper end goal here is:
- * 1) == and != should be implemented in a proper vectorized way for
- * all types. The short-term hack for this is just to add a
- * special case to PyUFunc_DefaultLegacyInnerLoopSelector where
- * if it can't find a comparison loop for the given types, and
- * the ufunc is np.equal or np.not_equal, then it returns a loop
- * that just fills the output array with False (resp. True). Then
- * array_richcompare could trust that whenever its special cases
- * don't apply, simply calling the ufunc will do the right thing,
- * even without this special check.
- * 2) < <= >= > should raise an error if no comparison function can
- * be found. array_richcompare already handles all string <>
- * string cases, and void dtypes don't have ordering, so again
- * this would mean that array_richcompare could simply call the
- * ufunc and it would do the right thing (i.e., raise an error),
- * again without needing this special check.
- *
- * So this means that for the transition period, our goal is:
- * == and != on scalars should simply return NotImplemented like
- * they always did, since everything ends up working out correctly
- * in this case only
- * == and != on arrays should issue a FutureWarning and then return
- * NotImplemented
- * < <= >= > on all flexible dtypes on py2 should raise a
- * DeprecationWarning, and then return NotImplemented. On py3 we
- * skip the warning, though, b/c it would just be immediately be
- * followed by an exception anyway.
- *
- * And for all other operations, we let things continue as normal.
- */
- /* strcmp() is a hack but I think we can get away with it for this
- * temporary measure.
- */
- if (!strcmp(ufunc_name, "equal") ||
- !strcmp(ufunc_name, "not_equal")) {
- /* Warn on non-scalar, return NotImplemented regardless */
- if (PyArray_NDIM(out_op[0]) != 0 ||
- PyArray_NDIM(out_op[1]) != 0) {
- if (DEPRECATE_FUTUREWARNING(
- "elementwise comparison failed; returning scalar "
- "instead, but in the future will perform elementwise "
- "comparison") < 0) {
- goto fail;
- }
- }
- Py_DECREF(out_op[0]);
- Py_DECREF(out_op[1]);
- return -2;
- }
- else if (!strcmp(ufunc_name, "less") ||
- !strcmp(ufunc_name, "less_equal") ||
- !strcmp(ufunc_name, "greater") ||
- !strcmp(ufunc_name, "greater_equal")) {
-#if !defined(NPY_PY3K)
- if (DEPRECATE("unorderable dtypes; returning scalar but in "
- "the future this will be an error") < 0) {
- goto fail;
- }
-#endif
- Py_DECREF(out_op[0]);
- Py_DECREF(out_op[1]);
- return -2;
- }
}
/* Get positional output arguments */
@@ -809,253 +1113,149 @@ get_ufunc_arguments(PyUFuncObject *ufunc,
}
/*
- * Get keyword output and other arguments.
- * Raise an error if anything else is present in the
- * keyword dictionary.
+ * If keywords are present, get keyword output and other arguments.
+ * Raise an error if anything else is present in the keyword dictionary.
*/
- if (kwds != NULL) {
- PyObject *key, *value;
- Py_ssize_t pos = 0;
- while (PyDict_Next(kwds, &pos, &key, &value)) {
- Py_ssize_t length = 0;
- char *str = NULL;
- int bad_arg = 1;
-
-#if defined(NPY_PY3K)
- Py_XDECREF(str_key_obj);
- str_key_obj = PyUnicode_AsASCIIString(key);
- if (str_key_obj != NULL) {
- key = str_key_obj;
- }
-#endif
-
- if (PyBytes_AsStringAndSize(key, &str, &length) < 0) {
- PyErr_Clear();
- PyErr_SetString(PyExc_TypeError, "invalid keyword argument");
+ if (kwds) {
+ PyObject *out_kwd = NULL;
+ PyObject *sig = NULL;
+ static PyObject *kwnames[13] = {NULL};
+ if (kwnames[0] == NULL) {
+ kwnames[0] = npy_um_str_out;
+ kwnames[1] = npy_um_str_where;
+ kwnames[2] = npy_um_str_axes;
+ kwnames[3] = npy_um_str_axis;
+ kwnames[4] = npy_um_str_keepdims;
+ kwnames[5] = npy_um_str_casting;
+ kwnames[6] = npy_um_str_order;
+ kwnames[7] = npy_um_str_dtype;
+ kwnames[8] = npy_um_str_subok;
+ kwnames[9] = npy_um_str_signature;
+ kwnames[10] = npy_um_str_sig;
+ kwnames[11] = npy_um_str_extobj;
+ kwnames[12] = NULL; /* sentinel */
+ }
+ /*
+ * Parse using converters to fill in the output variables
+ * (NULL outputs are treated as indicating a keyword is not allowed).
+ */
+ if (parse_ufunc_keywords(
+ ufunc, kwds, kwnames,
+ _borrowed_reference, &out_kwd,
+ _wheremask_converter, out_wheremask, /* new reference */
+ _new_reference, out_axes,
+ _new_reference, out_axis,
+ _keepdims_converter, out_keepdims,
+ PyArray_CastingConverter, out_casting,
+ PyArray_OrderConverter, out_order,
+ PyArray_DescrConverter2, &dtype, /* new reference */
+ _subok_converter, out_subok,
+ _new_reference, out_typetup,
+ _borrowed_reference, &sig,
+ _new_reference, out_extobj) < 0) {
+ goto fail;
+ }
+ /*
+ * Check that outputs were not passed as positional as well,
+ * and that they are either None or an array.
+ */
+ if (out_kwd) { /* borrowed reference */
+ /*
+ * Output arrays are generally specified as a tuple of arrays
+ * and None, but may be a single array or None for ufuncs
+ * with a single output.
+ */
+ if (nargs > nin) {
+ PyErr_SetString(PyExc_ValueError,
+ "cannot specify 'out' as both a "
+ "positional and keyword argument");
goto fail;
}
-
- switch (str[0]) {
- case 'a':
- /* possible axes argument for generalized ufunc */
- if (out_axes != NULL && strcmp(str, "axes") == 0) {
- if (out_axis != NULL && *out_axis != NULL) {
- PyErr_SetString(PyExc_TypeError,
- "cannot specify both 'axis' and 'axes'");
- goto fail;
- }
- Py_INCREF(value);
- *out_axes = value;
- bad_arg = 0;
- }
- else if (out_axis != NULL && strcmp(str, "axis") == 0) {
- if (out_axes != NULL && *out_axes != NULL) {
- PyErr_SetString(PyExc_TypeError,
- "cannot specify both 'axis' and 'axes'");
- goto fail;
- }
- Py_INCREF(value);
- *out_axis = value;
- bad_arg = 0;
- }
- break;
- case 'c':
- /* Provides a policy for allowed casting */
- if (strcmp(str, "casting") == 0) {
- if (!PyArray_CastingConverter(value, out_casting)) {
- goto fail;
- }
- bad_arg = 0;
- }
- break;
- case 'd':
- /* Another way to specify 'sig' */
- if (strcmp(str, "dtype") == 0) {
- /* Allow this parameter to be None */
- PyArray_Descr *dtype;
- if (!PyArray_DescrConverter2(value, &dtype)) {
- goto fail;
- }
- if (dtype != NULL) {
- if (*out_typetup != NULL) {
- PyErr_SetString(PyExc_RuntimeError,
- "cannot specify both 'signature' and 'dtype'");
- goto fail;
- }
- *out_typetup = Py_BuildValue("(N)", dtype);
- }
- bad_arg = 0;
- }
- break;
- case 'e':
- /*
- * Overrides the global parameters buffer size,
- * error mask, and error object
- */
- if (strcmp(str, "extobj") == 0) {
- Py_INCREF(value);
- *out_extobj = value;
- bad_arg = 0;
- }
- break;
- case 'k':
- if (out_keepdims != NULL && strcmp(str, "keepdims") == 0) {
- if (!PyBool_Check(value)) {
- PyErr_SetString(PyExc_TypeError,
- "'keepdims' must be a boolean");
- goto fail;
- }
- *out_keepdims = (value == Py_True);
- bad_arg = 0;
+ if (PyTuple_CheckExact(out_kwd)) {
+ if (PyTuple_GET_SIZE(out_kwd) != nout) {
+ PyErr_SetString(PyExc_ValueError,
+ "The 'out' tuple must have exactly "
+ "one entry per ufunc output");
+ goto fail;
+ }
+ /* 'out' must be a tuple of arrays and Nones */
+ for (i = 0; i < nout; ++i) {
+ PyObject *val = PyTuple_GET_ITEM(out_kwd, i);
+ if (_set_out_array(val, out_op+nin+i) < 0) {
+ goto fail;
}
- break;
- case 'o':
- /*
- * Output arrays may be specified as a keyword argument,
- * either as a single array or None for single output
- * ufuncs, or as a tuple of arrays and Nones.
- */
- if (strcmp(str, "out") == 0) {
- if (nargs > nin) {
- PyErr_SetString(PyExc_ValueError,
- "cannot specify 'out' as both a "
- "positional and keyword argument");
- goto fail;
- }
- if (PyTuple_CheckExact(value)) {
- if (PyTuple_GET_SIZE(value) != nout) {
- PyErr_SetString(PyExc_ValueError,
- "The 'out' tuple must have exactly "
- "one entry per ufunc output");
- goto fail;
- }
- /* 'out' must be a tuple of arrays and Nones */
- for(i = 0; i < nout; ++i) {
- PyObject *val = PyTuple_GET_ITEM(value, i);
- if (_set_out_array(val, out_op+nin+i) < 0) {
- goto fail;
- }
- }
- }
- else if (nout == 1) {
- /* Can be an array if it only has one output */
- if (_set_out_array(value, out_op + nin) < 0) {
- goto fail;
- }
- }
- else {
- /*
- * If the deprecated behavior is ever removed,
- * keep only the else branch of this if-else
- */
- if (PyArray_Check(value) || value == Py_None) {
- if (DEPRECATE("passing a single array to the "
- "'out' keyword argument of a "
- "ufunc with\n"
- "more than one output will "
- "result in an error in the "
- "future") < 0) {
- /* The future error message */
- PyErr_SetString(PyExc_TypeError,
+ }
+ }
+ else if (nout == 1) {
+ /* Can be an array if it only has one output */
+ if (_set_out_array(out_kwd, out_op + nin) < 0) {
+ goto fail;
+ }
+ }
+ else {
+ /*
+ * If the deprecated behavior is ever removed,
+ * keep only the else branch of this if-else
+ */
+ if (PyArray_Check(out_kwd) || out_kwd == Py_None) {
+ if (DEPRECATE("passing a single array to the "
+ "'out' keyword argument of a "
+ "ufunc with\n"
+ "more than one output will "
+ "result in an error in the "
+ "future") < 0) {
+ /* The future error message */
+ PyErr_SetString(PyExc_TypeError,
"'out' must be a tuple of arrays");
- goto fail;
- }
- if (_set_out_array(value, out_op+nin) < 0) {
- goto fail;
- }
- }
- else {
- PyErr_SetString(PyExc_TypeError,
- nout > 1 ? "'out' must be a tuple "
- "of arrays" :
- "'out' must be an array or a "
- "tuple of a single array");
- goto fail;
- }
- }
- bad_arg = 0;
+ goto fail;
}
- /* Allows the default output layout to be overridden */
- else if (strcmp(str, "order") == 0) {
- if (!PyArray_OrderConverter(value, out_order)) {
- goto fail;
- }
- bad_arg = 0;
+ if (_set_out_array(out_kwd, out_op+nin) < 0) {
+ goto fail;
}
- break;
- case 's':
- /* Allows a specific function inner loop to be selected */
- if (strcmp(str, "sig") == 0 ||
- strcmp(str, "signature") == 0) {
- if (has_sig == 1) {
- PyErr_SetString(PyExc_ValueError,
+ }
+ else {
+ PyErr_SetString(PyExc_TypeError,
+ nout > 1 ? "'out' must be a tuple "
+ "of arrays" :
+ "'out' must be an array or a "
+ "tuple of a single array");
+ goto fail;
+ }
+ }
+ }
+ /*
+ * Check we did not get both axis and axes, or multiple ways
+ * to define a signature.
+ */
+ if (out_axes != NULL && out_axis != NULL &&
+ *out_axes != NULL && *out_axis != NULL) {
+ PyErr_SetString(PyExc_TypeError,
+ "cannot specify both 'axis' and 'axes'");
+ goto fail;
+ }
+ if (sig) { /* borrowed reference */
+ if (*out_typetup != NULL) {
+ PyErr_SetString(PyExc_ValueError,
"cannot specify both 'sig' and 'signature'");
- goto fail;
- }
- if (*out_typetup != NULL) {
- PyErr_SetString(PyExc_RuntimeError,
- "cannot specify both 'signature' and 'dtype'");
- goto fail;
- }
- Py_INCREF(value);
- *out_typetup = value;
- bad_arg = 0;
- has_sig = 1;
- }
- else if (strcmp(str, "subok") == 0) {
- if (!PyBool_Check(value)) {
- PyErr_SetString(PyExc_TypeError,
- "'subok' must be a boolean");
- goto fail;
- }
- *out_subok = (value == Py_True);
- bad_arg = 0;
- }
- break;
- case 'w':
- /*
- * Provides a boolean array 'where=' mask if
- * out_wheremask is supplied.
- */
- if (out_wheremask != NULL && strcmp(str, "where") == 0) {
- PyArray_Descr *dtype;
- dtype = PyArray_DescrFromType(NPY_BOOL);
- if (dtype == NULL) {
- goto fail;
- }
- if (value == Py_True) {
- /*
- * Optimization: where=True is the same as no
- * where argument. This lets us document it as a
- * default argument
- */
- bad_arg = 0;
- break;
- }
- *out_wheremask = (PyArrayObject *)PyArray_FromAny(
- value, dtype,
- 0, 0, 0, NULL);
- if (*out_wheremask == NULL) {
- goto fail;
- }
- bad_arg = 0;
- }
- break;
+ goto fail;
}
-
- if (bad_arg) {
- char *format = "'%s' is an invalid keyword to ufunc '%s'";
- PyErr_Format(PyExc_TypeError, format, str, ufunc_name);
+ Py_INCREF(sig);
+ *out_typetup = sig;
+ }
+ if (dtype) { /* new reference */
+ if (*out_typetup != NULL) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "cannot specify both 'signature' and 'dtype'");
goto fail;
}
+ /* Note: "N" uses the reference */
+ *out_typetup = Py_BuildValue("(N)", dtype);
}
}
- Py_XDECREF(str_key_obj);
-
return 0;
fail:
- Py_XDECREF(str_key_obj);
+ Py_XDECREF(dtype);
Py_XDECREF(*out_typetup);
Py_XDECREF(*out_extobj);
if (out_wheremask != NULL) {
@@ -1266,11 +1466,11 @@ iterator_loop(PyUFuncObject *ufunc,
PyObject **arr_prep,
ufunc_full_args full_args,
PyUFuncGenericFunction innerloop,
- void *innerloopdata)
+ void *innerloopdata,
+ npy_uint32 *op_flags)
{
npy_intp i, nin = ufunc->nin, nout = ufunc->nout;
npy_intp nop = nin + nout;
- npy_uint32 op_flags[NPY_MAXARGS];
NpyIter *iter;
char *baseptrs[NPY_MAXARGS];
@@ -1284,29 +1484,6 @@ iterator_loop(PyUFuncObject *ufunc,
NPY_BEGIN_THREADS_DEF;
- /* Set up the flags */
- for (i = 0; i < nin; ++i) {
- op_flags[i] = NPY_ITER_READONLY |
- NPY_ITER_ALIGNED |
- NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE;
- /*
- * If READWRITE flag has been set for this operand,
- * then clear default READONLY flag
- */
- op_flags[i] |= ufunc->op_flags[i];
- if (op_flags[i] & (NPY_ITER_READWRITE | NPY_ITER_WRITEONLY)) {
- op_flags[i] &= ~NPY_ITER_READONLY;
- }
- }
- for (i = nin; i < nop; ++i) {
- op_flags[i] = NPY_ITER_WRITEONLY |
- NPY_ITER_ALIGNED |
- NPY_ITER_ALLOCATE |
- NPY_ITER_NO_BROADCAST |
- NPY_ITER_NO_SUBTYPE |
- NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE;
- }
-
iter_flags = ufunc->iter_flags |
NPY_ITER_EXTERNAL_LOOP |
NPY_ITER_REFS_OK |
@@ -1410,15 +1587,15 @@ iterator_loop(PyUFuncObject *ufunc,
}
/*
+ * ufunc - the ufunc to call
* trivial_loop_ok - 1 if no alignment, data conversion, etc required
- * nin - number of inputs
- * nout - number of outputs
- * op - the operands (nin + nout of them)
+ * op - the operands (ufunc->nin + ufunc->nout of them)
+ * dtypes - the dtype of each operand
* order - the loop execution order/output memory order
* buffersize - how big of a buffer to use
* arr_prep - the __array_prepare__ functions for the outputs
- * innerloop - the inner loop function
- * innerloopdata - data to pass to the inner loop
+ * full_args - the original input, output PyObject *
+ * op_flags - per-operand flags, a combination of NPY_ITER_* constants
*/
static int
execute_legacy_ufunc_loop(PyUFuncObject *ufunc,
@@ -1428,7 +1605,8 @@ execute_legacy_ufunc_loop(PyUFuncObject *ufunc,
NPY_ORDER order,
npy_intp buffersize,
PyObject **arr_prep,
- ufunc_full_args full_args)
+ ufunc_full_args full_args,
+ npy_uint32 *op_flags)
{
npy_intp nin = ufunc->nin, nout = ufunc->nout;
PyUFuncGenericFunction innerloop;
@@ -1563,7 +1741,7 @@ execute_legacy_ufunc_loop(PyUFuncObject *ufunc,
NPY_UF_DBG_PRINT("iterator loop\n");
if (iterator_loop(ufunc, op, dtypes, order,
buffersize, arr_prep, full_args,
- innerloop, innerloopdata) < 0) {
+ innerloop, innerloopdata, op_flags) < 0) {
return -1;
}
@@ -1589,14 +1767,13 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc,
NPY_ORDER order,
npy_intp buffersize,
PyObject **arr_prep,
- ufunc_full_args full_args)
+ ufunc_full_args full_args,
+ npy_uint32 *op_flags)
{
int i, nin = ufunc->nin, nout = ufunc->nout;
int nop = nin + nout;
- npy_uint32 op_flags[NPY_MAXARGS];
NpyIter *iter;
int needs_api;
- npy_intp default_op_in_flags = 0, default_op_out_flags = 0;
NpyIter_IterNextFunc *iternext;
char **dataptr;
@@ -1606,48 +1783,10 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc,
PyArrayObject **op_it;
npy_uint32 iter_flags;
- if (wheremask != NULL) {
- if (nop + 1 > NPY_MAXARGS) {
- PyErr_SetString(PyExc_ValueError,
- "Too many operands when including where= parameter");
- return -1;
- }
- op[nop] = wheremask;
- dtypes[nop] = NULL;
- default_op_out_flags |= NPY_ITER_WRITEMASKED;
- }
-
- /* Set up the flags */
- for (i = 0; i < nin; ++i) {
- op_flags[i] = default_op_in_flags |
- NPY_ITER_READONLY |
- NPY_ITER_ALIGNED |
- NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE;
- /*
- * If READWRITE flag has been set for this operand,
- * then clear default READONLY flag
- */
- op_flags[i] |= ufunc->op_flags[i];
- if (op_flags[i] & (NPY_ITER_READWRITE | NPY_ITER_WRITEONLY)) {
- op_flags[i] &= ~NPY_ITER_READONLY;
- }
- }
for (i = nin; i < nop; ++i) {
- /*
- * We don't write to all elements, and the iterator may make
- * UPDATEIFCOPY temporary copies. The output arrays (unless they are
- * allocated by the iterator itself) must be considered READWRITE by the
- * iterator, so that the elements we don't write to are copied to the
- * possible temporary array.
- */
- op_flags[i] = default_op_out_flags |
- (op[i] != NULL ? NPY_ITER_READWRITE : NPY_ITER_WRITEONLY) |
- NPY_ITER_ALIGNED |
- NPY_ITER_ALLOCATE |
- NPY_ITER_NO_BROADCAST |
- NPY_ITER_NO_SUBTYPE |
- NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE;
+ op_flags[i] |= (op[i] != NULL ? NPY_ITER_READWRITE : NPY_ITER_WRITEONLY);
}
+
if (wheremask != NULL) {
op_flags[nop] = NPY_ITER_READONLY | NPY_ITER_ARRAYMASK;
}
@@ -1890,6 +2029,72 @@ fail:
}
/*
+ * Validate that operands have enough dimensions, accounting for
+ * possible flexible dimensions that may be absent.
+ */
+static int
+_validate_num_dims(PyUFuncObject *ufunc, PyArrayObject **op,
+ npy_uint32 *core_dim_flags,
+ int *op_core_num_dims) {
+ int i, j;
+ int nin = ufunc->nin;
+ int nop = ufunc->nargs;
+
+ for (i = 0; i < nop; i++) {
+ if (op[i] != NULL) {
+ int op_ndim = PyArray_NDIM(op[i]);
+
+ if (op_ndim < op_core_num_dims[i]) {
+ int core_offset = ufunc->core_offsets[i];
+ /* We have too few dimensions, but some might be flexible */
+ for (j = core_offset;
+ j < core_offset + ufunc->core_num_dims[i]; j++) {
+ int core_dim_index = ufunc->core_dim_ixs[j];
+ if ((core_dim_flags[core_dim_index] &
+ UFUNC_CORE_DIM_CAN_IGNORE)) {
+ int i1, j1, k;
+ /*
+ * Found a dimension that can be ignored. Flag that
+ * it is missing, and unflag that it can be ignored,
+ * since we are doing so already.
+ */
+ core_dim_flags[core_dim_index] |= UFUNC_CORE_DIM_MISSING;
+ core_dim_flags[core_dim_index] ^= UFUNC_CORE_DIM_CAN_IGNORE;
+ /*
+ * Reduce the number of core dimensions for all
+ * operands that use this one (including ours),
+ * and check whether we're now OK.
+ */
+ for (i1 = 0, k = 0; i1 < nop; i1++) {
+ for (j1 = 0; j1 < ufunc->core_num_dims[i1]; j1++) {
+ if (ufunc->core_dim_ixs[k++] == core_dim_index) {
+ op_core_num_dims[i1]--;
+ }
+ }
+ }
+ if (op_ndim == op_core_num_dims[i]) {
+ break;
+ }
+ }
+ }
+ if (op_ndim < op_core_num_dims[i]) {
+ PyErr_Format(PyExc_ValueError,
+ "%s: %s operand %d does not have enough "
+ "dimensions (has %d, gufunc core with "
+ "signature %s requires %d)",
+ ufunc_get_name_cstr(ufunc),
+ i < nin ? "Input" : "Output",
+ i < nin ? i : i - nin, PyArray_NDIM(op[i]),
+ ufunc->core_signature, op_core_num_dims[i]);
+ return -1;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+/*
* Check whether any of the outputs of a gufunc has core dimensions.
*/
static int
@@ -1962,7 +2167,7 @@ _check_keepdims_support(PyUFuncObject *ufunc) {
* Returns 0 on success, and -1 on failure
*/
static int
-_parse_axes_arg(PyUFuncObject *ufunc, int core_num_dims[], PyObject *axes,
+_parse_axes_arg(PyUFuncObject *ufunc, int op_core_num_dims[], PyObject *axes,
PyArrayObject **op, int broadcast_ndim, int **remap_axis) {
int nin = ufunc->nin;
int nop = ufunc->nargs;
@@ -1992,7 +2197,7 @@ _parse_axes_arg(PyUFuncObject *ufunc, int core_num_dims[], PyObject *axes,
PyObject *op_axes_tuple, *axis_item;
int axis, op_axis;
- op_ncore = core_num_dims[iop];
+ op_ncore = op_core_num_dims[iop];
if (op[iop] != NULL) {
op_ndim = PyArray_NDIM(op[iop]);
op_nbroadcast = op_ndim - op_ncore;
@@ -2146,57 +2351,72 @@ _parse_axis_arg(PyUFuncObject *ufunc, int core_num_dims[], PyObject *axis,
*
* Returns 0 on success, and -1 on failure
*
- * The behavior has been changed in NumPy 1.10.0, and the following
+ * The behavior has been changed in NumPy 1.16.0, and the following
* requirements must be fulfilled or an error will be raised:
* * Arguments, both input and output, must have at least as many
* dimensions as the corresponding number of core dimensions. In
- * previous versions, 1's were prepended to the shape as needed.
+ * versions before 1.10, 1's were prepended to the shape as needed.
* * Core dimensions with same labels must have exactly matching sizes.
- * In previous versions, core dimensions of size 1 would broadcast
+ * In versions before 1.10, core dimensions of size 1 would broadcast
* against other core dimensions with the same label.
* * All core dimensions must have their size specified by a passed in
- * input or output argument. In previous versions, core dimensions in
+ * input or output argument. In versions before 1.10, core dimensions in
* an output argument that were not specified in an input argument,
* and whose size could not be inferred from a passed in output
* argument, would have their size set to 1.
+ * Core dimensions may have a fixed (frozen) size; this is new in NumPy 1.16.
*/
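
Concretely, the signature grammar after this change admits all of the following forms (the flexible form is what lets matmul treat 1-d operands as vectors; the frozen-size example is illustrative):

    (i,j),(j,k)->(i,k)        named core dimensions, matched by label
    (3),(3)->(3)              frozen dimension: size fixed by the signature
    (n?,k),(k,m?)->(n?,m?)    flexible dimensions: n and m may be absent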
static int
_get_coredim_sizes(PyUFuncObject *ufunc, PyArrayObject **op,
- npy_intp* core_dim_sizes, int **remap_axis) {
+ int *op_core_num_dims, npy_uint32 *core_dim_flags,
+ npy_intp *core_dim_sizes, int **remap_axis) {
int i;
int nin = ufunc->nin;
int nout = ufunc->nout;
int nop = nin + nout;
- for (i = 0; i < ufunc->core_num_dim_ix; ++i) {
- core_dim_sizes[i] = -1;
- }
for (i = 0; i < nop; ++i) {
if (op[i] != NULL) {
int idim;
int dim_offset = ufunc->core_offsets[i];
- int num_dims = ufunc->core_num_dims[i];
- int core_start_dim = PyArray_NDIM(op[i]) - num_dims;
+ int core_start_dim = PyArray_NDIM(op[i]) - op_core_num_dims[i];
+ int dim_delta = 0;
+
+ /* checked before this routine gets called */
+ assert(core_start_dim >= 0);
+
/*
* Make sure every core dimension exactly matches all other core
- * dimensions with the same label.
+ * dimensions with the same label. Note that flexible dimensions
+ * may have been removed at this point, if so, they are marked
+ * with UFUNC_CORE_DIM_MISSING.
*/
- for (idim = 0; idim < num_dims; ++idim) {
- int core_dim_index = ufunc->core_dim_ixs[dim_offset+idim];
- npy_intp op_dim_size = PyArray_DIM(
- op[i], REMAP_AXIS(i, core_start_dim+idim));
-
- if (core_dim_sizes[core_dim_index] == -1) {
+ for (idim = 0; idim < ufunc->core_num_dims[i]; ++idim) {
+ int core_index = dim_offset + idim;
+ int core_dim_index = ufunc->core_dim_ixs[core_index];
+ npy_intp core_dim_size = core_dim_sizes[core_dim_index];
+ npy_intp op_dim_size;
+
+ /* can only happen if flexible; dimension missing altogether */
+ if (core_dim_flags[core_dim_index] & UFUNC_CORE_DIM_MISSING) {
+ op_dim_size = 1;
+ dim_delta++; /* shifts subsequent indexing into the operand's dims */
+ }
+ else {
+ op_dim_size = PyArray_DIM(op[i],
+ REMAP_AXIS(i, core_start_dim + idim - dim_delta));
+ }
+ if (core_dim_sizes[core_dim_index] < 0) {
core_dim_sizes[core_dim_index] = op_dim_size;
}
- else if (op_dim_size != core_dim_sizes[core_dim_index]) {
+ else if (op_dim_size != core_dim_size) {
PyErr_Format(PyExc_ValueError,
"%s: %s operand %d has a mismatch in its "
"core dimension %d, with gufunc "
"signature %s (size %zd is different "
"from %zd)",
ufunc_get_name_cstr(ufunc), i < nin ? "Input" : "Output",
- i < nin ? i : i - nin, idim,
+ i < nin ? i : i - nin, idim - dim_delta,
ufunc->core_signature, op_dim_size,
core_dim_sizes[core_dim_index]);
return -1;
@@ -2208,39 +2428,29 @@ _get_coredim_sizes(PyUFuncObject *ufunc, PyArrayObject **op,
/*
* Make sure no core dimension is unspecified.
*/
- for (i = 0; i < ufunc->core_num_dim_ix; ++i) {
- if (core_dim_sizes[i] == -1) {
- break;
- }
- }
- if (i != ufunc->core_num_dim_ix) {
- /*
- * There is at least one core dimension missing, find in which
- * operand it comes up first (it has to be an output operand).
- */
- const int missing_core_dim = i;
- int out_op;
- for (out_op = nin; out_op < nop; ++out_op) {
- int first_idx = ufunc->core_offsets[out_op];
- int last_idx = first_idx + ufunc->core_num_dims[out_op];
- for (i = first_idx; i < last_idx; ++i) {
- if (ufunc->core_dim_ixs[i] == missing_core_dim) {
- break;
- }
- }
- if (i < last_idx) {
- /* Change index offsets for error message */
- out_op -= nin;
- i -= first_idx;
- break;
+ for (i = nin; i < nop; ++i) {
+ int idim;
+ int dim_offset = ufunc->core_offsets[i];
+
+ for (idim = 0; idim < ufunc->core_num_dims[i]; ++idim) {
+ int core_dim_index = ufunc->core_dim_ixs[dim_offset + idim];
+
+ /* check all cases where the size has not yet been set */
+ if (core_dim_sizes[core_dim_index] < 0) {
+ /*
+ * Oops, this dimension was never specified
+ * (can only happen if output op not given)
+ */
+ PyErr_Format(PyExc_ValueError,
+ "%s: Output operand %d has core dimension %d "
+ "unspecified, with gufunc signature %s",
+ ufunc_get_name_cstr(ufunc), i - nin, idim,
+ ufunc->core_signature);
+ return -1;
}
}
- PyErr_Format(PyExc_ValueError,
- "%s: Output operand %d has core dimension %d "
- "unspecified, with gufunc signature %s",
- ufunc_get_name_cstr(ufunc), out_op, i, ufunc->core_signature);
- return -1;
}
+
return 0;
}
@@ -2272,6 +2482,11 @@ _get_identity(PyUFuncObject *ufunc, npy_bool *reorderable) {
*reorderable = 0;
Py_RETURN_NONE;
+ case PyUFunc_IdentityValue:
+ *reorderable = 1;
+ Py_INCREF(ufunc->identity_value);
+ return ufunc->identity_value;
+
default:
PyErr_Format(PyExc_ValueError,
"ufunc %s has an invalid identity", ufunc_get_name_cstr(ufunc));
@@ -2279,6 +2494,26 @@ _get_identity(PyUFuncObject *ufunc, npy_bool *reorderable) {
}
}
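At the Python level, the new PyUFunc_IdentityValue case is what lets a reduction over an empty array return a meaningful value instead of failing. A small sketch (which ufuncs carry an identity is version-dependent):

    import numpy as np

    print(np.add.reduce(np.array([], dtype=float)))    # 0.0 (identity of add)
    print(np.logical_and.reduce(np.array([], bool)))   # True
    try:
        np.maximum.reduce(np.array([], dtype=float))   # no identity -> error
    except ValueError as e:
        print(e)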
+/*
+ * Copy over parts of the ufunc structure that may need to be
+ * changed during execution. Returns 0 on success; -1 otherwise.
+ */
+static int
+_initialize_variable_parts(PyUFuncObject *ufunc,
+ int op_core_num_dims[],
+ npy_intp core_dim_sizes[],
+ npy_uint32 core_dim_flags[]) {
+ int i;
+
+ for (i = 0; i < ufunc->nargs; i++) {
+ op_core_num_dims[i] = ufunc->core_num_dims[i];
+ }
+ for (i = 0; i < ufunc->core_num_dim_ix; i++) {
+ core_dim_sizes[i] = ufunc->core_dim_sizes[i];
+ core_dim_flags[i] = ufunc->core_dim_flags[i];
+ }
+ return 0;
+}
static int
PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
@@ -2295,10 +2530,10 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
/* Use remapped axes for generalized ufunc */
int broadcast_ndim, iter_ndim;
- int core_num_dims_array[NPY_MAXARGS];
- int *core_num_dims;
+ int op_core_num_dims[NPY_MAXARGS];
int op_axes_arrays[NPY_MAXARGS][NPY_MAXDIMS];
int *op_axes[NPY_MAXARGS];
+ npy_uint32 core_dim_flags[NPY_MAXARGS];
npy_uint32 op_flags[NPY_MAXARGS];
npy_intp iter_shape[NPY_MAXARGS];
@@ -2353,6 +2588,12 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
dtypes[i] = NULL;
arr_prep[i] = NULL;
}
+ /* Initialize possibly variable parts to the values from the ufunc */
+ retval = _initialize_variable_parts(ufunc, op_core_num_dims,
+ core_dim_sizes, core_dim_flags);
+ if (retval < 0) {
+ goto fail;
+ }
NPY_UF_DBG_PRINT("Getting arguments\n");
@@ -2384,41 +2625,28 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
}
}
/*
- * If keepdims is set and true, signal all dimensions will be the same.
+ * If keepdims is set and true (which requires all inputs to have the
+ * same number of core dimensions), signal that all outputs will have
+ * that same number of core dimensions too.
*/
if (keepdims == 1) {
- int num_dims = ufunc->core_num_dims[0];
- for (i = 0; i < nop; ++i) {
- core_num_dims_array[i] = num_dims;
+ int num_dims = op_core_num_dims[0];
+ for (i = nin; i < nop; ++i) {
+ op_core_num_dims[i] = num_dims;
}
- core_num_dims = core_num_dims_array;
}
else {
/* keepdims was not set or was false; no adjustment necessary */
- core_num_dims = ufunc->core_num_dims;
keepdims = 0;
}
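A sketch of what this keepdims handling gives at the Python level, assuming the private test gufunc numpy.core.umath_tests.inner1d (signature '(i),(i)->()') is available; it is internal and not a stable API:

    import numpy as np
    from numpy.core.umath_tests import inner1d

    a = np.arange(6.0).reshape(2, 3)
    print(inner1d(a, a).shape)                  # (2,)
    # keepdims=True re-adds the reduced core dimension with size 1
    print(inner1d(a, a, keepdims=True).shape)   # (2, 1)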
/*
* Check that operands have the minimum dimensions required.
* (Just checks core; broadcast dimensions are tested by the iterator.)
*/
- for (i = 0; i < nop; i++) {
- if (op[i] != NULL && PyArray_NDIM(op[i]) < core_num_dims[i]) {
- PyErr_Format(PyExc_ValueError,
- "%s: %s operand %d does not have enough "
- "dimensions (has %d, gufunc core with "
- "signature %s requires %d)",
- ufunc_name,
- i < nin ? "Input" : "Output",
- i < nin ? i : i - nin,
- PyArray_NDIM(op[i]),
- ufunc->core_signature,
- core_num_dims[i]);
- retval = -1;
- goto fail;
- }
+ retval = _validate_num_dims(ufunc, op, core_dim_flags,
+ op_core_num_dims);
+ if (retval < 0) {
+ goto fail;
}
-
/*
* Figure out the number of iteration dimensions, which
* is the broadcast result of all the input non-core
@@ -2426,30 +2654,12 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
*/
broadcast_ndim = 0;
for (i = 0; i < nin; ++i) {
- int n = PyArray_NDIM(op[i]) - core_num_dims[i];
+ int n = PyArray_NDIM(op[i]) - op_core_num_dims[i];
if (n > broadcast_ndim) {
broadcast_ndim = n;
}
}
- /*
- * Figure out the number of iterator creation dimensions,
- * which is the broadcast dimensions + all the core dimensions of
- * the outputs, so that the iterator can allocate those output
- * dimensions following the rules of order='F', for example.
- */
- iter_ndim = broadcast_ndim;
- for (i = nin; i < nop; ++i) {
- iter_ndim += core_num_dims[i];
- }
- if (iter_ndim > NPY_MAXDIMS) {
- PyErr_Format(PyExc_ValueError,
- "too many dimensions for generalized ufunc %s",
- ufunc_name);
- retval = -1;
- goto fail;
- }
-
/* Possibly remap axes. */
if (axes != NULL || axis != NULL) {
remap_axis = PyArray_malloc(sizeof(remap_axis[0]) * nop);
@@ -2463,11 +2673,11 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
remap_axis[i] = remap_axis_memory + i * NPY_MAXDIMS;
}
if (axis) {
- retval = _parse_axis_arg(ufunc, core_num_dims, axis, op,
+ retval = _parse_axis_arg(ufunc, op_core_num_dims, axis, op,
broadcast_ndim, remap_axis);
}
else {
- retval = _parse_axes_arg(ufunc, core_num_dims, axes, op,
+ retval = _parse_axes_arg(ufunc, op_core_num_dims, axes, op,
broadcast_ndim, remap_axis);
}
if(retval < 0) {
@@ -2476,10 +2686,28 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
}
/* Collect the lengths of the labelled core dimensions */
- retval = _get_coredim_sizes(ufunc, op, core_dim_sizes, remap_axis);
+ retval = _get_coredim_sizes(ufunc, op, op_core_num_dims, core_dim_flags,
+ core_dim_sizes, remap_axis);
if(retval < 0) {
goto fail;
}
+ /*
+ * Figure out the number of iterator creation dimensions,
+ * which is the broadcast dimensions + all the core dimensions of
+ * the outputs, so that the iterator can allocate those output
+ * dimensions following the rules of order='F', for example.
+ */
+ iter_ndim = broadcast_ndim;
+ for (i = nin; i < nop; ++i) {
+ iter_ndim += op_core_num_dims[i];
+ }
+ if (iter_ndim > NPY_MAXDIMS) {
+ PyErr_Format(PyExc_ValueError,
+ "too many dimensions for generalized ufunc %s",
+ ufunc_name);
+ retval = -1;
+ goto fail;
+ }
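A worked instance of the count above, using matmul of stacked matrices by a plain matrix (the numbers are purely illustrative):

    import numpy as np

    a = np.ones((5, 2, 3))   # one broadcast dim (5) + core dims (2, 3)
    b = np.ones((3, 4))      # no broadcast dims + core dims (3, 4)
    # broadcast_ndim = max(3-2, 2-2) = 1; the output core dims n, m add 2,
    # so the iterator is created with iter_ndim = 1 + 2 = 3 dimensions.
    print(np.matmul(a, b).shape)   # (5, 2, 4)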
/* Fill in the initial part of 'iter_shape' */
for (idim = 0; idim < broadcast_ndim; ++idim) {
@@ -2492,11 +2720,7 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
int n;
if (op[i]) {
- /*
- * Note that n may be negative if broadcasting
- * extends into the core dimensions.
- */
- n = PyArray_NDIM(op[i]) - core_num_dims[i];
+ n = PyArray_NDIM(op[i]) - op_core_num_dims[i];
}
else {
n = broadcast_ndim;
@@ -2520,24 +2744,49 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
/* Except for when it belongs to this output */
if (i >= nin) {
int dim_offset = ufunc->core_offsets[i];
- int num_dims = core_num_dims[i];
+ int num_removed = 0;
/*
* Fill in 'iter_shape' and 'op_axes' for the core dimensions
* of this output. Here, we have to be careful: if keepdims
- * was used, then this axis is not a real core dimension,
- * but is being added back for broadcasting, so its size is 1.
+ * was used, then the axes are not real core dimensions, but
+ * are being added back for broadcasting, so their size is 1.
+ * If the axis was removed, we should skip altogether.
*/
- for (idim = 0; idim < num_dims; ++idim) {
- iter_shape[j] = keepdims ? 1 : core_dim_sizes[
- ufunc->core_dim_ixs[dim_offset + idim]];
- op_axes_arrays[i][j] = REMAP_AXIS(i, n + idim);
- ++j;
+ if (keepdims) {
+ for (idim = 0; idim < op_core_num_dims[i]; ++idim) {
+ iter_shape[j] = 1;
+ op_axes_arrays[i][j] = REMAP_AXIS(i, n + idim);
+ ++j;
+ }
+ }
+ else {
+ for (idim = 0; idim < ufunc->core_num_dims[i]; ++idim) {
+ int core_index = dim_offset + idim;
+ int core_dim_index = ufunc->core_dim_ixs[core_index];
+ if ((core_dim_flags[core_dim_index] &
+ UFUNC_CORE_DIM_MISSING)) {
+ /* skip it */
+ num_removed++;
+ continue;
+ }
+ iter_shape[j] = core_dim_sizes[ufunc->core_dim_ixs[core_index]];
+ op_axes_arrays[i][j] = REMAP_AXIS(i, n + idim - num_removed);
+ ++j;
+ }
}
}
op_axes[i] = op_axes_arrays[i];
}
+#if NPY_UF_DBG_TRACING
+ printf("iter shapes:");
+ for (j=0; j < iter_ndim; j++) {
+ printf(" %ld", iter_shape[j]);
+ }
+ printf("\n");
+#endif
+
/* Get the buffersize and errormask */
if (_get_bufsize_errmask(extobj, ufunc_name, &buffersize, &errormask) < 0) {
retval = -1;
@@ -2552,6 +2801,18 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
if (retval < 0) {
goto fail;
}
+ /*
+ * We don't write to all elements, and the iterator may make
+ * UPDATEIFCOPY temporary copies. The output arrays (unless they are
+ * allocated by the iterator itself) must be considered READWRITE by the
+ * iterator, so that the elements we don't write to are copied to the
+ * possible temporary array.
+ */
+ _ufunc_setup_flags(ufunc, NPY_ITER_COPY | NPY_UFUNC_DEFAULT_INPUT_FLAGS,
+ NPY_ITER_UPDATEIFCOPY |
+ NPY_ITER_READWRITE |
+ NPY_UFUNC_DEFAULT_OUTPUT_FLAGS,
+ op_flags);
/* For the generalized ufunc, we get the loop right away too */
retval = ufunc->legacy_inner_loop_selector(ufunc, dtypes,
&innerloop, &innerloopdata, &needs_api);
@@ -2594,28 +2855,6 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
* Set up the iterator per-op flags. For generalized ufuncs, we
* can't do buffering, so must COPY or UPDATEIFCOPY.
*/
- for (i = 0; i < nin; ++i) {
- op_flags[i] = NPY_ITER_READONLY |
- NPY_ITER_COPY |
- NPY_ITER_ALIGNED |
- NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE;
- /*
- * If READWRITE flag has been set for this operand,
- * then clear default READONLY flag
- */
- op_flags[i] |= ufunc->op_flags[i];
- if (op_flags[i] & (NPY_ITER_READWRITE | NPY_ITER_WRITEONLY)) {
- op_flags[i] &= ~NPY_ITER_READONLY;
- }
- }
- for (i = nin; i < nop; ++i) {
- op_flags[i] = NPY_ITER_READWRITE|
- NPY_ITER_UPDATEIFCOPY|
- NPY_ITER_ALIGNED|
- NPY_ITER_ALLOCATE|
- NPY_ITER_NO_BROADCAST|
- NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE;
- }
iter_flags = ufunc->iter_flags |
NPY_ITER_MULTI_INDEX |
@@ -2635,13 +2874,15 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
}
/* Fill in any allocated outputs */
- for (i = nin; i < nop; ++i) {
- if (op[i] == NULL) {
- op[i] = NpyIter_GetOperandArray(iter)[i];
- Py_INCREF(op[i]);
+ {
+ PyArrayObject **operands = NpyIter_GetOperandArray(iter);
+ for (i = 0; i < nop; ++i) {
+ if (op[i] == NULL) {
+ op[i] = operands[i];
+ Py_INCREF(op[i]);
+ }
}
}
-
/*
* Set up the inner strides array. Because we're not doing
* buffering, the strides are fixed throughout the looping.
@@ -2660,8 +2901,6 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
/* Copy the strides after the first nop */
idim = nop;
for (i = 0; i < nop; ++i) {
- int num_dims = ufunc->core_num_dims[i];
- int core_start_dim = PyArray_NDIM(op[i]) - num_dims;
/*
* Need to use the arrays in the iterator, not op, because
* a copy with a different-sized type may have been made.
@@ -2669,20 +2908,31 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
PyArrayObject *arr = NpyIter_GetOperandArray(iter)[i];
npy_intp *shape = PyArray_SHAPE(arr);
npy_intp *strides = PyArray_STRIDES(arr);
- for (j = 0; j < num_dims; ++j) {
- if (core_start_dim + j >= 0) {
- /*
- * Force the stride to zero when the shape is 1, so
- * that the broadcasting works right.
- */
- int remapped_axis = REMAP_AXIS(i, core_start_dim + j);
+ /*
+ * Could be negative if flexible dims are used, but not for
+ * keepdims, since those dimensions are allocated in arr.
+ */
+ int core_start_dim = PyArray_NDIM(arr) - op_core_num_dims[i];
+ int num_removed = 0;
+ int dim_offset = ufunc->core_offsets[i];
+
+ for (j = 0; j < ufunc->core_num_dims[i]; ++j) {
+ int core_dim_index = ufunc->core_dim_ixs[dim_offset + j];
+ /*
+ * Force zero stride when the shape is 1 (always the case for
+ * missing dimensions), so that broadcasting works right.
+ */
+ if (core_dim_flags[core_dim_index] & UFUNC_CORE_DIM_MISSING) {
+ num_removed++;
+ inner_strides[idim++] = 0;
+ }
+ else {
+ int remapped_axis = REMAP_AXIS(i, core_start_dim + j - num_removed);
if (shape[remapped_axis] != 1) {
inner_strides[idim++] = strides[remapped_axis];
} else {
inner_strides[idim++] = 0;
}
- } else {
- inner_strides[idim++] = 0;
}
}
}
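The zero-stride trick applied above is the same mechanism visible from Python via broadcast_to: a repeated (or missing) dimension gets stride 0 so every step re-reads the same memory. Exact stride values depend on the platform's itemsize:

    import numpy as np

    tiled = np.broadcast_to(np.arange(3), (4, 3))
    print(tiled.strides)   # (0, 8) on a typical 64-bit build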
@@ -2814,7 +3064,7 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
Py_XDECREF(full_args.in);
Py_XDECREF(full_args.out);
- NPY_UF_DBG_PRINT1("Returning code %d\n", reval);
+ NPY_UF_DBG_PRINT1("Returning code %d\n", retval);
return retval;
@@ -2855,7 +3105,8 @@ PyUFunc_GenericFunction(PyUFuncObject *ufunc,
int i, nop;
const char *ufunc_name;
int retval = -1, subok = 1;
- int need_fancy = 0;
+ npy_uint32 op_flags[NPY_MAXARGS];
+ npy_intp default_op_out_flags;
PyArray_Descr *dtypes[NPY_MAXARGS];
@@ -2914,13 +3165,6 @@ PyUFunc_GenericFunction(PyUFuncObject *ufunc,
return retval;
}
- /*
- * Use the masked loop if a wheremask was specified.
- */
- if (wheremask != NULL) {
- need_fancy = 1;
- }
-
/* Get the buffersize and errormask */
if (_get_bufsize_errmask(extobj, ufunc_name, &buffersize, &errormask) < 0) {
retval = -1;
@@ -2935,16 +3179,20 @@ PyUFunc_GenericFunction(PyUFuncObject *ufunc,
goto fail;
}
- /* Only do the trivial loop check for the unmasked version. */
- if (!need_fancy) {
- /*
- * This checks whether a trivial loop is ok, making copies of
- * scalar and one dimensional operands if that will help.
- */
- trivial_loop_ok = check_for_trivial_loop(ufunc, op, dtypes, buffersize);
- if (trivial_loop_ok < 0) {
- goto fail;
- }
+ if (wheremask != NULL) {
+ /* Set up the flags. */
+ default_op_out_flags = NPY_ITER_NO_SUBTYPE |
+ NPY_ITER_WRITEMASKED |
+ NPY_UFUNC_DEFAULT_OUTPUT_FLAGS;
+ _ufunc_setup_flags(ufunc, NPY_UFUNC_DEFAULT_INPUT_FLAGS,
+ default_op_out_flags, op_flags);
+ }
+ else {
+ /* Set up the flags. */
+ default_op_out_flags = NPY_ITER_WRITEONLY |
+ NPY_UFUNC_DEFAULT_OUTPUT_FLAGS;
+ _ufunc_setup_flags(ufunc, NPY_UFUNC_DEFAULT_INPUT_FLAGS,
+ default_op_out_flags, op_flags);
}
#if NPY_UF_DBG_TRACING
@@ -2972,23 +3220,46 @@ PyUFunc_GenericFunction(PyUFuncObject *ufunc,
_find_array_prepare(full_args, arr_prep, nin, nout);
}
- /* Start with the floating-point exception flags cleared */
- npy_clear_floatstatus_barrier((char*)&ufunc);
/* Do the ufunc loop */
- if (need_fancy) {
+ if (wheremask != NULL) {
NPY_UF_DBG_PRINT("Executing fancy inner loop\n");
+ if (nop + 1 > NPY_MAXARGS) {
+ PyErr_SetString(PyExc_ValueError,
+ "Too many operands when including where= parameter");
+ return -1;
+ }
+ op[nop] = wheremask;
+ dtypes[nop] = NULL;
+
+ /* Set up the flags */
+
+ npy_clear_floatstatus_barrier((char*)&ufunc);
retval = execute_fancy_ufunc_loop(ufunc, wheremask,
op, dtypes, order,
- buffersize, arr_prep, full_args);
+ buffersize, arr_prep, full_args, op_flags);
}
else {
NPY_UF_DBG_PRINT("Executing legacy inner loop\n");
+ /*
+ * This checks whether a trivial loop is ok, making copies of
+ * scalar and one dimensional operands if that will help.
+ * Since it requires dtypes, it can only be called after
+ * ufunc->type_resolver
+ */
+ trivial_loop_ok = check_for_trivial_loop(ufunc, op, dtypes, buffersize);
+ if (trivial_loop_ok < 0) {
+ goto fail;
+ }
+
+ /* check_for_trivial_loop on half-floats can overflow */
+ npy_clear_floatstatus_barrier((char*)&ufunc);
+
retval = execute_legacy_ufunc_loop(ufunc, trivial_loop_ok,
op, dtypes, order,
- buffersize, arr_prep, full_args);
+ buffersize, arr_prep, full_args, op_flags);
}
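Python-level effect of the wheremask path set up here: the mask rides along as an extra iterator operand, and output elements where it is False are left untouched (WRITEMASKED):

    import numpy as np

    out = np.full(3, -1.0)
    np.add([1.0, 2.0, 3.0], 10.0, out=out, where=[True, False, True])
    print(out)   # [11. -1. 13.]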
if (retval < 0) {
goto fail;
@@ -4114,7 +4385,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
int axes[NPY_MAXDIMS];
PyObject *axes_in = NULL;
PyArrayObject *mp = NULL, *ret = NULL;
- PyObject *op, *res = NULL;
+ PyObject *op;
PyObject *obj_ind, *context;
PyArrayObject *indices = NULL;
PyArray_Descr *otype = NULL;
@@ -4360,25 +4631,31 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
return NULL;
}
- /* If an output parameter was provided, don't wrap it */
- if (out != NULL) {
- return (PyObject *)ret;
- }
-
- if (Py_TYPE(op) != Py_TYPE(ret)) {
- res = PyObject_CallMethod(op, "__array_wrap__", "O", ret);
- if (res == NULL) {
- PyErr_Clear();
- }
- else if (res == Py_None) {
- Py_DECREF(res);
+ /* Wrap and return the output */
+ {
+ /* Find __array_wrap__ - note that these rules are different from the
+ * normal ufunc path
+ */
+ PyObject *wrap;
+ if (out != NULL) {
+ wrap = Py_None;
+ Py_INCREF(wrap);
+ }
+ else if (Py_TYPE(op) != Py_TYPE(ret)) {
+ wrap = PyObject_GetAttr(op, npy_um_str_array_wrap);
+ if (wrap == NULL) {
+ PyErr_Clear();
+ }
+ else if (!PyCallable_Check(wrap)) {
+ Py_DECREF(wrap);
+ wrap = NULL;
+ }
}
else {
- Py_DECREF(ret);
- return res;
+ wrap = NULL;
}
+ return _apply_array_wrap(wrap, ret, NULL);
}
- return PyArray_Return(ret);
fail:
Py_XDECREF(otype);
@@ -4386,78 +4663,6 @@ fail:
return NULL;
}
-/*
- * This function analyzes the input arguments
- * and determines an appropriate __array_wrap__ function to call
- * for the outputs.
- *
- * If an output argument is provided, then it is wrapped
- * with its own __array_wrap__ not with the one determined by
- * the input arguments.
- *
- * if the provided output argument is already an array,
- * the wrapping function is None (which means no wrapping will
- * be done --- not even PyArray_Return).
- *
- * A NULL is placed in output_wrap for outputs that
- * should just have PyArray_Return called.
- */
-static void
-_find_array_wrap(ufunc_full_args args, PyObject *kwds,
- PyObject **output_wrap, int nin, int nout)
-{
- int i;
- PyObject *obj;
- PyObject *wrap = NULL;
-
- /*
- * If a 'subok' parameter is passed and isn't True, don't wrap but put None
- * into slots with out arguments which means return the out argument
- */
- if (kwds != NULL && (obj = PyDict_GetItem(kwds,
- npy_um_str_subok)) != NULL) {
- if (obj != Py_True) {
- /* skip search for wrap members */
- goto handle_out;
- }
- }
-
- /*
- * Determine the wrapping function given by the input arrays
- * (could be NULL).
- */
- wrap = _find_array_method(args.in, npy_um_str_array_wrap);
-
- /*
- * For all the output arrays decide what to do.
- *
- * 1) Use the wrap function determined from the input arrays
- * This is the default if the output array is not
- * passed in.
- *
- * 2) Use the __array_wrap__ method of the output object
- * passed in. -- this is special cased for
- * exact ndarray so that no PyArray_Return is
- * done in that case.
- */
-handle_out:
- if (args.out == NULL) {
- for (i = 0; i < nout; i++) {
- Py_XINCREF(wrap);
- output_wrap[i] = wrap;
- }
- }
- else {
- for (i = 0; i < nout; i++) {
- output_wrap[i] = _get_output_array_method(
- PyTuple_GET_ITEM(args.out, i), npy_um_str_array_wrap, wrap);
- }
- }
-
- Py_XDECREF(wrap);
- return;
-}
-
static PyObject *
ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds)
@@ -4480,22 +4685,7 @@ ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds)
errval = PyUFunc_GenericFunction(ufunc, args, kwds, mps);
if (errval < 0) {
- if (errval == -1) {
- return NULL;
- }
- else if (ufunc->nin == 2 && ufunc->nout == 1) {
- /*
- * For array_richcompare's benefit -- see the long comment in
- * get_ufunc_arguments.
- */
- Py_INCREF(Py_NotImplemented);
- return Py_NotImplemented;
- }
- else {
- PyErr_SetString(PyExc_TypeError,
- "XX can't happen, please report a bug XX");
- return NULL;
- }
+ return NULL;
}
/* Free the input references */
@@ -4528,42 +4718,20 @@ ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds)
/* wrap outputs */
for (i = 0; i < ufunc->nout; i++) {
int j = ufunc->nin+i;
- PyObject *wrap = wraparr[i];
+ _ufunc_context context;
+ PyObject *wrapped;
- if (wrap == NULL) {
- /* default behavior */
- retobj[i] = PyArray_Return(mps[j]);
- }
- else if (wrap == Py_None) {
- Py_DECREF(wrap);
- retobj[i] = (PyObject *)mps[j];
- }
- else {
- PyObject *res;
- PyObject *args_tup;
+ context.ufunc = ufunc;
+ context.args = full_args;
+ context.out_i = i;
- /* Call the method with appropriate context */
- args_tup = _get_wrap_prepare_args(full_args);
- if (args_tup == NULL) {
- goto fail;
- }
- res = PyObject_CallFunction(
- wrap, "O(OOi)", mps[j], ufunc, args_tup, i);
- Py_DECREF(args_tup);
-
- /* Handle __array_wrap__ that does not accept a context argument */
- if (res == NULL && PyErr_ExceptionMatches(PyExc_TypeError)) {
- PyErr_Clear();
- res = PyObject_CallFunctionObjArgs(wrap, mps[j], NULL);
- }
- Py_DECREF(wrap);
- Py_DECREF(mps[j]);
- mps[j] = NULL; /* Prevent fail double-freeing this */
- if (res == NULL) {
- goto fail;
- }
- retobj[i] = res;
+ wrapped = _apply_array_wrap(wraparr[i], mps[j], &context);
+ mps[j] = NULL; /* Prevent fail double-freeing this */
+ if (wrapped == NULL) {
+ goto fail;
}
+
+ retobj[i] = wrapped;
}
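What the wrapping loop amounts to from Python: each output is passed to the subclass's __array_wrap__ together with a (ufunc, args, output_index) context tuple. A minimal sketch:

    import numpy as np

    class Logged(np.ndarray):
        def __array_wrap__(self, out_arr, context=None):
            if context is not None:
                ufunc, args, i = context
                print("wrapping output", i, "of", ufunc.__name__)
            return np.ndarray.__array_wrap__(self, out_arr, context)

    x = np.arange(3.0).view(Logged)
    np.add(x, 1.0)   # prints: wrapping output 0 of add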
Py_XDECREF(full_args.in);
@@ -4701,8 +4869,21 @@ PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction *func, void **data,
const char *name, const char *doc,
int unused, const char *signature)
{
- PyUFuncObject *ufunc;
+ return PyUFunc_FromFuncAndDataAndSignatureAndIdentity(
+ func, data, types, ntypes, nin, nout, identity, name, doc,
+ unused, signature, NULL);
+}
+/*UFUNC_API*/
+NPY_NO_EXPORT PyObject *
+PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction *func, void **data,
+ char *types, int ntypes,
+ int nin, int nout, int identity,
+ const char *name, const char *doc,
+ int unused, const char *signature,
+ PyObject *identity_value)
+{
+ PyUFuncObject *ufunc;
if (nin + nout > NPY_MAXARGS) {
PyErr_Format(PyExc_ValueError,
"Cannot construct a ufunc with more than %d operands "
@@ -4715,23 +4896,22 @@ PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction *func, void **data,
if (ufunc == NULL) {
return NULL;
}
+ memset(ufunc, 0, sizeof(PyUFuncObject));
PyObject_Init((PyObject *)ufunc, &PyUFunc_Type);
- ufunc->reserved1 = 0;
- ufunc->reserved2 = NULL;
-
ufunc->nin = nin;
ufunc->nout = nout;
ufunc->nargs = nin+nout;
ufunc->identity = identity;
+ if (ufunc->identity == PyUFunc_IdentityValue) {
+ Py_INCREF(identity_value);
+ }
+ ufunc->identity_value = identity_value;
ufunc->functions = func;
ufunc->data = data;
ufunc->types = types;
ufunc->ntypes = ntypes;
- ufunc->ptr = NULL;
- ufunc->obj = NULL;
- ufunc->userloops=NULL;
/* Type resolution and inner loop selection functions */
ufunc->type_resolver = &PyUFunc_DefaultTypeResolver;
@@ -4748,19 +4928,11 @@ PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction *func, void **data,
ufunc->op_flags = PyArray_malloc(sizeof(npy_uint32)*ufunc->nargs);
if (ufunc->op_flags == NULL) {
+ Py_DECREF(ufunc);
return PyErr_NoMemory();
}
memset(ufunc->op_flags, 0, sizeof(npy_uint32)*ufunc->nargs);
- ufunc->iter_flags = 0;
-
- /* generalized ufunc */
- ufunc->core_enabled = 0;
- ufunc->core_num_dim_ix = 0;
- ufunc->core_num_dims = NULL;
- ufunc->core_dim_ixs = NULL;
- ufunc->core_offsets = NULL;
- ufunc->core_signature = NULL;
if (signature != NULL) {
if (_parse_signature(ufunc, signature) != 0) {
Py_DECREF(ufunc);
@@ -5107,12 +5279,17 @@ ufunc_dealloc(PyUFuncObject *ufunc)
{
PyArray_free(ufunc->core_num_dims);
PyArray_free(ufunc->core_dim_ixs);
+ PyArray_free(ufunc->core_dim_sizes);
+ PyArray_free(ufunc->core_dim_flags);
PyArray_free(ufunc->core_offsets);
PyArray_free(ufunc->core_signature);
PyArray_free(ufunc->ptr);
PyArray_free(ufunc->op_flags);
Py_XDECREF(ufunc->userloops);
Py_XDECREF(ufunc->obj);
+ if (ufunc->identity == PyUFunc_IdentityValue) {
+ Py_DECREF(ufunc->identity_value);
+ }
PyArray_free(ufunc);
}
diff --git a/numpy/core/src/umath/ufunc_object.h b/numpy/core/src/umath/ufunc_object.h
index d6fd3837a..f5de9f9b7 100644
--- a/numpy/core/src/umath/ufunc_object.h
+++ b/numpy/core/src/umath/ufunc_object.h
@@ -1,6 +1,8 @@
#ifndef _NPY_UMATH_UFUNC_OBJECT_H_
#define _NPY_UMATH_UFUNC_OBJECT_H_
+#include <numpy/ufuncobject.h>
+
NPY_NO_EXPORT PyObject *
ufunc_geterr(PyObject *NPY_UNUSED(dummy), PyObject *args);
@@ -10,13 +12,23 @@ ufunc_seterr(PyObject *NPY_UNUSED(dummy), PyObject *args);
NPY_NO_EXPORT const char*
ufunc_get_name_cstr(PyUFuncObject *ufunc);
-/* interned strings (on umath import) */
-NPY_VISIBILITY_HIDDEN extern PyObject * npy_um_str_out;
-NPY_VISIBILITY_HIDDEN extern PyObject * npy_um_str_subok;
-NPY_VISIBILITY_HIDDEN extern PyObject * npy_um_str_array_prepare;
-NPY_VISIBILITY_HIDDEN extern PyObject * npy_um_str_array_wrap;
-NPY_VISIBILITY_HIDDEN extern PyObject * npy_um_str_array_finalize;
-NPY_VISIBILITY_HIDDEN extern PyObject * npy_um_str_ufunc;
-NPY_VISIBILITY_HIDDEN extern PyObject * npy_um_str_pyvals_name;
+/* strings from umathmodule.c that are interned on umath import */
+NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_out;
+NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_where;
+NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_axes;
+NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_axis;
+NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_keepdims;
+NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_casting;
+NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_order;
+NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_dtype;
+NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_subok;
+NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_signature;
+NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_sig;
+NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_extobj;
+NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_array_prepare;
+NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_array_wrap;
+NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_array_finalize;
+NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_ufunc;
+NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_pyvals_name;
#endif
diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c
index 1766ba564..ec60d9cfd 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.c
+++ b/numpy/core/src/umath/ufunc_type_resolution.c
@@ -9,14 +9,12 @@
* See LICENSE.txt for the license.
*/
#define _UMATHMODULE
+#define _MULTIARRAYMODULE
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#include "Python.h"
#include "npy_config.h"
-#define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API
-#define NO_IMPORT_ARRAY
-
#include "npy_pycompat.h"
#include "numpy/ufuncobject.h"
@@ -24,6 +22,11 @@
#include "ufunc_object.h"
#include "common.h"
+#include "mem_overlap.h"
+#if defined(HAVE_CBLAS)
+#include "cblasfuncs.h"
+#endif
+
static const char *
npy_casting_to_string(NPY_CASTING casting)
{
@@ -42,6 +45,25 @@ npy_casting_to_string(NPY_CASTING casting)
return "<unknown>";
}
}
+
+static int
+raise_binary_type_reso_error(PyUFuncObject *ufunc, PyArrayObject **operands) {
+ PyObject *errmsg;
+ const char *ufunc_name = ufunc_get_name_cstr(ufunc);
+ errmsg = PyUString_FromFormat("ufunc %s cannot use operands "
+ "with types ", ufunc_name);
+ PyUString_ConcatAndDel(&errmsg,
+ PyObject_Repr((PyObject *)PyArray_DESCR(operands[0])));
+ PyUString_ConcatAndDel(&errmsg,
+ PyUString_FromString(" and "));
+ PyUString_ConcatAndDel(&errmsg,
+ PyObject_Repr((PyObject *)PyArray_DESCR(operands[1])));
+ PyErr_SetObject(PyExc_TypeError, errmsg);
+ Py_DECREF(errmsg);
+ return -1;
+}
+
+
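The helper produces the familiar error seen from Python, for example when adding two datetimes:

    import numpy as np

    try:
        np.add(np.datetime64('2000-01-01'), np.datetime64('2000-01-02'))
    except TypeError as e:
        print(e)   # ufunc add cannot use operands with types ... datetime64 ...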
/*UFUNC_API
*
* Validates that the input operands can be cast to
@@ -607,7 +629,6 @@ PyUFunc_AdditionTypeResolver(PyUFuncObject *ufunc,
{
int type_num1, type_num2;
int i;
- const char *ufunc_name = ufunc_get_name_cstr(ufunc);
type_num1 = PyArray_DESCR(operands[0])->type_num;
type_num2 = PyArray_DESCR(operands[1])->type_num;
@@ -663,7 +684,7 @@ PyUFunc_AdditionTypeResolver(PyUFuncObject *ufunc,
type_num2 = NPY_TIMEDELTA;
}
else {
- goto type_reso_error;
+ return raise_binary_type_reso_error(ufunc, operands);
}
}
else if (type_num1 == NPY_DATETIME) {
@@ -705,7 +726,7 @@ PyUFunc_AdditionTypeResolver(PyUFuncObject *ufunc,
type_num2 = NPY_TIMEDELTA;
}
else {
- goto type_reso_error;
+ return raise_binary_type_reso_error(ufunc, operands);
}
}
else if (PyTypeNum_ISINTEGER(type_num1) || PyTypeNum_ISBOOL(type_num1)) {
@@ -741,11 +762,11 @@ PyUFunc_AdditionTypeResolver(PyUFuncObject *ufunc,
type_num1 = NPY_TIMEDELTA;
}
else {
- goto type_reso_error;
+ return raise_binary_type_reso_error(ufunc, operands);
}
}
else {
- goto type_reso_error;
+ return raise_binary_type_reso_error(ufunc, operands);
}
/* Check against the casting rules */
@@ -758,21 +779,6 @@ PyUFunc_AdditionTypeResolver(PyUFuncObject *ufunc,
}
return 0;
-
-type_reso_error: {
- PyObject *errmsg;
- errmsg = PyUString_FromFormat("ufunc %s cannot use operands "
- "with types ", ufunc_name);
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)PyArray_DESCR(operands[0])));
- PyUString_ConcatAndDel(&errmsg,
- PyUString_FromString(" and "));
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)PyArray_DESCR(operands[1])));
- PyErr_SetObject(PyExc_TypeError, errmsg);
- Py_DECREF(errmsg);
- return -1;
- }
}
/*
@@ -795,7 +801,6 @@ PyUFunc_SubtractionTypeResolver(PyUFuncObject *ufunc,
{
int type_num1, type_num2;
int i;
- const char *ufunc_name = ufunc_get_name_cstr(ufunc);
type_num1 = PyArray_DESCR(operands[0])->type_num;
type_num2 = PyArray_DESCR(operands[1])->type_num;
@@ -848,7 +853,7 @@ PyUFunc_SubtractionTypeResolver(PyUFuncObject *ufunc,
type_num2 = NPY_TIMEDELTA;
}
else {
- goto type_reso_error;
+ return raise_binary_type_reso_error(ufunc, operands);
}
}
else if (type_num1 == NPY_DATETIME) {
@@ -906,7 +911,7 @@ PyUFunc_SubtractionTypeResolver(PyUFuncObject *ufunc,
Py_INCREF(out_dtypes[1]);
}
else {
- goto type_reso_error;
+ return raise_binary_type_reso_error(ufunc, operands);
}
}
else if (PyTypeNum_ISINTEGER(type_num1) || PyTypeNum_ISBOOL(type_num1)) {
@@ -924,11 +929,11 @@ PyUFunc_SubtractionTypeResolver(PyUFuncObject *ufunc,
type_num1 = NPY_TIMEDELTA;
}
else {
- goto type_reso_error;
+ return raise_binary_type_reso_error(ufunc, operands);
}
}
else {
- goto type_reso_error;
+ return raise_binary_type_reso_error(ufunc, operands);
}
/* Check against the casting rules */
@@ -941,21 +946,6 @@ PyUFunc_SubtractionTypeResolver(PyUFuncObject *ufunc,
}
return 0;
-
-type_reso_error: {
- PyObject *errmsg;
- errmsg = PyUString_FromFormat("ufunc %s cannot use operands "
- "with types ", ufunc_name);
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)PyArray_DESCR(operands[0])));
- PyUString_ConcatAndDel(&errmsg,
- PyUString_FromString(" and "));
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)PyArray_DESCR(operands[1])));
- PyErr_SetObject(PyExc_TypeError, errmsg);
- Py_DECREF(errmsg);
- return -1;
- }
}
/*
@@ -975,7 +965,6 @@ PyUFunc_MultiplicationTypeResolver(PyUFuncObject *ufunc,
{
int type_num1, type_num2;
int i;
- const char *ufunc_name = ufunc_get_name_cstr(ufunc);
type_num1 = PyArray_DESCR(operands[0])->type_num;
type_num2 = PyArray_DESCR(operands[1])->type_num;
@@ -1022,7 +1011,7 @@ PyUFunc_MultiplicationTypeResolver(PyUFuncObject *ufunc,
type_num2 = NPY_DOUBLE;
}
else {
- goto type_reso_error;
+ return raise_binary_type_reso_error(ufunc, operands);
}
}
else if (PyTypeNum_ISINTEGER(type_num1) || PyTypeNum_ISBOOL(type_num1)) {
@@ -1044,7 +1033,7 @@ PyUFunc_MultiplicationTypeResolver(PyUFuncObject *ufunc,
type_num1 = NPY_LONGLONG;
}
else {
- goto type_reso_error;
+ return raise_binary_type_reso_error(ufunc, operands);
}
}
else if (PyTypeNum_ISFLOAT(type_num1)) {
@@ -1066,11 +1055,11 @@ PyUFunc_MultiplicationTypeResolver(PyUFuncObject *ufunc,
type_num1 = NPY_DOUBLE;
}
else {
- goto type_reso_error;
+ return raise_binary_type_reso_error(ufunc, operands);
}
}
else {
- goto type_reso_error;
+ return raise_binary_type_reso_error(ufunc, operands);
}
/* Check against the casting rules */
@@ -1083,21 +1072,6 @@ PyUFunc_MultiplicationTypeResolver(PyUFuncObject *ufunc,
}
return 0;
-
-type_reso_error: {
- PyObject *errmsg;
- errmsg = PyUString_FromFormat("ufunc %s cannot use operands "
- "with types ", ufunc_name);
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)PyArray_DESCR(operands[0])));
- PyUString_ConcatAndDel(&errmsg,
- PyUString_FromString(" and "));
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)PyArray_DESCR(operands[1])));
- PyErr_SetObject(PyExc_TypeError, errmsg);
- Py_DECREF(errmsg);
- return -1;
- }
}
@@ -1117,7 +1091,6 @@ PyUFunc_DivisionTypeResolver(PyUFuncObject *ufunc,
{
int type_num1, type_num2;
int i;
- const char *ufunc_name = ufunc_get_name_cstr(ufunc);
type_num1 = PyArray_DESCR(operands[0])->type_num;
type_num2 = PyArray_DESCR(operands[1])->type_num;
@@ -1185,11 +1158,11 @@ PyUFunc_DivisionTypeResolver(PyUFuncObject *ufunc,
type_num2 = NPY_DOUBLE;
}
else {
- goto type_reso_error;
+ return raise_binary_type_reso_error(ufunc, operands);
}
}
else {
- goto type_reso_error;
+ return raise_binary_type_reso_error(ufunc, operands);
}
/* Check against the casting rules */
@@ -1202,21 +1175,57 @@ PyUFunc_DivisionTypeResolver(PyUFuncObject *ufunc,
}
return 0;
+}
-type_reso_error: {
- PyObject *errmsg;
- errmsg = PyUString_FromFormat("ufunc %s cannot use operands "
- "with types ", ufunc_name);
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)PyArray_DESCR(operands[0])));
- PyUString_ConcatAndDel(&errmsg,
- PyUString_FromString(" and "));
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)PyArray_DESCR(operands[1])));
- PyErr_SetObject(PyExc_TypeError, errmsg);
- Py_DECREF(errmsg);
+
+NPY_NO_EXPORT int
+PyUFunc_RemainderTypeResolver(PyUFuncObject *ufunc,
+ NPY_CASTING casting,
+ PyArrayObject **operands,
+ PyObject *type_tup,
+ PyArray_Descr **out_dtypes)
+{
+ int type_num1, type_num2;
+ int i;
+
+ type_num1 = PyArray_DESCR(operands[0])->type_num;
+ type_num2 = PyArray_DESCR(operands[1])->type_num;
+
+ /* Use the default when datetime and timedelta are not involved */
+ if (!PyTypeNum_ISDATETIME(type_num1) && !PyTypeNum_ISDATETIME(type_num2)) {
+ return PyUFunc_DefaultTypeResolver(ufunc, casting, operands,
+ type_tup, out_dtypes);
+ }
+ if (type_num1 == NPY_TIMEDELTA) {
+ if (type_num2 == NPY_TIMEDELTA) {
+ out_dtypes[0] = PyArray_PromoteTypes(PyArray_DESCR(operands[0]),
+ PyArray_DESCR(operands[1]));
+ if (out_dtypes[0] == NULL) {
+ return -1;
+ }
+ out_dtypes[1] = out_dtypes[0];
+ Py_INCREF(out_dtypes[1]);
+ out_dtypes[2] = out_dtypes[0];
+ Py_INCREF(out_dtypes[2]);
+ }
+ else {
+ return raise_binary_type_reso_error(ufunc, operands);
+ }
+ }
+ else {
+ return raise_binary_type_reso_error(ufunc, operands);
+ }
+
+ /* Check against the casting rules */
+ if (PyUFunc_ValidateCasting(ufunc, casting, operands, out_dtypes) < 0) {
+ for (i = 0; i < 3; ++i) {
+ Py_DECREF(out_dtypes[i]);
+ out_dtypes[i] = NULL;
+ }
return -1;
}
+
+ return 0;
}
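Python-level behavior of the new remainder resolver; the concrete values mirror the timedelta modulus tests added further below:

    import numpy as np

    print(np.timedelta64(7, 's') % np.timedelta64(3, 's'))   # 1 seconds
    try:
        np.timedelta64(7, 'Y') % 15   # mixed operand types are rejected
    except TypeError as e:
        print(e)   # ...remainder cannot use operands with types...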
@@ -1277,7 +1286,7 @@ PyUFunc_MixedDivisionTypeResolver(PyUFuncObject *ufunc,
PyObject *type_tup,
PyArray_Descr **out_dtypes)
{
- /* Depreciation checks needed only on python 2 */
+ /* Deprecation checks needed only on python 2 */
#if !defined(NPY_PY3K)
int type_num1, type_num2;
@@ -1295,7 +1304,6 @@ PyUFunc_MixedDivisionTypeResolver(PyUFuncObject *ufunc,
type_tup, out_dtypes);
}
-
static int
find_userloop(PyUFuncObject *ufunc,
PyArray_Descr **dtypes,
diff --git a/numpy/core/src/umath/ufunc_type_resolution.h b/numpy/core/src/umath/ufunc_type_resolution.h
index fa9f1dbfa..2f37af753 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.h
+++ b/numpy/core/src/umath/ufunc_type_resolution.h
@@ -92,6 +92,13 @@ PyUFunc_DivisionTypeResolver(PyUFuncObject *ufunc,
PyObject *type_tup,
PyArray_Descr **out_dtypes);
+NPY_NO_EXPORT int
+PyUFunc_RemainderTypeResolver(PyUFuncObject *ufunc,
+ NPY_CASTING casting,
+ PyArrayObject **operands,
+ PyObject *type_tup,
+ PyArray_Descr **out_dtypes);
+
/*
* Does a linear search for the best inner loop of the ufunc.
*
@@ -138,5 +145,4 @@ PyUFunc_DefaultMaskedInnerLoopSelector(PyUFuncObject *ufunc,
NpyAuxData **out_innerloopdata,
int *out_needs_api);
-
#endif
diff --git a/numpy/core/src/umath/umathmodule.c b/numpy/core/src/umath/umathmodule.c
index 5567b9bbf..5de19fec2 100644
--- a/numpy/core/src/umath/umathmodule.c
+++ b/numpy/core/src/umath/umathmodule.c
@@ -16,12 +16,12 @@
* __ufunc_api.c
*/
#define _UMATHMODULE
+#define _MULTIARRAYMODULE
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#include "Python.h"
#include "npy_config.h"
-#define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API
#include "numpy/arrayobject.h"
#include "numpy/ufuncobject.h"
@@ -29,20 +29,7 @@
#include "abstract.h"
#include "numpy/npy_math.h"
-
-/*
- *****************************************************************************
- ** INCLUDE GENERATED CODE **
- *****************************************************************************
- */
-#include "funcs.inc"
-#include "loops.h"
-#include "ufunc_object.h"
-#include "ufunc_type_resolution.h"
-#include "__umath_generated.c"
-#include "__ufunc_api.c"
-
-NPY_NO_EXPORT int initscalarmath(PyObject *);
+#include "number.h"
static PyUFuncGenericFunction pyfunc_functions[] = {PyUFunc_On_Om};
@@ -82,7 +69,7 @@ object_ufunc_loop_selector(PyUFuncObject *ufunc,
return 0;
}
-static PyObject *
+PyObject *
ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUSED(kwds)) {
/* Keywords are ignored for now */
@@ -179,7 +166,7 @@ ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUS
}
/* docstring in numpy.add_newdocs.py */
-static PyObject *
+PyObject *
add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args)
{
PyUFuncObject *ufunc;
@@ -226,20 +213,40 @@ add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args)
*****************************************************************************
*/
-NPY_VISIBILITY_HIDDEN PyObject * npy_um_str_out = NULL;
-NPY_VISIBILITY_HIDDEN PyObject * npy_um_str_subok = NULL;
-NPY_VISIBILITY_HIDDEN PyObject * npy_um_str_array_prepare = NULL;
-NPY_VISIBILITY_HIDDEN PyObject * npy_um_str_array_wrap = NULL;
-NPY_VISIBILITY_HIDDEN PyObject * npy_um_str_array_finalize = NULL;
-NPY_VISIBILITY_HIDDEN PyObject * npy_um_str_ufunc = NULL;
-NPY_VISIBILITY_HIDDEN PyObject * npy_um_str_pyvals_name = NULL;
+NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_out = NULL;
+NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_where = NULL;
+NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_axes = NULL;
+NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_axis = NULL;
+NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_keepdims = NULL;
+NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_casting = NULL;
+NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_order = NULL;
+NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_dtype = NULL;
+NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_subok = NULL;
+NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_signature = NULL;
+NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_sig = NULL;
+NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_extobj = NULL;
+NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_array_prepare = NULL;
+NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_array_wrap = NULL;
+NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_array_finalize = NULL;
+NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_ufunc = NULL;
+NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_pyvals_name = NULL;
/* intern some strings used in ufuncs */
static int
intern_strings(void)
{
npy_um_str_out = PyUString_InternFromString("out");
+ npy_um_str_where = PyUString_InternFromString("where");
+ npy_um_str_axes = PyUString_InternFromString("axes");
+ npy_um_str_axis = PyUString_InternFromString("axis");
+ npy_um_str_keepdims = PyUString_InternFromString("keepdims");
+ npy_um_str_casting = PyUString_InternFromString("casting");
+ npy_um_str_order = PyUString_InternFromString("order");
+ npy_um_str_dtype = PyUString_InternFromString("dtype");
npy_um_str_subok = PyUString_InternFromString("subok");
+ npy_um_str_signature = PyUString_InternFromString("signature");
+ npy_um_str_sig = PyUString_InternFromString("sig");
+ npy_um_str_extobj = PyUString_InternFromString("extobj");
npy_um_str_array_prepare = PyUString_InternFromString("__array_prepare__");
npy_um_str_array_wrap = PyUString_InternFromString("__array_wrap__");
npy_um_str_array_finalize = PyUString_InternFromString("__array_finalize__");
@@ -250,97 +257,20 @@ intern_strings(void)
npy_um_str_array_wrap && npy_um_str_array_finalize && npy_um_str_ufunc;
}
-/* Setup the umath module */
-/* Remove for time being, it is declared in __ufunc_api.h */
-/*static PyTypeObject PyUFunc_Type;*/
-
-static struct PyMethodDef methods[] = {
- {"frompyfunc",
- (PyCFunction) ufunc_frompyfunc,
- METH_VARARGS | METH_KEYWORDS, NULL},
- {"seterrobj",
- (PyCFunction) ufunc_seterr,
- METH_VARARGS, NULL},
- {"geterrobj",
- (PyCFunction) ufunc_geterr,
- METH_VARARGS, NULL},
- {"_add_newdoc_ufunc", (PyCFunction)add_newdoc_ufunc,
- METH_VARARGS, NULL},
- {NULL, NULL, 0, NULL} /* sentinel */
-};
-
-
-#if defined(NPY_PY3K)
-static struct PyModuleDef moduledef = {
- PyModuleDef_HEAD_INIT,
- "umath",
- NULL,
- -1,
- methods,
- NULL,
- NULL,
- NULL,
- NULL
-};
-#endif
-
-#include <stdio.h>
+/* Setup the umath part of the module */
-#if defined(NPY_PY3K)
-#define RETVAL(x) x
-PyMODINIT_FUNC PyInit_umath(void)
-#else
-#define RETVAL(x)
-PyMODINIT_FUNC initumath(void)
-#endif
+int initumath(PyObject *m)
{
- PyObject *m, *d, *s, *s2, *c_api;
+ PyObject *d, *s, *s2;
int UFUNC_FLOATING_POINT_SUPPORT = 1;
#ifdef NO_UFUNC_FLOATING_POINT_SUPPORT
UFUNC_FLOATING_POINT_SUPPORT = 0;
#endif
- /* Create the module and add the functions */
-#if defined(NPY_PY3K)
- m = PyModule_Create(&moduledef);
-#else
- m = Py_InitModule("umath", methods);
-#endif
- if (!m) {
- goto err;
- }
-
- /* Import the array */
- if (_import_array() < 0) {
- if (!PyErr_Occurred()) {
- PyErr_SetString(PyExc_ImportError,
- "umath failed: Could not import array core.");
- }
- goto err;
- }
-
- /* Initialize the types */
- if (PyType_Ready(&PyUFunc_Type) < 0)
- goto err;
/* Add some symbolic constants to the module */
d = PyModule_GetDict(m);
- c_api = NpyCapsule_FromVoidPtr((void *)PyUFunc_API, NULL);
- if (PyErr_Occurred()) {
- goto err;
- }
- PyDict_SetItemString(d, "_UFUNC_API", c_api);
- Py_DECREF(c_api);
- if (PyErr_Occurred()) {
- goto err;
- }
-
- /* Load the ufunc operators into the array module's namespace */
- if (InitOperators(d) < 0) {
- goto err;
- }
-
PyDict_SetItemString(d, "pi", s = PyFloat_FromDouble(NPY_PI));
Py_DECREF(s);
PyDict_SetItemString(d, "e", s = PyFloat_FromDouble(NPY_E));
@@ -392,24 +322,16 @@ PyMODINIT_FUNC initumath(void)
s2 = PyDict_GetItemString(d, "remainder");
/* Setup the array object's numerical structures with appropriate
ufuncs in d*/
- PyArray_SetNumericOps(d);
+ _PyArray_SetNumericOps(d);
PyDict_SetItemString(d, "conj", s);
PyDict_SetItemString(d, "mod", s2);
- initscalarmath(m);
-
if (!intern_strings()) {
- goto err;
- }
-
- return RETVAL(m);
-
- err:
- /* Check for errors */
- if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
- "cannot load umath module.");
+ "cannot intern umath strings while initializing _multiarray_umath.");
+ return -1;
}
- return RETVAL(NULL);
+
+ return 0;
}
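With NEP 15's merge of multiarray and umath, initumath() now populates part of the combined extension module instead of creating its own; from Python the merged module is numpy.core._multiarray_umath (an internal 1.16 detail, subject to change):

    import numpy.core._multiarray_umath as mu
    # umath entry points such as frompyfunc are registered on the merged module
    print(hasattr(mu, 'frompyfunc'))   # True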
diff --git a/numpy/core/tests/_locales.py b/numpy/core/tests/_locales.py
index 28eebb14d..52e4ff36d 100644
--- a/numpy/core/tests/_locales.py
+++ b/numpy/core/tests/_locales.py
@@ -6,7 +6,7 @@ from __future__ import division, absolute_import, print_function
import sys
import locale
-from numpy.testing import SkipTest
+import pytest
__ALL__ = ['CommaDecimalPointLocale']
@@ -52,7 +52,7 @@ class CommaDecimalPointLocale(object):
tests with locale.LC_NUMERIC set to a locale where commas (',') are used as
the decimal point instead of periods ('.'). On exit the locale is restored
to the initial locale. It also serves as context manager with the same
- effect. If no such locale is available, it raises SkipTest in both cases.
+ effect. If no such locale is available, the test is skipped.
.. versionadded:: 1.15.0
@@ -61,7 +61,7 @@ class CommaDecimalPointLocale(object):
def setup(self):
if self.tst_locale is None:
- raise SkipTest("No French locale available")
+ pytest.skip("No French locale available")
locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale)
def teardown(self):
@@ -69,7 +69,7 @@ class CommaDecimalPointLocale(object):
def __enter__(self):
if self.tst_locale is None:
- raise SkipTest("No French locale available")
+ pytest.skip("No French locale available")
locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale)
def __exit__(self, type, value, traceback):
diff --git a/numpy/core/tests/test_arrayprint.py b/numpy/core/tests/test_arrayprint.py
index 6214e325c..7a858d2e2 100644
--- a/numpy/core/tests/test_arrayprint.py
+++ b/numpy/core/tests/test_arrayprint.py
@@ -8,6 +8,7 @@ import pytest
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_warns, HAS_REFCOUNT,
+ assert_raises_regex,
)
import textwrap
@@ -210,6 +211,15 @@ class TestArray2String(object):
assert_(np.array2string(a, max_line_width=4, legacy='1.13') == '[0 1\n 2]')
assert_(np.array2string(a, max_line_width=4) == '[0\n 1\n 2]')
+ def test_unexpected_kwarg(self):
+ # ensure that an appropriate TypeError
+ # is raised when array2string receives
+ # an unexpected kwarg
+
+ with assert_raises_regex(TypeError, 'nonsense'):
+ np.array2string(np.array([1, 2, 3]),
+ nonsense=None)
+
def test_format_function(self):
"""Test custom format function for each element in array."""
def _format_function(x):
@@ -832,6 +842,10 @@ class TestPrintOptions(object):
[[ 0.]]]])""")
)
+ def test_bad_args(self):
+ assert_raises(ValueError, np.set_printoptions, threshold='nan')
+ assert_raises(ValueError, np.set_printoptions, threshold=u'1')
+ assert_raises(ValueError, np.set_printoptions, threshold=b'1')
def test_unicode_object_array():
import sys
diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index b1b1e87c1..b2ce0402a 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -1,14 +1,15 @@
from __future__ import division, absolute_import, print_function
-import pickle
import numpy
import numpy as np
import datetime
import pytest
from numpy.testing import (
- assert_, assert_equal, assert_raises, assert_warns, suppress_warnings
+ assert_, assert_equal, assert_raises, assert_warns, suppress_warnings,
+ assert_raises_regex,
)
+from numpy.core.numeric import pickle
# Use pytz to test out various time zones if available
try:
@@ -130,13 +131,10 @@ class TestDateTime(object):
def test_compare_generic_nat(self):
# regression tests for gh-6452
- assert_equal(np.datetime64('NaT'),
- np.datetime64('2000') + np.timedelta64('NaT'))
- # nb. we may want to make NaT != NaT true in the future
- with suppress_warnings() as sup:
- sup.filter(FutureWarning, ".*NAT ==")
- assert_(np.datetime64('NaT') == np.datetime64('NaT', 'us'))
- assert_(np.datetime64('NaT', 'us') == np.datetime64('NaT'))
+ assert_(np.datetime64('NaT') !=
+ np.datetime64('2000') + np.timedelta64('NaT'))
+ assert_(np.datetime64('NaT') != np.datetime64('NaT', 'us'))
+ assert_(np.datetime64('NaT', 'us') != np.datetime64('NaT'))
def test_datetime_scalar_construction(self):
# Construct with different units
@@ -260,6 +258,21 @@ class TestDateTime(object):
arr = np.array([dt, dt]).astype('datetime64')
assert_equal(arr.dtype, np.dtype('M8[us]'))
+ @pytest.mark.parametrize("unit", [
+ # test all date / time units and use
+ # "generic" to select generic unit
+ ("Y"), ("M"), ("W"), ("D"), ("h"), ("m"),
+ ("s"), ("ms"), ("us"), ("ns"), ("ps"),
+ ("fs"), ("as"), ("generic") ])
+ def test_timedelta_np_int_construction(self, unit):
+ # regression test for gh-7617
+ if unit != "generic":
+ assert_equal(np.timedelta64(np.int64(123), unit),
+ np.timedelta64(123, unit))
+ else:
+ assert_equal(np.timedelta64(np.int64(123)),
+ np.timedelta64(123))
+
def test_timedelta_scalar_construction(self):
# Construct with different units
assert_equal(np.timedelta64(7, 'D'),
@@ -355,6 +368,16 @@ class TestDateTime(object):
actual = np.array(inputs, dtype='timedelta64[D]')
assert_equal(expected, actual)
+ def test_timedelta_0_dim_object_array_conversion(self):
+ # Regression test for gh-11151
+ test = np.array(datetime.timedelta(seconds=20))
+ actual = test.astype(np.timedelta64)
+ # expected value from the array constructor workaround
+ # described in above issue
+ expected = np.array(datetime.timedelta(seconds=20),
+ np.timedelta64)
+ assert_equal(actual, expected)
+
def test_timedelta_scalar_construction_units(self):
# String construction detecting units
assert_equal(np.datetime64('2010').dtype,
@@ -616,14 +639,17 @@ class TestDateTime(object):
def test_pickle(self):
# Check that pickle roundtripping works
- dt = np.dtype('M8[7D]')
- assert_equal(pickle.loads(pickle.dumps(dt)), dt)
- dt = np.dtype('M8[W]')
- assert_equal(pickle.loads(pickle.dumps(dt)), dt)
- scalar = np.datetime64('2016-01-01T00:00:00.000000000')
- assert_equal(pickle.loads(pickle.dumps(scalar)), scalar)
- delta = scalar - np.datetime64('2015-01-01T00:00:00.000000000')
- assert_equal(pickle.loads(pickle.dumps(delta)), delta)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ dt = np.dtype('M8[7D]')
+ assert_equal(pickle.loads(pickle.dumps(dt, protocol=proto)), dt)
+ dt = np.dtype('M8[W]')
+ assert_equal(pickle.loads(pickle.dumps(dt, protocol=proto)), dt)
+ scalar = np.datetime64('2016-01-01T00:00:00.000000000')
+ assert_equal(pickle.loads(pickle.dumps(scalar, protocol=proto)),
+ scalar)
+ delta = scalar - np.datetime64('2015-01-01T00:00:00.000000000')
+ assert_equal(pickle.loads(pickle.dumps(delta, protocol=proto)),
+ delta)
# Check that loading pickles from 1.6 works
pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
@@ -1144,47 +1170,23 @@ class TestDateTime(object):
td_nat = np.timedelta64('NaT', 'h')
td_other = np.timedelta64(1, 'h')
- with suppress_warnings() as sup:
- # The assert warns contexts will again see the warning:
- sup.filter(FutureWarning, ".*NAT")
-
- for op in [np.equal, np.less, np.less_equal,
- np.greater, np.greater_equal]:
- if op(dt_nat, dt_nat):
- assert_warns(FutureWarning, op, dt_nat, dt_nat)
- if op(dt_nat, dt_other):
- assert_warns(FutureWarning, op, dt_nat, dt_other)
- if op(dt_other, dt_nat):
- assert_warns(FutureWarning, op, dt_other, dt_nat)
- if op(td_nat, td_nat):
- assert_warns(FutureWarning, op, td_nat, td_nat)
- if op(td_nat, td_other):
- assert_warns(FutureWarning, op, td_nat, td_other)
- if op(td_other, td_nat):
- assert_warns(FutureWarning, op, td_other, td_nat)
-
- assert_warns(FutureWarning, np.not_equal, dt_nat, dt_nat)
- assert_warns(FutureWarning, np.not_equal, td_nat, td_nat)
-
- with suppress_warnings() as sup:
- sup.record(FutureWarning)
- assert_(np.not_equal(dt_nat, dt_other))
- assert_(np.not_equal(dt_other, dt_nat))
- assert_(np.not_equal(td_nat, td_other))
- assert_(np.not_equal(td_other, td_nat))
- assert_equal(len(sup.log), 0)
-
- def test_datetime_futurewarning_once_nat(self):
- # Test that the futurewarning is only given once per inner loop
- arr1 = np.array(['NaT', 'NaT', '2000-01-01'] * 2, dtype='M8[s]')
- arr2 = np.array(['NaT', '2000-01-01', 'NaT'] * 2, dtype='M8[s]')
- # All except less, because for less it can't be wrong (NaT is min)
for op in [np.equal, np.less, np.less_equal,
np.greater, np.greater_equal]:
- with suppress_warnings() as sup:
- rec = sup.record(FutureWarning, ".*NAT")
- op(arr1, arr2)
- assert_(len(rec) == 1, "failed for {}".format(op))
+ assert_(not op(dt_nat, dt_nat))
+ assert_(not op(dt_nat, dt_other))
+ assert_(not op(dt_other, dt_nat))
+
+ assert_(not op(td_nat, td_nat))
+ assert_(not op(td_nat, td_other))
+ assert_(not op(td_other, td_nat))
+
+ assert_(np.not_equal(dt_nat, dt_nat))
+ assert_(np.not_equal(dt_nat, dt_other))
+ assert_(np.not_equal(dt_other, dt_nat))
+
+ assert_(np.not_equal(td_nat, td_nat))
+ assert_(np.not_equal(td_nat, td_other))
+ assert_(np.not_equal(td_other, td_nat))
def test_datetime_minmax(self):
# The metadata of the result should become the GCD
@@ -1625,6 +1627,76 @@ class TestDateTime(object):
assert_raises(TypeError, np.arange, np.timedelta64(0, 'Y'),
np.timedelta64(5, 'D'))
+ @pytest.mark.parametrize("val1, val2, expected", [
+ # case from gh-12092
+ (np.timedelta64(7, 's'),
+ np.timedelta64(3, 's'),
+ np.timedelta64(1, 's')),
+ # negative value cases
+ (np.timedelta64(3, 's'),
+ np.timedelta64(-2, 's'),
+ np.timedelta64(-1, 's')),
+ (np.timedelta64(-3, 's'),
+ np.timedelta64(2, 's'),
+ np.timedelta64(1, 's')),
+ # larger value cases
+ (np.timedelta64(17, 's'),
+ np.timedelta64(22, 's'),
+ np.timedelta64(17, 's')),
+ (np.timedelta64(22, 's'),
+ np.timedelta64(17, 's'),
+ np.timedelta64(5, 's')),
+ # different units
+ (np.timedelta64(1, 'm'),
+ np.timedelta64(57, 's'),
+ np.timedelta64(3, 's')),
+ (np.timedelta64(1, 'us'),
+ np.timedelta64(727, 'ns'),
+ np.timedelta64(273, 'ns')),
+ # NaT is propagated
+ (np.timedelta64('NaT'),
+ np.timedelta64(50, 'ns'),
+ np.timedelta64('NaT')),
+ # Y % M works
+ (np.timedelta64(2, 'Y'),
+ np.timedelta64(22, 'M'),
+ np.timedelta64(2, 'M')),
+ ])
+ def test_timedelta_modulus(self, val1, val2, expected):
+ assert_equal(val1 % val2, expected)
+
+ @pytest.mark.parametrize("val1, val2", [
+ # years and months sometimes can't be unambiguously
+ # divided for modulus operation
+ (np.timedelta64(7, 'Y'),
+ np.timedelta64(3, 's')),
+ (np.timedelta64(7, 'M'),
+ np.timedelta64(1, 'D')),
+ ])
+ def test_timedelta_modulus_error(self, val1, val2):
+ with assert_raises_regex(TypeError, "common metadata divisor"):
+ val1 % val2
+
+ def test_timedelta_modulus_div_by_zero(self):
+ with assert_warns(RuntimeWarning):
+ actual = np.timedelta64(10, 's') % np.timedelta64(0, 's')
+ assert_equal(actual, np.timedelta64(0, 's'))
+
+ @pytest.mark.parametrize("val1, val2", [
+ # cases where one operand is not
+ # timedelta64
+ (np.timedelta64(7, 'Y'),
+ 15,),
+ (7.5,
+ np.timedelta64(1, 'D')),
+ ])
+ def test_timedelta_modulus_type_resolution(self, val1, val2):
+ # NOTE: some of the operations may be supported
+ # in the future
+ with assert_raises_regex(TypeError,
+ "remainder cannot use operands with types"):
+ val1 % val2
+
def test_timedelta_arange_no_dtype(self):
d = np.array(5, dtype="m8[D]")
assert_equal(np.arange(d, d + 1), d)
@@ -1702,7 +1774,6 @@ class TestDateTime(object):
assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='preceding'),
np.datetime64('NaT'))
-
def test_datetime_busdaycalendar(self):
# Check that it removes NaT, duplicates, and weekends
# and sorts the result.
diff --git a/numpy/core/tests/test_defchararray.py b/numpy/core/tests/test_defchararray.py
index 43f1b71c7..7b0e6f8a4 100644
--- a/numpy/core/tests/test_defchararray.py
+++ b/numpy/core/tests/test_defchararray.py
@@ -6,7 +6,7 @@ import numpy as np
from numpy.core.multiarray import _vec_string
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_raises,
- suppress_warnings,
+ assert_raises_regex, suppress_warnings,
)
kw_unicode_true = {'unicode': True} # make 2to3 work properly
@@ -626,12 +626,9 @@ class TestOperations(object):
assert_array_equal(Ar, (self.A * r))
for ob in [object(), 'qrs']:
- try:
- A * ob
- except ValueError:
- pass
- else:
- self.fail("chararray can only be multiplied by integers")
+ with assert_raises_regex(ValueError,
+ 'Can only multiply by integers'):
+ A*ob
def test_rmul(self):
A = self.A
@@ -641,12 +638,9 @@ class TestOperations(object):
assert_array_equal(Ar, (r * self.A))
for ob in [object(), 'qrs']:
- try:
+ with assert_raises_regex(ValueError,
+ 'Can only multiply by integers'):
ob * A
- except ValueError:
- pass
- else:
- self.fail("chararray can only be multiplied by integers")
def test_mod(self):
"""Ticket #856"""
@@ -668,13 +662,9 @@ class TestOperations(object):
assert_(("%r" % self.A) == repr(self.A))
for ob in [42, object()]:
- try:
+ with assert_raises_regex(
+ TypeError, "unsupported operand type.* and 'chararray'"):
ob % self.A
- except TypeError:
- pass
- else:
- self.fail("chararray __rmod__ should fail with "
- "non-string objects")
def test_slice(self):
"""Regression test for https://github.com/numpy/numpy/issues/5982"""
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index 8dd42b21c..edb5d5e46 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -13,8 +13,7 @@ import pytest
import numpy as np
from numpy.testing import (
- assert_raises, assert_warns, assert_no_warnings, assert_array_equal,
- assert_
+ assert_raises, assert_warns, assert_
)
try:
@@ -36,7 +35,7 @@ class _DeprecationTestCase(object):
# Do *not* ignore other DeprecationWarnings. Ignoring warnings
# can give very confusing results because of
- # http://bugs.python.org/issue4180 and it is probably simplest to
+ # https://bugs.python.org/issue4180 and it is probably simplest to
# try to keep the tests cleanly giving only the right warning type.
# (While checking them set to "error" those are ignored anyway)
# We still have them show up, because otherwise they would be raised
@@ -190,10 +189,10 @@ class TestComparisonDeprecations(_DeprecationTestCase):
b = np.array(['a', 'b', 'c'])
assert_raises(ValueError, lambda x, y: x == y, a, b)
- # The empty list is not cast to string, as this is only to document
- # that fact (it likely should be changed). This means that the
- # following works (and returns False) due to dtype mismatch:
- a == []
+ # The empty list is not cast to string, and this used to pass due
+ # to dtype mismatch; now (2018-06-21) it correctly leads to a
+ # FutureWarning.
+ assert_warns(FutureWarning, lambda: a == [])
def test_void_dtype_equality_failures(self):
class NotArray(object):
@@ -414,7 +413,7 @@ class TestClassicIntDivision(_DeprecationTestCase):
"""
See #7949. Deprecate the numeric-style dtypes with -3 flag in python 2
if used for division
- List of data types: http://docs.scipy.org/doc/numpy/user/basics.types.html
+ List of data types: https://docs.scipy.org/doc/numpy/user/basics.types.html
"""
def test_int_dtypes(self):
#scramble types and do some mix and match testing
@@ -506,7 +505,31 @@ class TestGeneratorSum(_DeprecationTestCase):
self.assert_deprecated(np.sum, args=((i for i in range(5)),))
+class TestSctypeNA(_VisibleDeprecationTestCase):
+ # 2018-06-24, 1.16
+ def test_sctypeNA(self):
+ self.assert_deprecated(lambda: np.sctypeNA['?'])
+ self.assert_deprecated(lambda: np.typeNA['?'])
+ self.assert_deprecated(lambda: np.typeNA.get('?'))
+
+
+class TestPositiveOnNonNumerical(_DeprecationTestCase):
+ # 2018-06-28, 1.16.0
+ def test_positive_on_non_number(self):
+ self.assert_deprecated(operator.pos, args=(np.array('foo'),))
+
class TestFromstring(_DeprecationTestCase):
# 2017-10-19, 1.14
def test_fromstring(self):
self.assert_deprecated(np.fromstring, args=('\x00'*80,))
+
+class Test_GetSet_NumericOps(_DeprecationTestCase):
+ # 2018-09-20, 1.16.0
+ def test_get_numeric_ops(self):
+ from numpy.core._multiarray_tests import getset_numericops
+ self.assert_deprecated(getset_numericops, num=2)
+
+ # Empty kwargs prevent any state from actually changing, which would
+ # break other tests.
+ self.assert_deprecated(np.set_numeric_ops, kwargs={})
+ assert_raises(ValueError, np.set_numeric_ops, add='abc')
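A rough sketch of what the set_numeric_ops deprecation looks like from user code, assuming the 1.16 behaviour asserted above:

    import warnings
    import numpy as np

    with warnings.catch_warnings(record=True) as log:
        warnings.simplefilter("always")
        np.set_numeric_ops()  # no kwargs, so no state actually changes
    assert any(issubclass(w.category, DeprecationWarning) for w in log)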
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index 31ef9d609..c55751e3c 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -1,6 +1,5 @@
from __future__ import division, absolute_import, print_function
-import pickle
import sys
import operator
import pytest
@@ -9,6 +8,7 @@ import ctypes
import numpy as np
from numpy.core._rational_tests import rational
from numpy.testing import assert_, assert_equal, assert_raises
+from numpy.core.numeric import pickle
def assert_dtype_equal(a, b):
assert_equal(a, b)
@@ -21,26 +21,26 @@ def assert_dtype_not_equal(a, b):
"two different types hash to the same value !")
class TestBuiltin(object):
- def test_run(self):
+ @pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
+ np.unicode])
+ def test_run(self, t):
"""Only test hash runs at all."""
- for t in [int, float, complex, np.int32, str, object,
- np.unicode]:
- dt = np.dtype(t)
- hash(dt)
+ dt = np.dtype(t)
+ hash(dt)
- def test_dtype(self):
+ @pytest.mark.parametrize('t', [int, float])
+ def test_dtype(self, t):
# Make sure equivalent byte order char hash the same (e.g. < and = on
# little endian)
- for t in [int, float]:
- dt = np.dtype(t)
- dt2 = dt.newbyteorder("<")
- dt3 = dt.newbyteorder(">")
- if dt == dt2:
- assert_(dt.byteorder != dt2.byteorder, "bogus test")
- assert_dtype_equal(dt, dt2)
- else:
- assert_(dt.byteorder != dt3.byteorder, "bogus test")
- assert_dtype_equal(dt, dt3)
+ dt = np.dtype(t)
+ dt2 = dt.newbyteorder("<")
+ dt3 = dt.newbyteorder(">")
+ if dt == dt2:
+ assert_(dt.byteorder != dt2.byteorder, "bogus test")
+ assert_dtype_equal(dt, dt2)
+ else:
+ assert_(dt.byteorder != dt3.byteorder, "bogus test")
+ assert_dtype_equal(dt, dt3)
def test_equivalent_dtype_hashing(self):
# Make sure equivalent dtypes with different type num hash equal
@@ -156,9 +156,9 @@ class TestRecord(object):
the dtype constructor.
"""
assert_raises(TypeError, np.dtype,
- dict(names=set(['A', 'B']), formats=['f8', 'i4']))
+ dict(names={'A', 'B'}, formats=['f8', 'i4']))
assert_raises(TypeError, np.dtype,
- dict(names=['A', 'B'], formats=set(['f8', 'i4'])))
+ dict(names=['A', 'B'], formats={'f8', 'i4'}))
def test_aligned_size(self):
# Check that structured dtypes get padded to an aligned size
@@ -552,7 +552,7 @@ class TestString(object):
assert_equal(str(dt),
"[('a', '<m8[D]'), ('b', '<M8[us]')]")
- def test_complex_dtype_repr(self):
+ def test_repr_structured(self):
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
('rtile', '>f4', (64, 36))], (3,)),
('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
@@ -572,6 +572,7 @@ class TestString(object):
"(('Green pixel', 'g'), 'u1'), "
"(('Blue pixel', 'b'), 'u1')], align=True)")
+ def test_repr_structured_not_packed(self):
dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],
'formats': ['<u4', 'u1', 'u1', 'u1'],
'offsets': [0, 0, 1, 2],
@@ -596,10 +597,16 @@ class TestString(object):
"'titles':['Red pixel','Blue pixel'], "
"'itemsize':4})")
+ def test_repr_structured_datetime(self):
dt = np.dtype([('a', '<M8[D]'), ('b', '<m8[us]')])
assert_equal(repr(dt),
"dtype([('a', '<M8[D]'), ('b', '<m8[us]')])")
+ def test_repr_str_subarray(self):
+ dt = np.dtype(('<i2', (1,)))
+ assert_equal(repr(dt), "dtype(('<i2', (1,)))")
+ assert_equal(str(dt), "('<i2', (1,))")
+
@pytest.mark.skipif(sys.version_info[0] >= 3, reason="Python 2 only")
def test_dtype_str_with_long_in_shape(self):
# Pull request #376, should not error
@@ -613,6 +620,25 @@ class TestString(object):
# Pull request #4722
np.array(["", ""]).astype(object)
+ def test_void_subclass_unsized(self):
+ dt = np.dtype(np.record)
+ assert_equal(repr(dt), "dtype('V')")
+ assert_equal(str(dt), '|V0')
+ assert_equal(dt.name, 'record')
+
+ def test_void_subclass_sized(self):
+ dt = np.dtype((np.record, 2))
+ assert_equal(repr(dt), "dtype('V2')")
+ assert_equal(str(dt), '|V2')
+ assert_equal(dt.name, 'record16')
+
+ def test_void_subclass_fields(self):
+ dt = np.dtype((np.record, [('a', '<u2')]))
+ assert_equal(repr(dt), "dtype((numpy.record, [('a', '<u2')]))")
+ assert_equal(str(dt), "(numpy.record, [('a', '<u2')])")
+ assert_equal(dt.name, 'record16')
+
+
class TestDtypeAttributeDeletion(object):
def test_dtype_non_writable_attributes_deletion(self):
@@ -642,12 +668,12 @@ class TestDtypeAttributes(object):
new_dtype = np.dtype(dtype.descr)
assert_equal(new_dtype.itemsize, 16)
- def test_name_builtin(self):
- for t in np.typeDict.values():
- name = t.__name__
- if name.endswith('_'):
- name = name[:-1]
- assert_equal(np.dtype(t).name, name)
+ @pytest.mark.parametrize('t', np.typeDict.values())
+ def test_name_builtin(self, t):
+ name = t.__name__
+ if name.endswith('_'):
+ name = name[:-1]
+ assert_equal(np.dtype(t).name, name)
def test_name_dtype_subclass(self):
# Ticket #4357
@@ -671,38 +697,46 @@ class TestPickling(object):
assert_equal(x, y)
assert_equal(x[0], y[0])
- def test_builtin(self):
- for t in [int, float, complex, np.int32, str, object,
- np.unicode, bool]:
- self.check_pickling(np.dtype(t))
+ @pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
+ np.unicode, bool])
+ def test_builtin(self, t):
+ self.check_pickling(np.dtype(t))
def test_structured(self):
dt = np.dtype(([('a', '>f4', (2, 1)), ('b', '<f8', (1, 3))], (2, 2)))
self.check_pickling(dt)
+
+ def test_structured_aligned(self):
dt = np.dtype('i4, i1', align=True)
self.check_pickling(dt)
+
+ def test_structured_unaligned(self):
dt = np.dtype('i4, i1', align=False)
self.check_pickling(dt)
+
+ def test_structured_padded(self):
dt = np.dtype({
'names': ['A', 'B'],
'formats': ['f4', 'f4'],
'offsets': [0, 8],
'itemsize': 16})
self.check_pickling(dt)
+
+ def test_structured_titles(self):
dt = np.dtype({'names': ['r', 'b'],
'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
self.check_pickling(dt)
- def test_datetime(self):
- for base in ['m8', 'M8']:
- for unit in ['', 'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',
- 'us', 'ns', 'ps', 'fs', 'as']:
- dt = np.dtype('%s[%s]' % (base, unit) if unit else base)
- self.check_pickling(dt)
- if unit:
- dt = np.dtype('%s[7%s]' % (base, unit))
- self.check_pickling(dt)
+ @pytest.mark.parametrize('base', ['m8', 'M8'])
+ @pytest.mark.parametrize('unit', ['', 'Y', 'M', 'W', 'D', 'h', 'm', 's',
+ 'ms', 'us', 'ns', 'ps', 'fs', 'as'])
+ def test_datetime(self, base, unit):
+ dt = np.dtype('%s[%s]' % (base, unit) if unit else base)
+ self.check_pickling(dt)
+ if unit:
+ dt = np.dtype('%s[7%s]' % (base, unit))
+ self.check_pickling(dt)
def test_metadata(self):
dt = np.dtype(int, metadata={'datum': 1})
@@ -729,6 +763,7 @@ def test_dtypes_are_true():
def test_invalid_dtype_string():
# test for gh-10440
assert_raises(TypeError, np.dtype, 'f8,i8,[f8,i8]')
+ assert_raises(TypeError, np.dtype, u'Fl\xfcgel')
class TestFromCTypes(object):
@@ -759,7 +794,82 @@ class TestFromCTypes(object):
], align=True)
self.check(PaddedStruct, expected)
- @pytest.mark.xfail(reason="_pack_ is ignored - see gh-11651")
+ def test_bit_fields(self):
+ class BitfieldStruct(ctypes.Structure):
+ _fields_ = [
+ ('a', ctypes.c_uint8, 7),
+ ('b', ctypes.c_uint8, 1)
+ ]
+ assert_raises(TypeError, np.dtype, BitfieldStruct)
+ assert_raises(TypeError, np.dtype, BitfieldStruct())
+
+ def test_pointer(self):
+ p_uint8 = ctypes.POINTER(ctypes.c_uint8)
+ assert_raises(TypeError, np.dtype, p_uint8)
+
+ def test_void_pointer(self):
+ self.check(ctypes.c_void_p, np.uintp)
+
+ def test_union(self):
+ class Union(ctypes.Union):
+ _fields_ = [
+ ('a', ctypes.c_uint8),
+ ('b', ctypes.c_uint16),
+ ]
+ expected = np.dtype(dict(
+ names=['a', 'b'],
+ formats=[np.uint8, np.uint16],
+ offsets=[0, 0],
+ itemsize=2
+ ))
+ self.check(Union, expected)
+
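A small illustration of the union mapping checked above, assuming the ctypes support added in this release: every field of a ctypes.Union starts at offset 0, and the resulting dtype mirrors the C layout.

    import ctypes
    import numpy as np

    class U(ctypes.Union):
        _fields_ = [('a', ctypes.c_uint8), ('b', ctypes.c_uint16)]

    dt = np.dtype(U)
    # dt.fields maps name -> (dtype, offset); all offsets are zero here
    assert dt.fields['a'][1] == 0 and dt.fields['b'][1] == 0
    assert dt.itemsize == ctypes.sizeof(U)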
+ def test_union_with_struct_packed(self):
+ class Struct(ctypes.Structure):
+ _pack_ = 1
+ _fields_ = [
+ ('one', ctypes.c_uint8),
+ ('two', ctypes.c_uint32)
+ ]
+
+ class Union(ctypes.Union):
+ _fields_ = [
+ ('a', ctypes.c_uint8),
+ ('b', ctypes.c_uint16),
+ ('c', ctypes.c_uint32),
+ ('d', Struct),
+ ]
+ expected = np.dtype(dict(
+ names=['a', 'b', 'c', 'd'],
+ formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],
+ offsets=[0, 0, 0, 0],
+ itemsize=ctypes.sizeof(Union)
+ ))
+ self.check(Union, expected)
+
+ def test_union_packed(self):
+ class Struct(ctypes.Structure):
+ _fields_ = [
+ ('one', ctypes.c_uint8),
+ ('two', ctypes.c_uint32)
+ ]
+ _pack_ = 1
+ class Union(ctypes.Union):
+ _pack_ = 1
+ _fields_ = [
+ ('a', ctypes.c_uint8),
+ ('b', ctypes.c_uint16),
+ ('c', ctypes.c_uint32),
+ ('d', Struct),
+ ]
+ expected = np.dtype(dict(
+ names=['a', 'b', 'c', 'd'],
+ formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],
+ offsets=[0, 0, 0, 0],
+ itemsize=ctypes.sizeof(Union)
+ ))
+ self.check(Union, expected)
+
def test_packed_structure(self):
class PackedStructure(ctypes.Structure):
_pack_ = 1
@@ -773,8 +883,45 @@ class TestFromCTypes(object):
])
self.check(PackedStructure, expected)
- @pytest.mark.xfail(sys.byteorder != 'little',
- reason="non-native endianness does not work - see gh-10533")
+ def test_large_packed_structure(self):
+ class PackedStructure(ctypes.Structure):
+ _pack_ = 2
+ _fields_ = [
+ ('a', ctypes.c_uint8),
+ ('b', ctypes.c_uint16),
+ ('c', ctypes.c_uint8),
+ ('d', ctypes.c_uint16),
+ ('e', ctypes.c_uint32),
+ ('f', ctypes.c_uint32),
+ ('g', ctypes.c_uint8)
+ ]
+ expected = np.dtype(dict(
+ formats=[np.uint8, np.uint16, np.uint8, np.uint16, np.uint32, np.uint32, np.uint8],
+ offsets=[0, 2, 4, 6, 8, 12, 16],
+ names=['a', 'b', 'c', 'd', 'e', 'f', 'g'],
+ itemsize=18))
+ self.check(PackedStructure, expected)
+
+ def test_big_endian_structure_packed(self):
+ class BigEndStruct(ctypes.BigEndianStructure):
+ _fields_ = [
+ ('one', ctypes.c_uint8),
+ ('two', ctypes.c_uint32)
+ ]
+ _pack_ = 1
+ expected = np.dtype([('one', 'u1'), ('two', '>u4')])
+ self.check(BigEndStruct, expected)
+
+ def test_little_endian_structure_packed(self):
+ class LittleEndStruct(ctypes.LittleEndianStructure):
+ _fields_ = [
+ ('one', ctypes.c_uint8),
+ ('two', ctypes.c_uint32)
+ ]
+ _pack_ = 1
+ expected = np.dtype([('one', 'u1'), ('two', '<u4')])
+ self.check(LittleEndStruct, expected)
+
def test_little_endian_structure(self):
class PaddedStruct(ctypes.LittleEndianStructure):
_fields_ = [
@@ -787,8 +934,6 @@ class TestFromCTypes(object):
], align=True)
self.check(PaddedStruct, expected)
- @pytest.mark.xfail(sys.byteorder != 'big',
- reason="non-native endianness does not work - see gh-10533")
def test_big_endian_structure(self):
class PaddedStruct(ctypes.BigEndianStructure):
_fields_ = [
@@ -800,3 +945,9 @@ class TestFromCTypes(object):
('b', '>H')
], align=True)
self.check(PaddedStruct, expected)
+
+ def test_simple_endian_types(self):
+ self.check(ctypes.c_uint16.__ctype_le__, np.dtype('<u2'))
+ self.check(ctypes.c_uint16.__ctype_be__, np.dtype('>u2'))
+ self.check(ctypes.c_uint8.__ctype_le__, np.dtype('u1'))
+ self.check(ctypes.c_uint8.__ctype_be__, np.dtype('u1'))
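The per-endianness ctypes variants map directly onto byte-order qualified dtypes; a one-byte type has no byte order to express, so both variants collapse to 'u1'. A minimal sketch of the mapping tested above:

    import ctypes
    import numpy as np

    assert np.dtype(ctypes.c_uint16.__ctype_le__) == np.dtype('<u2')
    assert np.dtype(ctypes.c_uint16.__ctype_be__) == np.dtype('>u2')
    assert np.dtype(ctypes.c_uint8.__ctype_be__) == np.dtype('u1')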
diff --git a/numpy/core/tests/test_einsum.py b/numpy/core/tests/test_einsum.py
index 8ce374a75..3be4a8a26 100644
--- a/numpy/core/tests/test_einsum.py
+++ b/numpy/core/tests/test_einsum.py
@@ -11,9 +11,7 @@ from numpy.testing import (
# Setup for optimize einsum
chars = 'abcdefghij'
sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3])
-global_size_dict = {}
-for size, char in zip(sizes, chars):
- global_size_dict[char] = size
+global_size_dict = dict(zip(chars, sizes))
class TestEinsum(object):
@@ -965,7 +963,6 @@ class TestEinsumPath(object):
path, path_str = np.einsum_path(*edge_test4, optimize='optimal')
self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)])
-
def test_path_type_input(self):
# Test explicit path handling
path_test = self.build_operands('dcc,fce,ea,dbf->ab')
diff --git a/numpy/core/tests/test_errstate.py b/numpy/core/tests/test_errstate.py
index 4f6111921..670d485c1 100644
--- a/numpy/core/tests/test_errstate.py
+++ b/numpy/core/tests/test_errstate.py
@@ -4,7 +4,7 @@ import platform
import pytest
import numpy as np
-from numpy.testing import assert_
+from numpy.testing import assert_, assert_raises
class TestErrstate(object):
@@ -16,12 +16,8 @@ class TestErrstate(object):
with np.errstate(invalid='ignore'):
np.sqrt(a)
# While this should fail!
- try:
+ with assert_raises(FloatingPointError):
np.sqrt(a)
- except FloatingPointError:
- pass
- else:
- self.fail("Did not raise an invalid error")
def test_divide(self):
with np.errstate(all='raise', under='ignore'):
@@ -30,12 +26,8 @@ class TestErrstate(object):
with np.errstate(divide='ignore'):
a // 0
# While this should fail!
- try:
+ with assert_raises(FloatingPointError):
a // 0
- except FloatingPointError:
- pass
- else:
- self.fail("Did not raise divide by zero error")
def test_errcall(self):
def foo(*args):
diff --git a/numpy/core/tests/test_extint128.py b/numpy/core/tests/test_extint128.py
index 0e9c07fd5..7c454a603 100644
--- a/numpy/core/tests/test_extint128.py
+++ b/numpy/core/tests/test_extint128.py
@@ -1,6 +1,5 @@
from __future__ import division, absolute_import, print_function
-import sys
import itertools
import contextlib
import operator
@@ -8,7 +7,6 @@ import pytest
import numpy as np
import numpy.core._multiarray_tests as mt
-from numpy.compat import long
from numpy.testing import assert_raises, assert_equal
diff --git a/numpy/core/tests/test_function_base.py b/numpy/core/tests/test_function_base.py
index d0ff1c15f..459bacab0 100644
--- a/numpy/core/tests/test_function_base.py
+++ b/numpy/core/tests/test_function_base.py
@@ -2,7 +2,7 @@ from __future__ import division, absolute_import, print_function
from numpy import (
logspace, linspace, geomspace, dtype, array, sctypes, arange, isnan,
- ndarray, sqrt, nextafter
+ ndarray, sqrt, nextafter, stack
)
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal, assert_allclose,
@@ -54,6 +54,20 @@ class TestLogspace(object):
y = logspace(0, 6, num=7)
assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6])
+ def test_start_stop_array(self):
+ start = array([0., 1.])
+ stop = array([6., 7.])
+ t1 = logspace(start, stop, 6)
+ t2 = stack([logspace(_start, _stop, 6)
+ for _start, _stop in zip(start, stop)], axis=1)
+ assert_equal(t1, t2)
+ t3 = logspace(start, stop[0], 6)
+ t4 = stack([logspace(_start, stop[0], 6)
+ for _start in start], axis=1)
+ assert_equal(t3, t4)
+ t5 = logspace(start, stop, 6, axis=-1)
+ assert_equal(t5, t2.T)
+
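A short sketch of the array start/stop behaviour these tests pin down: the samples axis is prepended by default and can be relocated with the new axis argument.

    import numpy as np

    t = np.logspace([0., 1.], [6., 7.], 6)           # shape (6, 2)
    u = np.logspace([0., 1.], [6., 7.], 6, axis=-1)  # shape (2, 6)
    assert t.shape == (6, 2)
    assert np.array_equal(u, t.T)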
def test_dtype(self):
y = logspace(0, 6, dtype='float32')
assert_equal(y.dtype, dtype('float32'))
@@ -156,7 +170,7 @@ class TestGeomspace(object):
y = geomspace(1, 1e6, dtype=complex)
assert_equal(y.dtype, dtype('complex'))
- def test_array_scalar(self):
+ def test_start_stop_array_scalar(self):
lim1 = array([120, 100], dtype="int8")
lim2 = array([-120, -100], dtype="int8")
lim3 = array([1200, 1000], dtype="uint16")
@@ -172,6 +186,21 @@ class TestGeomspace(object):
assert_allclose(t2, t5, rtol=1e-2)
assert_allclose(t3, t6, rtol=1e-5)
+ def test_start_stop_array(self):
+ # Try to use all special cases.
+ start = array([1.e0, 32., 1j, -4j, 1+1j, -1])
+ stop = array([1.e4, 2., 16j, -324j, 10000+10000j, 1])
+ t1 = geomspace(start, stop, 5)
+ t2 = stack([geomspace(_start, _stop, 5)
+ for _start, _stop in zip(start, stop)], axis=1)
+ assert_equal(t1, t2)
+ t3 = geomspace(start, stop[0], 5)
+ t4 = stack([geomspace(_start, stop[0], 5)
+ for _start in start], axis=1)
+ assert_equal(t3, t4)
+ t5 = geomspace(start, stop, 5, axis=-1)
+ assert_equal(t5, t2.T)
+
def test_physical_quantities(self):
a = PhysicalQuantity(1.0)
b = PhysicalQuantity(5.0)
@@ -227,7 +256,7 @@ class TestLinspace(object):
y = linspace(0, 6, dtype='int32')
assert_equal(y.dtype, dtype('int32'))
- def test_array_scalar(self):
+ def test_start_stop_array_scalar(self):
lim1 = array([-120, 100], dtype="int8")
lim2 = array([120, -100], dtype="int8")
lim3 = array([1200, 1000], dtype="uint16")
@@ -241,6 +270,20 @@ class TestLinspace(object):
assert_equal(t2, t5)
assert_equal(t3, t6)
+ def test_start_stop_array(self):
+ start = array([-120, 120], dtype="int8")
+ stop = array([100, -100], dtype="int8")
+ t1 = linspace(start, stop, 5)
+ t2 = stack([linspace(_start, _stop, 5)
+ for _start, _stop in zip(start, stop)], axis=1)
+ assert_equal(t1, t2)
+ t3 = linspace(start, stop[0], 5)
+ t4 = stack([linspace(_start, stop[0], 5)
+ for _start in start], axis=1)
+ assert_equal(t3, t4)
+ t5 = linspace(start, stop, 5, axis=-1)
+ assert_equal(t5, t2.T)
+
def test_complex(self):
lim1 = linspace(1 + 2j, 3 + 4j, 5)
t1 = array([1.0+2.j, 1.5+2.5j, 2.0+3j, 2.5+3.5j, 3.0+4j])
@@ -285,9 +328,7 @@ class TestLinspace(object):
@property
def __array_interface__(self):
- # Ideally should be `'shape': ()` but the current interface
- # does not allow that
- return {'shape': (1,), 'typestr': '<i4', 'data': self._data,
+ return {'shape': (), 'typestr': '<i4', 'data': self._data,
'version': 3}
def __mul__(self, other):
diff --git a/numpy/core/tests/test_getlimits.py b/numpy/core/tests/test_getlimits.py
index ca8093c62..2f6648183 100644
--- a/numpy/core/tests/test_getlimits.py
+++ b/numpy/core/tests/test_getlimits.py
@@ -7,10 +7,7 @@ import numpy as np
from numpy.core import finfo, iinfo
from numpy import half, single, double, longdouble
from numpy.testing import assert_equal, assert_, assert_raises
-from numpy.core.getlimits import (
- _discovered_machar, _float16_ma, _float32_ma, _float64_ma, _float128_ma,
- _float80_ma
- )
+from numpy.core.getlimits import _discovered_machar, _float_ma
##################################################
@@ -101,9 +98,9 @@ def assert_ma_equal(discovered, ma_like):
def test_known_types():
# Test we are correctly compiling parameters for known types
- for ftype, ma_like in ((np.float16, _float16_ma),
- (np.float32, _float32_ma),
- (np.float64, _float64_ma)):
+ for ftype, ma_like in ((np.float16, _float_ma[16]),
+ (np.float32, _float_ma[32]),
+ (np.float64, _float_ma[64])):
assert_ma_equal(_discovered_machar(ftype), ma_like)
# Suppress warning for broken discovery of double double on PPC
with np.errstate(all='ignore'):
@@ -111,10 +108,10 @@ def test_known_types():
bytes = np.dtype(np.longdouble).itemsize
if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16):
# 80-bit extended precision
- assert_ma_equal(ld_ma, _float80_ma)
+ assert_ma_equal(ld_ma, _float_ma[80])
elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16:
# IEEE 754 128-bit
- assert_ma_equal(ld_ma, _float128_ma)
+ assert_ma_equal(ld_ma, _float_ma[128])
def test_plausible_finfo():
diff --git a/numpy/core/tests/test_half.py b/numpy/core/tests/test_half.py
index b02f6cae2..b28c933db 100644
--- a/numpy/core/tests/test_half.py
+++ b/numpy/core/tests/test_half.py
@@ -301,15 +301,19 @@ class TestHalf(object):
assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3])
assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3])
+
x = np.maximum(b, c)
assert_(np.isnan(x[3]))
x[3] = 0
assert_equal(x, [0, 5, 1, 0, 6])
+
assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2])
+
x = np.minimum(b, c)
assert_(np.isnan(x[3]))
x[3] = 0
assert_equal(x, [-2, -1, -np.inf, 0, 3])
+
assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3])
assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6])
assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2])
diff --git a/numpy/core/tests/test_indexing.py b/numpy/core/tests/test_indexing.py
index fb5687328..99792cee7 100644
--- a/numpy/core/tests/test_indexing.py
+++ b/numpy/core/tests/test_indexing.py
@@ -194,7 +194,6 @@ class TestIndexing(object):
assert_raises(IndexError, arr.__getitem__, (slice(None), index))
-
def test_boolean_indexing_onedim(self):
# Indexing a 2-dimensional array with
# boolean array of length one
diff --git a/numpy/core/tests/test_mem_overlap.py b/numpy/core/tests/test_mem_overlap.py
index f4ce6a84a..3c8e0e722 100644
--- a/numpy/core/tests/test_mem_overlap.py
+++ b/numpy/core/tests/test_mem_overlap.py
@@ -10,7 +10,7 @@ from numpy.core import _umath_tests
from numpy.lib.stride_tricks import as_strided
from numpy.compat import long
from numpy.testing import (
- assert_, assert_raises, assert_equal, assert_array_equal, assert_allclose
+ assert_, assert_raises, assert_equal, assert_array_equal
)
if sys.version_info[0] >= 3:
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 4cc38a9b5..06cabe2cb 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -20,6 +20,9 @@ import gc
import weakref
import pytest
from contextlib import contextmanager
+
+from numpy.core.numeric import pickle
+
if sys.version_info[0] >= 3:
import builtins
else:
@@ -33,7 +36,7 @@ from numpy.testing import (
assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal,
assert_array_equal, assert_raises_regex, assert_array_almost_equal,
assert_allclose, IS_PYPY, HAS_REFCOUNT, assert_array_less, runstring,
- SkipTest, temppath, suppress_warnings
+ temppath, suppress_warnings
)
from numpy.core.tests._locales import CommaDecimalPointLocale
@@ -44,14 +47,19 @@ from datetime import timedelta, datetime
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and sub-offsets
# is an empty tuple instead of None.
- # http://docs.python.org/dev/whatsnew/3.3.html#api-changes
+ # https://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
def _aligned_zeros(shape, dtype=float, order="C", align=None):
- """Allocate a new ndarray with aligned memory."""
+ """
+ Allocate a new ndarray with aligned memory.
+
+ The ndarray is guaranteed *not* to be aligned to twice the requested
+ alignment. E.g., if align=4, the result is guaranteed not to be
+ aligned to 8. If align=None, dtype.alignment is used."""
dtype = np.dtype(dtype)
if dtype == np.dtype(object):
# Can't do this, fall back to standard allocation (which
@@ -64,10 +72,15 @@ def _aligned_zeros(shape, dtype=float, order="C", align=None):
if not hasattr(shape, '__len__'):
shape = (shape,)
size = functools.reduce(operator.mul, shape) * dtype.itemsize
- buf = np.empty(size + align + 1, np.uint8)
- offset = buf.__array_interface__['data'][0] % align
+ buf = np.empty(size + 2*align + 1, np.uint8)
+
+ ptr = buf.__array_interface__['data'][0]
+ offset = ptr % align
if offset != 0:
offset = align - offset
+ if (ptr % (2*align)) == 0:
+ offset += align
+
# Note: slices producing 0-size arrays do not necessarily change
# data pointer --- so we use and allocate size+1
buf = buf[offset:offset+size+1][:-1]
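A worked example of the offset computation above, with a hypothetical buffer address of 16 and align=4:

    ptr, align = 16, 4
    offset = ptr % align            # 0, already aligned to 4
    if offset != 0:
        offset = align - offset
    if ptr % (2 * align) == 0:      # 16 is also aligned to 8 ...
        offset += align             # ... so shift by 4 to break that
    assert (ptr + offset) % align == 0        # aligned to 4
    assert (ptr + offset) % (2 * align) != 0  # but not to 8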
@@ -89,6 +102,46 @@ class TestFlags(object):
self.a[0] = 5
self.a[0] = 0
+ def test_writeable_from_readonly(self):
+ # gh-9440 - make sure fromstring and frombuffer on read-only buffers
+ # produce arrays with writeable set to False
+ data = b'\x00' * 100
+ vals = np.frombuffer(data, 'B')
+ assert_raises(ValueError, vals.setflags, write=True)
+ types = np.dtype([('vals', 'u1'), ('res3', 'S4')])
+ values = np.core.records.fromstring(data, types)
+ vals = values['vals']
+ assert_raises(ValueError, vals.setflags, write=True)
+
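A quick illustration of the behaviour pinned down here: arrays built over a read-only buffer (a bytes object, say) come out non-writeable, and asking for write access is rejected.

    import numpy as np

    vals = np.frombuffer(b'\x00' * 4, dtype='B')
    assert not vals.flags.writeable
    try:
        vals.setflags(write=True)   # read-only base, so this must fail
    except ValueError:
        pass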
+ def test_writeable_from_buffer(self):
+ data = bytearray(b'\x00' * 100)
+ vals = np.frombuffer(data, 'B')
+ assert_(vals.flags.writeable)
+ vals.setflags(write=False)
+ assert_(vals.flags.writeable is False)
+ vals.setflags(write=True)
+ assert_(vals.flags.writeable)
+ types = np.dtype([('vals', 'u1'), ('res3', 'S4')])
+ values = np.core.records.fromstring(data, types)
+ vals = values['vals']
+ assert_(vals.flags.writeable)
+ vals.setflags(write=False)
+ assert_(vals.flags.writeable is False)
+ vals.setflags(write=True)
+ assert_(vals.flags.writeable)
+
+ @pytest.mark.skipif(sys.version_info[0] < 3, reason="Python 2 always copies")
+ def test_writeable_pickle(self):
+ import pickle
+ # Small arrays will be copied without setting base.
+ # See condition for using PyArray_SetBaseObject in
+ # array_setstate.
+ a = np.arange(1000)
+ for v in range(pickle.HIGHEST_PROTOCOL):
+ vals = pickle.loads(pickle.dumps(a, v))
+ assert_(vals.flags.writeable)
+ assert_(isinstance(vals.base, bytes))
+
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags['C'], True)
@@ -108,7 +161,6 @@ class TestFlags(object):
assert_equal(self.a.flags['X'], False)
assert_equal(self.a.flags['WRITEBACKIFCOPY'], False)
-
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
@@ -688,6 +740,9 @@ class TestScalarIndexing(object):
class TestCreation(object):
+ """
+ Test the np.array constructor
+ """
def test_from_attribute(self):
class x(object):
def __array__(self, dtype=None):
@@ -903,6 +958,34 @@ class TestCreation(object):
assert_raises(ValueError, np.ndarray, buffer=buf, strides=(0,),
shape=(max_bytes//itemsize + 1,), dtype=dtype)
+ def test_jagged_ndim_object(self):
+ # Lists of mismatching depths are treated as object arrays
+ a = np.array([[1], 2, 3])
+ assert_equal(a.shape, (3,))
+ assert_equal(a.dtype, object)
+
+ a = np.array([1, [2], 3])
+ assert_equal(a.shape, (3,))
+ assert_equal(a.dtype, object)
+
+ a = np.array([1, 2, [3]])
+ assert_equal(a.shape, (3,))
+ assert_equal(a.dtype, object)
+
+ def test_jagged_shape_object(self):
+ # The jagged dimension of a list is turned into an object array
+ a = np.array([[1, 1], [2], [3]])
+ assert_equal(a.shape, (3,))
+ assert_equal(a.dtype, object)
+
+ a = np.array([[1], [2, 2], [3]])
+ assert_equal(a.shape, (3,))
+ assert_equal(a.dtype, object)
+
+ a = np.array([[1], [2], [3, 3]])
+ assert_equal(a.shape, (3,))
+ assert_equal(a.dtype, object)
+
class TestStructured(object):
def test_subarray_field_access(self):
@@ -1341,13 +1424,28 @@ class TestZeroSizeFlexible(object):
assert_equal(zs.view((dt, 1)).shape, (0,))
def test_pickle(self):
- import pickle
- for dt in [bytes, np.void, unicode]:
- zs = self._zeros(10, dt)
- p = pickle.dumps(zs)
- zs2 = pickle.loads(p)
-
- assert_equal(zs.dtype, zs2.dtype)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ for dt in [bytes, np.void, unicode]:
+ zs = self._zeros(10, dt)
+ p = pickle.dumps(zs, protocol=proto)
+ zs2 = pickle.loads(p)
+
+ assert_equal(zs.dtype, zs2.dtype)
+
+ @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,
+ reason="requires pickle protocol 5")
+ def test_pickle_with_buffercallback(self):
+ array = np.arange(10)
+ buffers = []
+ bytes_string = pickle.dumps(array, buffer_callback=buffers.append,
+ protocol=5)
+ array_from_buffer = pickle.loads(bytes_string, buffers=buffers)
+ # when using pickle protocol 5 with buffer callbacks,
+ # array_from_buffer is reconstructed from a buffer holding a view
+ # to the initial array's data, so modifying an element in array
+ # should modify it in array_from_buffer too.
+ array[0] = -1
+ assert array_from_buffer[0] == -1, array_from_buffer[0]
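A compact sketch of the out-of-band pickling round trip, assuming pickle protocol 5 is available (Python 3.8+, or the pickle5 backport):

    import pickle
    import numpy as np

    arr = np.arange(10)
    buffers = []
    payload = pickle.dumps(arr, protocol=5, buffer_callback=buffers.append)
    restored = pickle.loads(payload, buffers=buffers)
    arr[0] = -1                # the buffer is shared, not copied,
    assert restored[0] == -1   # so the write is visible in both arrays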
class TestMethods(object):
@@ -1386,6 +1484,10 @@ class TestMethods(object):
A = ind.choose((x, y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
+ oned = np.ones(1)
+ # gh-12031, caused SEGFAULT
+ assert_raises(TypeError, oned.choose, np.void(0), [oned])
+
def test_prod(self):
ba = [1, 2, 10, 11, 6, 5, 4]
ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
@@ -2543,7 +2645,8 @@ class TestMethods(object):
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
- def test_dot(self):
+ @pytest.mark.parametrize('func', (np.dot, np.matmul))
+ def test_arr_mult(self, func):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
@@ -2567,49 +2670,49 @@ class TestMethods(object):
# gemm vs syrk optimizations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
- assert_equal(np.dot(eaf, eaf), eaf)
- assert_equal(np.dot(eaf.T, eaf), eaf)
- assert_equal(np.dot(eaf, eaf.T), eaf)
- assert_equal(np.dot(eaf.T, eaf.T), eaf)
- assert_equal(np.dot(eaf.T.copy(), eaf), eaf)
- assert_equal(np.dot(eaf, eaf.T.copy()), eaf)
- assert_equal(np.dot(eaf.T.copy(), eaf.T.copy()), eaf)
+ assert_equal(func(eaf, eaf), eaf)
+ assert_equal(func(eaf.T, eaf), eaf)
+ assert_equal(func(eaf, eaf.T), eaf)
+ assert_equal(func(eaf.T, eaf.T), eaf)
+ assert_equal(func(eaf.T.copy(), eaf), eaf)
+ assert_equal(func(eaf, eaf.T.copy()), eaf)
+ assert_equal(func(eaf.T.copy(), eaf.T.copy()), eaf)
# syrk validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
ebf = b.astype(et)
- assert_equal(np.dot(ebf, ebf), eaf)
- assert_equal(np.dot(ebf.T, ebf), eaf)
- assert_equal(np.dot(ebf, ebf.T), eaf)
- assert_equal(np.dot(ebf.T, ebf.T), eaf)
+ assert_equal(func(ebf, ebf), eaf)
+ assert_equal(func(ebf.T, ebf), eaf)
+ assert_equal(func(ebf, ebf.T), eaf)
+ assert_equal(func(ebf.T, ebf.T), eaf)
# syrk - different shape, stride, and view validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
edf = d.astype(et)
assert_equal(
- np.dot(edf[::-1, :], edf.T),
- np.dot(edf[::-1, :].copy(), edf.T.copy())
+ func(edf[::-1, :], edf.T),
+ func(edf[::-1, :].copy(), edf.T.copy())
)
assert_equal(
- np.dot(edf[:, ::-1], edf.T),
- np.dot(edf[:, ::-1].copy(), edf.T.copy())
+ func(edf[:, ::-1], edf.T),
+ func(edf[:, ::-1].copy(), edf.T.copy())
)
assert_equal(
- np.dot(edf, edf[::-1, :].T),
- np.dot(edf, edf[::-1, :].T.copy())
+ func(edf, edf[::-1, :].T),
+ func(edf, edf[::-1, :].T.copy())
)
assert_equal(
- np.dot(edf, edf[:, ::-1].T),
- np.dot(edf, edf[:, ::-1].T.copy())
+ func(edf, edf[:, ::-1].T),
+ func(edf, edf[:, ::-1].T.copy())
)
assert_equal(
- np.dot(edf[:edf.shape[0] // 2, :], edf[::2, :].T),
- np.dot(edf[:edf.shape[0] // 2, :].copy(), edf[::2, :].T.copy())
+ func(edf[:edf.shape[0] // 2, :], edf[::2, :].T),
+ func(edf[:edf.shape[0] // 2, :].copy(), edf[::2, :].T.copy())
)
assert_equal(
- np.dot(edf[::2, :], edf[:edf.shape[0] // 2, :].T),
- np.dot(edf[::2, :].copy(), edf[:edf.shape[0] // 2, :].T.copy())
+ func(edf[::2, :], edf[:edf.shape[0] // 2, :].T),
+ func(edf[::2, :].copy(), edf[:edf.shape[0] // 2, :].T.copy())
)
# syrk - different shape
@@ -2617,9 +2720,43 @@ class TestMethods(object):
edf = d.astype(et)
eddtf = ddt.astype(et)
edtdf = dtd.astype(et)
- assert_equal(np.dot(edf, edf.T), eddtf)
- assert_equal(np.dot(edf.T, edf), edtdf)
+ assert_equal(func(edf, edf.T), eddtf)
+ assert_equal(func(edf.T, edf), edtdf)
+
+ @pytest.mark.parametrize('func', (np.dot, np.matmul))
+ @pytest.mark.parametrize('dtype', 'ifdFD')
+ def test_no_dgemv(self, func, dtype):
+ # check vector arg for contiguous before gemv
+ # gh-12156
+ a = np.arange(8.0, dtype=dtype).reshape(2, 4)
+ b = np.broadcast_to(1., (4, 1))
+ ret1 = func(a, b)
+ ret2 = func(a, b.copy())
+ assert_equal(ret1, ret2)
+
+ ret1 = func(b.T, a.T)
+ ret2 = func(b.T.copy(), a.T)
+ assert_equal(ret1, ret2)
+
+ # check for unaligned data
+ dt = np.dtype(dtype)
+ a = np.zeros(8 * dt.itemsize // 2 + 1, dtype='int16')[1:].view(dtype)
+ a = a.reshape(2, 4)
+ b = a[0]
+ # make sure it is not aligned
+ assert_(a.__array_interface__['data'][0] % dt.itemsize != 0)
+ ret1 = func(a, b)
+ ret2 = func(a.copy(), b.copy())
+ assert_equal(ret1, ret2)
+
+ ret1 = func(b.T, a.T)
+ ret2 = func(b.T.copy(), a.T.copy())
+ assert_equal(ret1, ret2)
+ def test_dot(self):
+ a = np.array([[1, 0], [0, 1]])
+ b = np.array([[0, 1], [1, 0]])
+ c = np.array([[9, 1], [1, -9]])
# function versus methods
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
@@ -2675,6 +2812,29 @@ class TestMethods(object):
np.dot(a, b, out=out)
np.matmul(a, b, out=out)
+ def test_dot_matmul_inner_array_casting_fails(self):
+
+ class A(object):
+ def __array__(self, *args, **kwargs):
+ raise NotImplementedError
+
+ # Don't override the error from calling __array__()
+ assert_raises(NotImplementedError, np.dot, A(), A())
+ assert_raises(NotImplementedError, np.matmul, A(), A())
+ assert_raises(NotImplementedError, np.inner, A(), A())
+
+ def test_matmul_out(self):
+ # overlapping memory
+ a = np.arange(18).reshape(2, 3, 3)
+ b = np.matmul(a, a)
+ c = np.matmul(a, a, out=a)
+ assert_(c is a)
+ assert_equal(c, b)
+ a = np.arange(18).reshape(2, 3, 3)
+ c = np.matmul(a, a, out=a[::-1, ...])
+ assert_(c.base is a.base)
+ assert_equal(c, b)
+
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
@@ -2698,7 +2858,6 @@ class TestMethods(object):
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
-
def test_diagonal_view_notwriteable(self):
# this test is only for 1.9, the diagonal view will be
# writeable in 1.10.
@@ -3085,6 +3244,8 @@ class TestBinop(object):
# 'eq': (np.equal, False),
# 'ne': (np.not_equal, False),
}
+ if sys.version_info >= (3, 5):
+ ops['matmul'] = (np.matmul, False, float)
class Coerced(Exception):
pass
@@ -3127,7 +3288,7 @@ class TestBinop(object):
if issubclass(MyType, np.ndarray):
# Use this range to avoid special case weirdnesses around
# divide-by-0, pow(x, 2), overflow due to pow(big, big), etc.
- return np.arange(3, 5).view(MyType)
+ return np.arange(3, 7).reshape(2, 2).view(MyType)
else:
return MyType()
@@ -3136,7 +3297,7 @@ class TestBinop(object):
for op, (ufunc, has_inplace, dtype) in ops.items():
err_msg = ('op: %s, ufunc: %s, has_inplace: %s, dtype: %s'
% (op, ufunc, has_inplace, dtype))
- check_objs = [np.arange(3, 5, dtype=dtype)]
+ check_objs = [np.arange(3, 7, dtype=dtype).reshape(2, 2)]
if check_scalar:
check_objs.append(check_objs[0][0])
for arr in check_objs:
@@ -3383,6 +3544,16 @@ class TestBinop(object):
assert_equal(obj_arr ** -1, pow_for(-1, obj_arr))
assert_equal(obj_arr ** 2, pow_for(2, obj_arr))
+ def test_pos_array_ufunc_override(self):
+ class A(np.ndarray):
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ return getattr(ufunc, method)(*[i.view(np.ndarray) for
+ i in inputs], **kwargs)
+ tst = np.array('foo').view(A)
+ with assert_raises(TypeError):
+ +tst
+
+
class TestTemporaryElide(object):
# elision is only triggered on relatively large arrays
@@ -3505,21 +3676,105 @@ class TestSubscripting(object):
class TestPickling(object):
+ def test_highest_available_pickle_protocol(self):
+ try:
+ import pickle5
+ except ImportError:
+ pickle5 = None
+
+ if sys.version_info[:2] >= (3, 8) or pickle5 is not None:
+ assert pickle.HIGHEST_PROTOCOL >= 5
+ else:
+ assert pickle.HIGHEST_PROTOCOL < 5
+
+ @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL >= 5,
+ reason=('this tests the error messages when trying to '
+ 'use protocol 5 although it is not available'))
+ def test_correct_protocol5_error_message(self):
+ array = np.arange(10)
+
+ if sys.version_info[:2] in ((3, 6), (3, 7)):
+ # For the specific case of python3.6 and 3.7, raise a clear import
+ # error about the pickle5 backport when trying to use protocol=5
+ # without the pickle5 package
+ with pytest.raises(ImportError):
+ array.__reduce_ex__(5)
+
+ elif sys.version_info[:2] < (3, 6):
+ # when calling __reduce_ex__ explicitly with protocol=5 on python
+ # versions older than 3.6, a ValueError is raised saying that
+ # protocol 5 is not available for this python version
+ with pytest.raises(ValueError):
+ array.__reduce_ex__(5)
+
+ def test_record_array_with_object_dtype(self):
+ my_object = object()
+
+ arr_with_object = np.array(
+ [(my_object, 1, 2.0)],
+ dtype=[('a', object), ('b', int), ('c', float)])
+ arr_without_object = np.array(
+ [('xxx', 1, 2.0)],
+ dtype=[('a', str), ('b', int), ('c', float)])
+
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ depickled_arr_with_object = pickle.loads(
+ pickle.dumps(arr_with_object, protocol=proto))
+ depickled_arr_without_object = pickle.loads(
+ pickle.dumps(arr_without_object, protocol=proto))
+
+ assert_equal(arr_with_object.dtype,
+ depickled_arr_with_object.dtype)
+ assert_equal(arr_without_object.dtype,
+ depickled_arr_without_object.dtype)
+
+ @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,
+ reason="requires pickle protocol 5")
+ def test_f_contiguous_array(self):
+ f_contiguous_array = np.array([[1, 2, 3], [4, 5, 6]], order='F')
+ buffers = []
+
+ # When using pickle protocol 5, Fortran-contiguous arrays can be
+ # serialized using out-of-band buffers
+ bytes_string = pickle.dumps(f_contiguous_array, protocol=5,
+ buffer_callback=buffers.append)
+
+ assert len(buffers) > 0
+
+ depickled_f_contiguous_array = pickle.loads(bytes_string,
+ buffers=buffers)
+
+ assert_equal(f_contiguous_array, depickled_f_contiguous_array)
+
+ def test_non_contiguous_array(self):
+ non_contiguous_array = np.arange(12).reshape(3, 4)[:, :2]
+ assert not non_contiguous_array.flags.c_contiguous
+ assert not non_contiguous_array.flags.f_contiguous
+
+ # make sure non-contiguous arrays can be pickled-depickled
+ # using any protocol
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ depickled_non_contiguous_array = pickle.loads(
+ pickle.dumps(non_contiguous_array, protocol=proto))
+
+ assert_equal(non_contiguous_array, depickled_non_contiguous_array)
+
def test_roundtrip(self):
- import pickle
- carray = np.array([[2, 9], [7, 0], [3, 8]])
- DATA = [
- carray,
- np.transpose(carray),
- np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
- ('c', float)])
- ]
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ carray = np.array([[2, 9], [7, 0], [3, 8]])
+ DATA = [
+ carray,
+ np.transpose(carray),
+ np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
+ ('c', float)])
+ ]
- for a in DATA:
- assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a)
+ for a in DATA:
+ assert_equal(
+ a, pickle.loads(pickle.dumps(a, protocol=proto)),
+ err_msg="%r" % a)
def _loads(self, obj):
- import pickle
if sys.version_info[0] >= 3:
return pickle.loads(obj, encoding='latin1')
else:
@@ -4094,15 +4349,12 @@ class TestPutmask(object):
def test_mask_size(self):
assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
- def tst_byteorder(self, dtype):
+ @pytest.mark.parametrize('dtype', ('>i4', '<i4'))
+ def test_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
np.putmask(x, [True, False, True], -1)
assert_array_equal(x, [-1, 2, -1])
- def test_ip_byteorder(self):
- for dtype in ('>i4', '<i4'):
- self.tst_byteorder(dtype)
-
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
@@ -4152,14 +4404,11 @@ class TestTake(object):
assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
- def tst_byteorder(self, dtype):
+ @pytest.mark.parametrize('dtype', ('>i4', '<i4'))
+ def test_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])
- def test_ip_byteorder(self):
- for dtype in ('>i4', '<i4'):
- self.tst_byteorder(dtype)
-
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
@@ -4345,7 +4594,6 @@ class TestIO(object):
d.tofile(f)
assert_equal(os.path.getsize(self.filename), d.nbytes * 2)
-
def test_io_open_buffered_fromfile(self):
# gh-6632
self.x.tofile(self.filename)
@@ -4402,6 +4650,19 @@ class TestIO(object):
f.close()
assert_equal(pos, 10, err_msg=err_msg)
+ def test_load_object_array_fromfile(self):
+ # gh-12300
+ with open(self.filename, 'w') as f:
+ # Ensure we have a file with consistent contents
+ pass
+
+ with open(self.filename, 'rb') as f:
+ assert_raises_regex(ValueError, "Cannot read into object array",
+ np.fromfile, f, dtype=object)
+
+ assert_raises_regex(ValueError, "Cannot read into object array",
+ np.fromfile, self.filename, dtype=object)
+
def _check_from(self, s, value, **kw):
if 'sep' not in kw:
y = np.frombuffer(s, **kw)
@@ -4536,19 +4797,16 @@ class TestIO(object):
class TestFromBuffer(object):
- def tst_basic(self, buffer, expected, kwargs):
- assert_array_equal(np.frombuffer(buffer,**kwargs), expected)
-
- def test_ip_basic(self):
- for byteorder in ['<', '>']:
- for dtype in [float, int, complex]:
- dt = np.dtype(dtype).newbyteorder(byteorder)
- x = (np.random.random((4, 7))*5).astype(dt)
- buf = x.tobytes()
- self.tst_basic(buf, x.flat, {'dtype':dt})
+ @pytest.mark.parametrize('byteorder', ['<', '>'])
+ @pytest.mark.parametrize('dtype', [float, int, complex])
+ def test_basic(self, byteorder, dtype):
+ dt = np.dtype(dtype).newbyteorder(byteorder)
+ x = (np.random.random((4, 7)) * 5).astype(dt)
+ buf = x.tobytes()
+ assert_array_equal(np.frombuffer(buf, dtype=dt), x.flat)
def test_empty(self):
- self.tst_basic(b'', np.array([]), {})
+ assert_array_equal(np.frombuffer(b''), np.array([]))
class TestFlat(object):
@@ -4693,6 +4951,12 @@ class TestResize(object):
x_view.resize((0, 10))
x_view.resize((0, 100))
+ def test_check_weakref(self):
+ x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
+ xref = weakref.ref(x)
+ assert_raises(ValueError, x.resize, (5, 1))
+ del xref # avoid pyflakes unused variable warning.
+
class TestRecord(object):
def test_field_rename(self):
@@ -4701,11 +4965,11 @@ class TestRecord(object):
assert_equal(dt.names, ['p', 'q'])
def test_multiple_field_name_occurrence(self):
- def test_assign():
- dtype = np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")])
+ def test_dtype_init():
+ np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")])
# Error raised when multiple fields have the same name
- assert_raises(ValueError, test_assign)
+ assert_raises(ValueError, test_dtype_init)
@pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3")
def test_bytes_fields(self):
@@ -4725,13 +4989,11 @@ class TestRecord(object):
@pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3")
def test_multiple_field_name_unicode(self):
- def test_assign_unicode():
- dt = np.dtype([("\u20B9", "f8"),
- ("B", "f8"),
- ("\u20B9", "f8")])
+ def test_dtype_unicode():
+ np.dtype([("\u20B9", "f8"), ("B", "f8"), ("\u20B9", "f8")])
# Error raised when multiple fields have the same name(unicode included)
- assert_raises(ValueError, test_assign_unicode)
+ assert_raises(ValueError, test_dtype_unicode)
@pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
def test_unicode_field_titles(self):
@@ -4813,79 +5075,18 @@ class TestRecord(object):
fn2 = func('f2')
b[fn2] = 3
- # In 1.16 code below can be replaced by:
- # assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
- # assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
- # assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
- with suppress_warnings() as sup:
- sup.filter(FutureWarning,
- ".* selecting multiple fields .*")
-
- assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
- assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
- assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
- # view of subfield view/copy
- assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(),
- (2, 3))
- assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(),
- (3, 2))
- view_dtype = [('f1', 'i4'), ('f3', [('', 'i4')])]
- assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(),
- (2, (1,)))
+ assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
+ assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
+ assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
# non-ascii unicode field indexing is well behaved
if not is_py3:
- raise SkipTest('non ascii unicode field indexing skipped; '
- 'raises segfault on python 2.x')
+ pytest.skip('non ascii unicode field indexing skipped; '
+ 'raises segfault on python 2.x')
else:
assert_raises(ValueError, a.__setitem__, u'\u03e0', 1)
assert_raises(ValueError, a.__getitem__, u'\u03e0')
- # can be removed in 1.16
- def test_field_names_deprecation(self):
-
- def collect_warnings(f, *args, **kwargs):
- with warnings.catch_warnings(record=True) as log:
- warnings.simplefilter("always")
- f(*args, **kwargs)
- return [w.category for w in log]
-
- a = np.zeros((1,), dtype=[('f1', 'i4'),
- ('f2', 'i4'),
- ('f3', [('sf1', 'i4')])])
- a['f1'][0] = 1
- a['f2'][0] = 2
- a['f3'][0] = (3,)
- b = np.zeros((1,), dtype=[('f1', 'i4'),
- ('f2', 'i4'),
- ('f3', [('sf1', 'i4')])])
- b['f1'][0] = 1
- b['f2'][0] = 2
- b['f3'][0] = (3,)
-
- # All the different functions raise a warning, but not an error
- assert_equal(collect_warnings(a[['f1', 'f2']].__setitem__, 0, (10, 20)),
- [FutureWarning])
- # For <=1.12 a is not modified, but it will be in 1.13
- assert_equal(a, b)
-
- # Views also warn
- subset = a[['f1', 'f2']]
- subset_view = subset.view()
- assert_equal(collect_warnings(subset_view['f1'].__setitem__, 0, 10),
- [FutureWarning])
- # But the write goes through:
- assert_equal(subset['f1'][0], 10)
- # Only one warning per multiple field indexing, though (even if there
- # are multiple views involved):
- assert_equal(collect_warnings(subset['f1'].__setitem__, 0, 10), [])
-
- # make sure views of a multi-field index warn too
- c = np.zeros(3, dtype='i8,i8,i8')
- assert_equal(collect_warnings(c[['f0', 'f2']].view, 'i8,i8'),
- [FutureWarning])
-
-
def test_record_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
a.flags.writeable = False
@@ -4909,6 +5110,16 @@ class TestRecord(object):
np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
+ def test_multifield_indexing_view(self):
+ a = np.ones(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u4')])
+ v = a[['a', 'c']]
+ assert_(v.base is a)
+ assert_(v.dtype == np.dtype({'names': ['a', 'c'],
+ 'formats': ['i4', 'u4'],
+ 'offsets': [0, 8]}))
+ v[:] = (4,5)
+ assert_equal(a[0].item(), (4, 1, 5))
+
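A minimal illustration of the 1.16 multi-field indexing semantics checked above: selecting a list of fields returns a view whose dtype keeps the original offsets, so assignments write through to the parent array.

    import numpy as np

    a = np.ones(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u4')])
    v = a[['a', 'c']]          # a view sharing memory with `a`
    v[:] = (4, 5)
    assert a[0].item() == (4, 1.0, 5)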
class TestView(object):
def test_basic(self):
x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
@@ -5532,15 +5743,38 @@ class MatmulCommon(object):
res = self.matmul(v, v)
assert_(type(res) is np.dtype(dt).type)
- def test_vector_vector_values(self):
- vec = np.array([1, 2])
- tgt = 5
+ def test_scalar_output(self):
+ vec1 = np.array([2])
+ vec2 = np.array([3, 4]).reshape(1, -1)
+ tgt = np.array([6, 8])
for dt in self.types[1:]:
- v1 = vec.astype(dt)
- res = self.matmul(v1, v1)
+ v1 = vec1.astype(dt)
+ v2 = vec2.astype(dt)
+ res = self.matmul(v1, v2)
+ assert_equal(res, tgt)
+ res = self.matmul(v2.T, v1)
assert_equal(res, tgt)
# boolean type
+ vec = np.array([True, True], dtype='?').reshape(1, -1)
+ res = self.matmul(vec[:, 0], vec)
+ assert_equal(res, True)
+
+ def test_vector_vector_values(self):
+ vec1 = np.array([1, 2])
+ vec2 = np.array([3, 4]).reshape(-1, 1)
+ tgt1 = np.array([11])
+ tgt2 = np.array([[3, 6], [4, 8]])
+ for dt in self.types[1:]:
+ v1 = vec1.astype(dt)
+ v2 = vec2.astype(dt)
+ res = self.matmul(v1, v2)
+ assert_equal(res, tgt1)
+ # no broadcasting here, so we must make v1 a 2d ndarray
+ res = self.matmul(v2, v1.reshape(1, -1))
+ assert_equal(res, tgt2)
+
+ # boolean type
vec = np.array([True, True], dtype='?')
res = self.matmul(vec, vec)
assert_equal(res, True)
@@ -5669,44 +5903,96 @@ class TestMatmul(MatmulCommon):
matmul = np.matmul
def test_out_arg(self):
- a = np.ones((2, 2), dtype=float)
- b = np.ones((2, 2), dtype=float)
- tgt = np.full((2,2), 2, dtype=float)
+ a = np.ones((5, 2), dtype=float)
+ b = np.array([[1, 3], [5, 7]], dtype=float)
+ tgt = np.dot(a, b)
# test as positional argument
msg = "out positional argument"
- out = np.zeros((2, 2), dtype=float)
+ out = np.zeros((5, 2), dtype=float)
self.matmul(a, b, out)
assert_array_equal(out, tgt, err_msg=msg)
# test as keyword argument
msg = "out keyword argument"
- out = np.zeros((2, 2), dtype=float)
+ out = np.zeros((5, 2), dtype=float)
self.matmul(a, b, out=out)
assert_array_equal(out, tgt, err_msg=msg)
# test out with not allowed type cast (safe casting)
- # einsum and cblas raise different error types, so
- # use Exception.
- msg = "out argument with illegal cast"
- out = np.zeros((2, 2), dtype=np.int32)
- assert_raises(Exception, self.matmul, a, b, out=out)
-
- # skip following tests for now, cblas does not allow non-contiguous
- # outputs and consistency with dot would require same type,
- # dimensions, subtype, and c_contiguous.
-
- # test out with allowed type cast
- # msg = "out argument with allowed cast"
- # out = np.zeros((2, 2), dtype=np.complex128)
- # self.matmul(a, b, out=out)
- # assert_array_equal(out, tgt, err_msg=msg)
+ msg = "Cannot cast ufunc matmul output"
+ out = np.zeros((5, 2), dtype=np.int32)
+ assert_raises_regex(TypeError, msg, self.matmul, a, b, out=out)
+
+ # test out with type upcast to complex
+ out = np.zeros((5, 2), dtype=np.complex128)
+ c = self.matmul(a, b, out=out)
+ assert_(c is out)
+ with suppress_warnings() as sup:
+ sup.filter(np.ComplexWarning, '')
+ c = c.astype(tgt.dtype)
+ assert_array_equal(c, tgt)
+
+ def test_out_contiguous(self):
+ a = np.ones((5, 2), dtype=float)
+ b = np.array([[1, 3], [5, 7]], dtype=float)
+ v = np.array([1, 3], dtype=float)
+ tgt = np.dot(a, b)
+ tgt_mv = np.dot(a, v)
# test out non-contiguous
- # msg = "out argument with non-contiguous layout"
- # c = np.zeros((2, 2, 2), dtype=float)
- # self.matmul(a, b, out=c[..., 0])
- # assert_array_equal(c, tgt, err_msg=msg)
+ out = np.ones((5, 2, 2), dtype=float)
+ c = self.matmul(a, b, out=out[..., 0])
+ assert c.base is out
+ assert_array_equal(c, tgt)
+ c = self.matmul(a, v, out=out[:, 0, 0])
+ assert_array_equal(c, tgt_mv)
+ c = self.matmul(v, a.T, out=out[:, 0, 0])
+ assert_array_equal(c, tgt_mv)
+
+ # test out contiguous in only last dim
+ out = np.ones((10, 2), dtype=float)
+ c = self.matmul(a, b, out=out[::2, :])
+ assert_array_equal(c, tgt)
+
+ # test transposes of out, args
+ out = np.ones((5, 2), dtype=float)
+ c = self.matmul(b.T, a.T, out=out.T)
+ assert_array_equal(out, tgt)
+
+ m1 = np.arange(15.).reshape(5, 3)
+ m2 = np.arange(21.).reshape(3, 7)
+ m3 = np.arange(30.).reshape(5, 6)[:, ::2] # non-contiguous
+ vc = np.arange(10.)
+ vr = np.arange(6.)
+ m0 = np.zeros((3, 0))
+ @pytest.mark.parametrize('args', (
+ # matrix-matrix
+ (m1, m2), (m2.T, m1.T), (m2.T.copy(), m1.T), (m2.T, m1.T.copy()),
+ # matrix-matrix-transpose, contiguous and non
+ (m1, m1.T), (m1.T, m1), (m1, m3.T), (m3, m1.T),
+ (m3, m3.T), (m3.T, m3),
+ # matrix-matrix non-contiguous
+ (m3, m2), (m2.T, m3.T), (m2.T.copy(), m3.T),
+ # vector-matrix, matrix-vector, contiguous
+ (m1, vr[:3]), (vc[:5], m1), (m1.T, vc[:5]), (vr[:3], m1.T),
+ # vector-matrix, matrix-vector, vector non-contiguous
+ (m1, vr[::2]), (vc[::2], m1), (m1.T, vc[::2]), (vr[::2], m1.T),
+ # vector-matrix, matrix-vector, matrix non-contiguous
+ (m3, vr[:3]), (vc[:5], m3), (m3.T, vc[:5]), (vr[:3], m3.T),
+ # vector-matrix, matrix-vector, both non-contiguous
+ (m3, vr[::2]), (vc[::2], m3), (m3.T, vc[::2]), (vr[::2], m3.T),
+ # size == 0
+ (m0, m0.T), (m0.T, m0), (m1, m0), (m0.T, m1.T),
+ ))
+ def test_dot_equivalent(self, args):
+ r1 = np.matmul(*args)
+ r2 = np.dot(*args)
+ assert_equal(r1, r2)
+
+ r3 = np.matmul(args[0].copy(), args[1].copy())
+ assert_equal(r1, r3)
+
if sys.version_info[:2] >= (3, 5):
@@ -5730,6 +6016,11 @@ if sys.version_info[:2] >= (3, 5):
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
+ def test_matmul_raises(self):
+ assert_raises(TypeError, self.matmul, np.int8(5), np.int8(5))
+ assert_raises(TypeError, self.matmul, np.void(b'abc'), np.void(b'abc'))
+ assert_raises(ValueError, self.matmul, np.arange(10), np.void(b'abc'))
+
def test_matmul_inplace():
# It would be nice to support in-place matmul eventually, but for now
# we don't have a working implementation, so better just to error out
@@ -5744,6 +6035,17 @@ if sys.version_info[:2] >= (3, 5):
exec_ = getattr(builtins, "exec")
assert_raises(TypeError, exec_, "a @= b", globals(), locals())
+ def test_matmul_axes():
+ a = np.arange(3*4*5).reshape(3, 4, 5)
+ c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)])
+ assert c.shape == (3, 4, 4)
+ d = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (0, 1)])
+ assert d.shape == (4, 4, 3)
+ e = np.swapaxes(d, 0, 2)
+ assert_array_equal(e, c)
+ f = np.matmul(a, np.arange(3), axes=[(1, 0), (0), (0)])
+ assert f.shape == (4, 5)
+
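A brief sketch of the gufunc-style axes argument exercised above: each entry names the core axes of the corresponding operand, and the last entry places the output's core axes.

    import numpy as np

    a = np.arange(3 * 4 * 5).reshape(3, 4, 5)
    # multiply each (4, 5) matrix by its own transpose; put the output's
    # core axes at positions (1, 2)
    c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)])
    assert c.shape == (3, 4, 4)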
class TestInner(object):
@@ -5903,9 +6205,10 @@ class TestRepeat(object):
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
+@pytest.mark.parametrize('dt', [float, Decimal], ids=['float', 'object'])
class TestNeighborhoodIter(object):
# Simple, 2d tests
- def _test_simple2d(self, dt):
+ def test_simple2d(self, dt):
# Test zero and one padding for simple data type
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
@@ -5932,13 +6235,7 @@ class TestNeighborhoodIter(object):
x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant'])
assert_array_equal(l, r)
- def test_simple2d(self):
- self._test_simple2d(float)
-
- def test_simple2d_object(self):
- self._test_simple2d(Decimal)
-
- def _test_mirror2d(self, dt):
+ def test_mirror2d(self, dt):
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
@@ -5948,14 +6245,8 @@ class TestNeighborhoodIter(object):
x, [-1, 0, -1, 1], x[0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
- def test_mirror2d(self):
- self._test_mirror2d(float)
-
- def test_mirror2d_object(self):
- self._test_mirror2d(Decimal)
-
# Simple, 1d tests
- def _test_simple(self, dt):
+ def test_simple(self, dt):
# Test padding with constant values
x = np.linspace(1, 5, 5).astype(dt)
r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
@@ -5973,14 +6264,8 @@ class TestNeighborhoodIter(object):
x, [-1, 1], x[4], NEIGH_MODE['constant'])
assert_array_equal(l, r)
- def test_simple_float(self):
- self._test_simple(float)
-
- def test_simple_object(self):
- self._test_simple(Decimal)
-
# Test mirror modes
- def _test_mirror(self, dt):
+ def test_mirror(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
@@ -5989,14 +6274,8 @@ class TestNeighborhoodIter(object):
assert_([i.dtype == dt for i in l])
assert_array_equal(l, r)
- def test_mirror(self):
- self._test_mirror(float)
-
- def test_mirror_object(self):
- self._test_mirror(Decimal)
-
# Circular mode
- def _test_circular(self, dt):
+ def test_circular(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
@@ -6004,11 +6283,6 @@ class TestNeighborhoodIter(object):
x, [-2, 2], x[0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
- def test_circular(self):
- self._test_circular(float)
-
- def test_circular_object(self):
- self._test_circular(Decimal)
# Test stacking neighborhood iterators
class TestStackedNeighborhoodIter(object):
@@ -6454,7 +6728,6 @@ class TestNewBufferProtocol(object):
assert_raises((ValueError, BufferError), memoryview, a)
assert_raises((ValueError, BufferError), memoryview, np.array((3), 'M8[D]'))
-
def test_export_simple_1d(self):
x = np.array([1, 2, 3, 4, 5], dtype='i')
y = memoryview(x)
@@ -6663,11 +6936,26 @@ class TestNewBufferProtocol(object):
ValueError, "format string",
np.array, m)
+ def test_error_message_unsupported(self):
+ # wchar has no corresponding numpy type - if this changes in the future,
+ # we need a better way to construct an invalid memoryview format.
+ t = ctypes.c_wchar * 4
+ with assert_raises(ValueError) as cm:
+ np.array(t())
+
+ exc = cm.exception
+ if sys.version_info.major > 2:
+ with assert_raises_regex(
+ NotImplementedError,
+ r"Unrepresentable .* 'u' \(UCS-2 strings\)"
+ ):
+ raise exc.__cause__
+
def test_ctypes_integer_via_memoryview(self):
# gh-11150, due to bpo-10746
for c_integer in {ctypes.c_int, ctypes.c_long, ctypes.c_longlong}:
value = c_integer(42)
- with warnings.catch_warnings(record=True) as w:
+ with warnings.catch_warnings(record=True):
warnings.filterwarnings('always', r'.*\bctypes\b', RuntimeWarning)
np.asarray(value)
@@ -6677,7 +6965,7 @@ class TestNewBufferProtocol(object):
_fields_ = [('a', ctypes.c_uint8), ('b', ctypes.c_uint32)]
f = foo(a=1, b=2)
- with warnings.catch_warnings(record=True) as w:
+ with warnings.catch_warnings(record=True):
warnings.filterwarnings('always', r'.*\bctypes\b', RuntimeWarning)
arr = np.asarray(f)
@@ -7328,7 +7616,6 @@ class TestFormat(object):
dst = object.__format__(a, '30')
assert_equal(res, dst)
-
class TestCTypes(object):
def test_ctypes_is_available(self):
@@ -7349,6 +7636,55 @@ class TestCTypes(object):
finally:
_internal.ctypes = ctypes
+ def _make_readonly(x):
+ x.flags.writeable = False
+ return x
+
+ @pytest.mark.parametrize('arr', [
+ np.array([1, 2, 3]),
+ np.array([['one', 'two'], ['three', 'four']]),
+ np.array((1, 2), dtype='i4,i4'),
+ np.zeros((2,), dtype=
+ np.dtype(dict(
+ formats=['<i4', '<i4'],
+ names=['a', 'b'],
+ offsets=[0, 2],
+ itemsize=6
+ ))
+ ),
+ np.array([None], dtype=object),
+ np.array([]),
+ np.empty((0, 0)),
+ _make_readonly(np.array([1, 2, 3])),
+ ], ids=[
+ '1d',
+ '2d',
+ 'structured',
+ 'overlapping',
+ 'object',
+ 'empty',
+ 'empty-2d',
+ 'readonly'
+ ])
+ def test_ctypes_data_as_holds_reference(self, arr):
+ # gh-9647
+ # create a copy to ensure that pytest does not mess with the refcounts
+ arr = arr.copy()
+
+ arr_ref = weakref.ref(arr)
+
+ ctypes_ptr = arr.ctypes.data_as(ctypes.c_void_p)
+
+ # `ctypes_ptr` should hold onto `arr`
+ del arr
+ gc.collect()
+ assert_(arr_ref() is not None, "ctypes pointer did not hold onto a reference")
+
+ # but when the `ctypes_ptr` object dies, so should `arr`
+ del ctypes_ptr
+ gc.collect()
+ assert_(arr_ref() is None, "unknowable whether ctypes pointer holds a reference")
+
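# --- illustrative sketch (not part of the diff) ---
# The gh-9647 behaviour pinned down above, standalone: the object returned
# by .ctypes.data_as() keeps the array alive until the pointer itself dies.
import ctypes
import gc
import weakref
import numpy as np
arr = np.array([1, 2, 3])
ref = weakref.ref(arr)
ptr = arr.ctypes.data_as(ctypes.c_void_p)
del arr
gc.collect()
assert ref() is not None   # the pointer object still references the array
del ptr
gc.collect()
assert ref() is None       # dropping the pointer releases the array
# --- end sketch ---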
class TestWritebackIfCopy(object):
# all these tests use the WRITEBACKIFCOPY mechanism
@@ -7368,6 +7704,7 @@ class TestWritebackIfCopy(object):
mat = np.eye(5)
out = np.eye(5, dtype='i2')
res = np.clip(mat, a_min=-10, a_max=0, out=out)
+ assert_(res is out)
assert_equal(np.sum(out), 0)
def test_insert_noncontiguous(self):
@@ -7608,3 +7945,126 @@ def test_npymath_real():
got = fun(z)
expected = npfun(z)
assert_allclose(got, expected)
+
+def test_uintalignment_and_alignment():
+ # alignment code needs to satisfy these requirements:
+ # 1. numpy structs match C struct layout
+ # 2. ufuncs/casting is safe wrt aligned access
+ # 3. copy code is safe wrt "uint aligned" access
+ #
+ # Complex types are the main problem, whose alignment may not be the same
+ # as their "uint alignment".
+ #
+ # This test might only fail on certain platforms, where uint64 alignment is
+ # not equal to complex64 alignment. The last two checks will only fail
+ # when compiled with DEBUG=1.
+
+ d1 = np.dtype('u1,c8', align=True)
+ d2 = np.dtype('u4,c8', align=True)
+ d3 = np.dtype({'names': ['a', 'b'], 'formats': ['u1', d1]}, align=True)
+
+ assert_equal(np.zeros(1, dtype=d1)['f1'].flags['ALIGNED'], True)
+ assert_equal(np.zeros(1, dtype=d2)['f1'].flags['ALIGNED'], True)
+ assert_equal(np.zeros(1, dtype='u1,c8')['f1'].flags['ALIGNED'], False)
+
+ # check that C struct matches numpy struct size
+ s = _multiarray_tests.get_struct_alignments()
+ for d, (alignment, size) in zip([d1,d2,d3], s):
+ assert_equal(d.alignment, alignment)
+ assert_equal(d.itemsize, size)
+
+ # check that ufuncs don't complain in debug mode
+ # (this is probably OK if the aligned flag is true above)
+ src = np.zeros((2,2), dtype=d1)['f1'] # 4-byte aligned, often
+ np.exp(src) # assert fails?
+
+ # check that copy code doesn't complain in debug mode
+ dst = np.zeros((2,2), dtype='c8')
+ dst[:,1] = src[:,1] # assert in lowlevel_strided_loops fails?
+
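# --- illustrative sketch (not part of the diff) ---
# The ALIGNED-flag checks above, standalone: with align=True the struct is
# padded so the complex64 field starts at its natural alignment; packed, the
# field sits at offset 1 and its view reports itself as unaligned.
import numpy as np
aligned = np.dtype('u1,c8', align=True)
packed = np.dtype('u1,c8')
assert np.zeros(1, dtype=aligned)['f1'].flags['ALIGNED']
assert not np.zeros(1, dtype=packed)['f1'].flags['ALIGNED']
# --- end sketch ---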
+class TestAlignment(object):
+ # adapted from scipy._lib.tests.test__util.test__aligned_zeros
+ # Checks that unusual memory alignments don't trip up numpy.
+ # In particular, check that RELAXED_STRIDES doesn't trip alignment
+ # assertions in NDEBUG mode for size-0 arrays (gh-12503).
+
+ def check(self, shape, dtype, order, align):
+ err_msg = repr((shape, dtype, order, align))
+ x = _aligned_zeros(shape, dtype, order, align=align)
+ if align is None:
+ align = np.dtype(dtype).alignment
+ assert_equal(x.__array_interface__['data'][0] % align, 0)
+ if hasattr(shape, '__len__'):
+ assert_equal(x.shape, shape, err_msg)
+ else:
+ assert_equal(x.shape, (shape,), err_msg)
+ assert_equal(x.dtype, dtype)
+ if order == "C":
+ assert_(x.flags.c_contiguous, err_msg)
+ elif order == "F":
+ if x.size > 0:
+ assert_(x.flags.f_contiguous, err_msg)
+ elif order is None:
+ assert_(x.flags.c_contiguous, err_msg)
+ else:
+ raise ValueError()
+
+ def test_various_alignments(self):
+ for align in [1, 2, 3, 4, 8, 12, 16, 32, 64, None]:
+ for n in [0, 1, 3, 11]:
+ for order in ["C", "F", None]:
+ for dtype in list(np.typecodes["All"]) + ['i4,i4,i4']:
+ if dtype == 'O':
+ # object dtype can't be misaligned
+ continue
+ for shape in [n, (1, 2, 3, n)]:
+ self.check(shape, np.dtype(dtype), order, align)
+
+ def test_strided_loop_alignments(self):
+ # particularly test that complex64 and float128 use the right alignment
+ # code-paths, since these are particularly problematic. It is useful to
+ # turn on USE_DEBUG for this test, so lowlevel-loop asserts are run.
+ for align in [1, 2, 4, 8, 12, 16, None]:
+ xf64 = _aligned_zeros(3, np.float64)
+
+ xc64 = _aligned_zeros(3, np.complex64, align=align)
+ xf128 = _aligned_zeros(3, np.longdouble, align=align)
+
+ # test casting, both to and from misaligned
+ with suppress_warnings() as sup:
+ sup.filter(np.ComplexWarning, "Casting complex values")
+ xc64.astype('f8')
+ xf64.astype(np.complex64)
+ test = xc64 + xf64
+
+ xf128.astype('f8')
+ xf64.astype(np.longdouble)
+ test = xf128 + xf64
+
+ test = xf128 + xc64
+
+ # test copy, both to and from misaligned
+ # contig copy
+ xf64[:] = xf64.copy()
+ xc64[:] = xc64.copy()
+ xf128[:] = xf128.copy()
+ # strided copy
+ xf64[::2] = xf64[::2].copy()
+ xc64[::2] = xc64[::2].copy()
+ xf128[::2] = xf128[::2].copy()
+
+def test_getfield():
+ a = np.arange(32, dtype='uint16')
+ if sys.byteorder == 'little':
+ i = 0
+ j = 1
+ else:
+ i = 1
+ j = 0
+ b = a.getfield('int8', i)
+ assert_equal(b, a)
+ b = a.getfield('int8', j)
+ assert_equal(b, 0)
+ pytest.raises(ValueError, a.getfield, 'uint8', -1)
+ pytest.raises(ValueError, a.getfield, 'uint8', 16)
+ pytest.raises(ValueError, a.getfield, 'uint64', 0)
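As a standalone illustration of the endianness logic in test_getfield above (a sketch, not part of the diff): getfield reinterprets the array's bytes at a byte offset, so which offset holds the low-order byte of each uint16 depends on the platform byte order.

    import sys
    import numpy as np

    a = np.arange(32, dtype='uint16')          # all values fit in one byte
    low = 0 if sys.byteorder == 'little' else 1
    assert (a.getfield('int8', low) == a).all()      # low bytes carry values
    assert (a.getfield('int8', 1 - low) == 0).all()  # high bytes are zero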
diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py
index 13bc6b34a..26fd9c346 100644
--- a/numpy/core/tests/test_nditer.py
+++ b/numpy/core/tests/test_nditer.py
@@ -1,14 +1,13 @@
from __future__ import division, absolute_import, print_function
import sys
-import warnings
import pytest
import numpy as np
import numpy.core._multiarray_tests as _multiarray_tests
from numpy import array, arange, nditer, all
from numpy.testing import (
- assert_, assert_equal, assert_array_equal, assert_raises, assert_warns,
+ assert_, assert_equal, assert_array_equal, assert_raises,
HAS_REFCOUNT, suppress_warnings
)
@@ -2196,21 +2195,15 @@ class TestIterNested(object):
a = arange(12).reshape(2, 3, 2)
i, j = np.nested_iters(a, [[0], [1, 2]])
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
i, j = np.nested_iters(a, [[0, 1], [2]])
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
i, j = np.nested_iters(a, [[0, 2], [1]])
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
def test_reorder(self):
@@ -2219,40 +2212,28 @@ class TestIterNested(object):
# In 'K' order (default), it gets reordered
i, j = np.nested_iters(a, [[0], [2, 1]])
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
i, j = np.nested_iters(a, [[1, 0], [2]])
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
i, j = np.nested_iters(a, [[2, 0], [1]])
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
# In 'C' order, it doesn't
i, j = np.nested_iters(a, [[0], [2, 1]], order='C')
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[0, 2, 4, 1, 3, 5], [6, 8, 10, 7, 9, 11]])
i, j = np.nested_iters(a, [[1, 0], [2]], order='C')
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1], [6, 7], [2, 3], [8, 9], [4, 5], [10, 11]])
i, j = np.nested_iters(a, [[2, 0], [1]], order='C')
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[0, 2, 4], [6, 8, 10], [1, 3, 5], [7, 9, 11]])
def test_flip_axes(self):
@@ -2261,40 +2242,28 @@ class TestIterNested(object):
# In 'K' order (default), the axes all get flipped
i, j = np.nested_iters(a, [[0], [1, 2]])
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
i, j = np.nested_iters(a, [[0, 1], [2]])
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
i, j = np.nested_iters(a, [[0, 2], [1]])
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
# In 'C' order, flipping axes is disabled
i, j = np.nested_iters(a, [[0], [1, 2]], order='C')
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[11, 10, 9, 8, 7, 6], [5, 4, 3, 2, 1, 0]])
i, j = np.nested_iters(a, [[0, 1], [2]], order='C')
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[11, 10], [9, 8], [7, 6], [5, 4], [3, 2], [1, 0]])
i, j = np.nested_iters(a, [[0, 2], [1]], order='C')
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[11, 9, 7], [10, 8, 6], [5, 3, 1], [4, 2, 0]])
def test_broadcast(self):
@@ -2303,15 +2272,11 @@ class TestIterNested(object):
b = arange(3).reshape(1, 3)
i, j = np.nested_iters([a, b], [[0], [1]])
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[[0, 0], [0, 1], [0, 2]], [[1, 0], [1, 1], [1, 2]]])
i, j = np.nested_iters([a, b], [[1], [0]])
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[[0, 0], [1, 0]], [[0, 1], [1, 1]], [[0, 2], [1, 2]]])
def test_dtype_copy(self):
@@ -2323,9 +2288,7 @@ class TestIterNested(object):
op_flags=['readonly', 'copy'],
op_dtypes='f8')
assert_equal(j[0].dtype, np.dtype('f8'))
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1, 2], [3, 4, 5]])
vals = None
@@ -2358,7 +2321,6 @@ class TestIterNested(object):
j.close()
assert_equal(a, [[1, 2, 3], [4, 5, 6]])
-
def test_dtype_buffered(self):
# Test nested iteration with buffering to change dtype
@@ -2377,15 +2339,11 @@ class TestIterNested(object):
def test_0d(self):
a = np.arange(12).reshape(2, 3, 2)
i, j = np.nested_iters(a, [[], [1, 0, 2]])
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]])
i, j = np.nested_iters(a, [[1, 0, 2], []])
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]])
i, j, k = np.nested_iters(a, [[2, 0], [], [1]])
@@ -2557,10 +2515,8 @@ def test_iter_buffering_reduction_reuse_reduce_loops():
op_flags=[['readonly'], ['readwrite']],
buffersize=5)
- bufsizes = []
with it:
- for x, y in it:
- bufsizes.append(x.shape[0])
+ bufsizes = [x.shape[0] for x, y in it]
assert_equal(bufsizes, [5, 2, 5, 2])
assert_equal(sum(bufsizes), a.size)
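The repeated rewrite above (vals = [list(j) for _ in i]) leans on a property of np.nested_iters worth spelling out: advancing the outer iterator re-seats the inner one, so listing j inside the comprehension yields a fresh inner sweep per outer step. A minimal sketch of the pattern:

    import numpy as np
    from numpy.testing import assert_equal

    a = np.arange(12).reshape(2, 3, 2)
    i, j = np.nested_iters(a, [[0], [1, 2]])  # outer: axis 0; inner: axes 1, 2
    vals = [list(j) for _ in i]
    assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])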
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index 53486dc51..37534720a 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -13,7 +13,7 @@ from numpy.random import rand, randint, randn
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_raises_regex,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
- suppress_warnings, HAS_REFCOUNT
+ HAS_REFCOUNT
)
@@ -471,12 +471,9 @@ class TestSeterr(object):
@pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.")
def test_divide_err(self):
with np.errstate(divide='raise'):
- try:
+ with assert_raises(FloatingPointError):
np.array([1.]) / np.array([0.])
- except FloatingPointError:
- pass
- else:
- self.fail()
+
np.seterr(divide='ignore')
np.array([1.]) / np.array([0.])
@@ -1275,7 +1272,6 @@ class TestArrayComparisons(object):
assert_equal(a == None, [False, False, False])
assert_equal(a != None, [True, True, True])
-
def test_array_equiv(self):
res = np.array_equiv(np.array([1, 2]), np.array([1, 2]))
assert_(res)
@@ -1530,7 +1526,7 @@ class TestClip(object):
m = -0.5
M = 0.6
self.fastclip(a, m, M, a)
- self.clip(a, m, M, ac)
+ self.clip(ac, m, M, ac)
assert_array_strict_equal(a, ac)
def test_noncontig_inplace(self):
@@ -1543,7 +1539,7 @@ class TestClip(object):
m = -0.5
M = 0.6
self.fastclip(a, m, M, a)
- self.clip(a, m, M, ac)
+ self.clip(ac, m, M, ac)
assert_array_equal(a, ac)
def test_type_cast_01(self):
@@ -1722,6 +1718,22 @@ class TestClip(object):
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
+ def test_clip_with_out_transposed(self):
+ # Test that the out argument works when transposed
+ a = np.arange(16).reshape(4, 4)
+ out = np.empty_like(a).T
+ a.clip(4, 10, out=out)
+ expected = self.clip(a, 4, 10)
+ assert_array_equal(out, expected)
+
+ def test_clip_with_out_memory_overlap(self):
+ # Test that the out argument works when it has memory overlap
+ a = np.arange(16).reshape(4, 4)
+ ac = a.copy()
+ a[:-1].clip(4, 10, out=a[1:])
+ expected = self.clip(ac[:-1], 4, 10)
+ assert_array_equal(a[1:], expected)
+
def test_clip_inplace_array(self):
# Test native double input with array min/max
a = self._generate_data(self.nr, self.nc)
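A standalone sketch of the two new out-argument cases TestClip pins down (illustrative only): out may be a non-contiguous view, and may even overlap the input, in which case NumPy has to buffer internally.

    import numpy as np

    a = np.arange(16).reshape(4, 4)

    out = np.empty_like(a).T                 # transposed, non-contiguous
    a.clip(4, 10, out=out)
    assert (out == np.clip(a, 4, 10)).all()

    b = a.copy()
    a[:-1].clip(4, 10, out=a[1:])            # out overlaps the input
    assert (a[1:] == np.clip(b[:-1], 4, 10)).all()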
diff --git a/numpy/core/tests/test_numerictypes.py b/numpy/core/tests/test_numerictypes.py
index cdf1b0490..71f7b7150 100644
--- a/numpy/core/tests/test_numerictypes.py
+++ b/numpy/core/tests/test_numerictypes.py
@@ -3,6 +3,7 @@ from __future__ import division, absolute_import, print_function
import sys
import itertools
+import pytest
import numpy as np
from numpy.testing import assert_, assert_equal, assert_raises
@@ -86,10 +87,8 @@ def normalize_descr(descr):
else:
nitem = (item[0], dtype)
out.append(nitem)
- elif isinstance(item[1], list):
- l = []
- for j in normalize_descr(item[1]):
- l.append(j)
+ elif isinstance(dtype, list):
+ l = normalize_descr(dtype)
out.append((item[0], l))
else:
raise ValueError("Expected a str or list and got %s" %
@@ -406,3 +405,95 @@ class TestIsSubDType(object):
for w1, w2 in itertools.product(self.wrappers, repeat=2):
assert_(not np.issubdtype(w1(np.float32), w2(np.float64)))
assert_(not np.issubdtype(w1(np.float64), w2(np.float32)))
+
+
+class TestSctypeDict(object):
+ def test_longdouble(self):
+ assert_(np.sctypeDict['f8'] is not np.longdouble)
+ assert_(np.sctypeDict['c16'] is not np.clongdouble)
+
+
+class TestBitName(object):
+ def test_abstract(self):
+ assert_raises(ValueError, np.core.numerictypes.bitname, np.floating)
+
+
+class TestMaximumSctype(object):
+
+ # note that parametrizing with sctype['int'] and similar would skip types
+ # with the same size (gh-11923)
+
+ @pytest.mark.parametrize('t', [np.byte, np.short, np.intc, np.int_, np.longlong])
+ def test_int(self, t):
+ assert_equal(np.maximum_sctype(t), np.sctypes['int'][-1])
+
+ @pytest.mark.parametrize('t', [np.ubyte, np.ushort, np.uintc, np.uint, np.ulonglong])
+ def test_uint(self, t):
+ assert_equal(np.maximum_sctype(t), np.sctypes['uint'][-1])
+
+ @pytest.mark.parametrize('t', [np.half, np.single, np.double, np.longdouble])
+ def test_float(self, t):
+ assert_equal(np.maximum_sctype(t), np.sctypes['float'][-1])
+
+ @pytest.mark.parametrize('t', [np.csingle, np.cdouble, np.clongdouble])
+ def test_complex(self, t):
+ assert_equal(np.maximum_sctype(t), np.sctypes['complex'][-1])
+
+ @pytest.mark.parametrize('t', [np.bool_, np.object_, np.unicode_, np.bytes_, np.void])
+ def test_other(self, t):
+ assert_equal(np.maximum_sctype(t), t)
+
+
+class Test_sctype2char(object):
+ # This function is old enough that we're really just documenting the quirks
+ # at this point.
+
+ def test_scalar_type(self):
+ assert_equal(np.sctype2char(np.double), 'd')
+ assert_equal(np.sctype2char(np.int_), 'l')
+ assert_equal(np.sctype2char(np.unicode_), 'U')
+ assert_equal(np.sctype2char(np.bytes_), 'S')
+
+ def test_other_type(self):
+ assert_equal(np.sctype2char(float), 'd')
+ assert_equal(np.sctype2char(list), 'O')
+ assert_equal(np.sctype2char(np.ndarray), 'O')
+
+ def test_third_party_scalar_type(self):
+ from numpy.core._rational_tests import rational
+ assert_raises(KeyError, np.sctype2char, rational)
+ assert_raises(KeyError, np.sctype2char, rational(1))
+
+ def test_array_instance(self):
+ assert_equal(np.sctype2char(np.array([1.0, 2.0])), 'd')
+
+ def test_abstract_type(self):
+ assert_raises(KeyError, np.sctype2char, np.floating)
+
+ def test_non_type(self):
+ assert_raises(ValueError, np.sctype2char, 1)
+
+@pytest.mark.parametrize("rep, expected", [
+ (np.int32, True),
+ (list, False),
+ (1.1, False),
+ (str, True),
+ (np.dtype(np.float64), True),
+ (np.dtype((np.int16, (3, 4))), True),
+ (np.dtype([('a', np.int8)]), True),
+ ])
+def test_issctype(rep, expected):
+ # ensure proper identification of scalar
+ # data-types by issctype()
+ actual = np.issctype(rep)
+ assert_equal(actual, expected)
+
+
+@pytest.mark.skipif(sys.flags.optimize > 1,
+ reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1")
+class TestDocStrings(object):
+ def test_platform_dependent_aliases(self):
+ if np.int64 is np.int_:
+ assert_('int64' in np.int_.__doc__)
+ elif np.int64 is np.longlong:
+ assert_('int64' in np.longlong.__doc__)
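The new numerictypes tests mostly document long-standing introspection helpers; a quick sketch of what they return, with results as asserted by the tests above:

    import numpy as np

    assert np.maximum_sctype(np.int8) is np.sctypes['int'][-1]  # widest int
    assert np.sctype2char(np.double) == 'd'
    assert np.sctype2char(list) == 'O'    # non-numeric types map to object
    assert np.issctype(np.int32) and not np.issctype(1.1)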
diff --git a/numpy/core/tests/test_overrides.py b/numpy/core/tests/test_overrides.py
new file mode 100644
index 000000000..62b2a3e53
--- /dev/null
+++ b/numpy/core/tests/test_overrides.py
@@ -0,0 +1,388 @@
+from __future__ import division, absolute_import, print_function
+
+import inspect
+import sys
+
+import numpy as np
+from numpy.testing import (
+ assert_, assert_equal, assert_raises, assert_raises_regex)
+from numpy.core.overrides import (
+ get_overloaded_types_and_args, array_function_dispatch,
+ verify_matching_signatures, ENABLE_ARRAY_FUNCTION)
+from numpy.core.numeric import pickle
+import pytest
+
+
+requires_array_function = pytest.mark.skipif(
+ not ENABLE_ARRAY_FUNCTION,
+ reason="__array_function__ dispatch not enabled.")
+
+
+def _get_overloaded_args(relevant_args):
+ types, args = get_overloaded_types_and_args(relevant_args)
+ return args
+
+
+def _return_not_implemented(self, *args, **kwargs):
+ return NotImplemented
+
+
+# need to define this at the top level to test pickling
+@array_function_dispatch(lambda array: (array,))
+def dispatched_one_arg(array):
+ """Docstring."""
+ return 'original'
+
+
+@array_function_dispatch(lambda array1, array2: (array1, array2))
+def dispatched_two_arg(array1, array2):
+ """Docstring."""
+ return 'original'
+
+
+@requires_array_function
+class TestGetOverloadedTypesAndArgs(object):
+
+ def test_ndarray(self):
+ array = np.array(1)
+
+ types, args = get_overloaded_types_and_args([array])
+ assert_equal(set(types), {np.ndarray})
+ assert_equal(list(args), [array])
+
+ types, args = get_overloaded_types_and_args([array, array])
+ assert_equal(len(types), 1)
+ assert_equal(set(types), {np.ndarray})
+ assert_equal(list(args), [array])
+
+ types, args = get_overloaded_types_and_args([array, 1])
+ assert_equal(set(types), {np.ndarray})
+ assert_equal(list(args), [array])
+
+ types, args = get_overloaded_types_and_args([1, array])
+ assert_equal(set(types), {np.ndarray})
+ assert_equal(list(args), [array])
+
+ def test_ndarray_subclasses(self):
+
+ class OverrideSub(np.ndarray):
+ __array_function__ = _return_not_implemented
+
+ class NoOverrideSub(np.ndarray):
+ pass
+
+ array = np.array(1).view(np.ndarray)
+ override_sub = np.array(1).view(OverrideSub)
+ no_override_sub = np.array(1).view(NoOverrideSub)
+
+ types, args = get_overloaded_types_and_args([array, override_sub])
+ assert_equal(set(types), {np.ndarray, OverrideSub})
+ assert_equal(list(args), [override_sub, array])
+
+ types, args = get_overloaded_types_and_args([array, no_override_sub])
+ assert_equal(set(types), {np.ndarray, NoOverrideSub})
+ assert_equal(list(args), [no_override_sub, array])
+
+ types, args = get_overloaded_types_and_args(
+ [override_sub, no_override_sub])
+ assert_equal(set(types), {OverrideSub, NoOverrideSub})
+ assert_equal(list(args), [override_sub, no_override_sub])
+
+ def test_ndarray_and_duck_array(self):
+
+ class Other(object):
+ __array_function__ = _return_not_implemented
+
+ array = np.array(1)
+ other = Other()
+
+ types, args = get_overloaded_types_and_args([other, array])
+ assert_equal(set(types), {np.ndarray, Other})
+ assert_equal(list(args), [other, array])
+
+ types, args = get_overloaded_types_and_args([array, other])
+ assert_equal(set(types), {np.ndarray, Other})
+ assert_equal(list(args), [array, other])
+
+ def test_ndarray_subclass_and_duck_array(self):
+
+ class OverrideSub(np.ndarray):
+ __array_function__ = _return_not_implemented
+
+ class Other(object):
+ __array_function__ = _return_not_implemented
+
+ array = np.array(1)
+ subarray = np.array(1).view(OverrideSub)
+ other = Other()
+
+ assert_equal(_get_overloaded_args([array, subarray, other]),
+ [subarray, array, other])
+ assert_equal(_get_overloaded_args([array, other, subarray]),
+ [subarray, array, other])
+
+ def test_many_duck_arrays(self):
+
+ class A(object):
+ __array_function__ = _return_not_implemented
+
+ class B(A):
+ __array_function__ = _return_not_implemented
+
+ class C(A):
+ __array_function__ = _return_not_implemented
+
+ class D(object):
+ __array_function__ = _return_not_implemented
+
+ a = A()
+ b = B()
+ c = C()
+ d = D()
+
+ assert_equal(_get_overloaded_args([1]), [])
+ assert_equal(_get_overloaded_args([a]), [a])
+ assert_equal(_get_overloaded_args([a, 1]), [a])
+ assert_equal(_get_overloaded_args([a, a, a]), [a])
+ assert_equal(_get_overloaded_args([a, d, a]), [a, d])
+ assert_equal(_get_overloaded_args([a, b]), [b, a])
+ assert_equal(_get_overloaded_args([b, a]), [b, a])
+ assert_equal(_get_overloaded_args([a, b, c]), [b, c, a])
+ assert_equal(_get_overloaded_args([a, c, b]), [c, b, a])
+
+
+@requires_array_function
+class TestNDArrayArrayFunction(object):
+
+ def test_method(self):
+
+ class Other(object):
+ __array_function__ = _return_not_implemented
+
+ class NoOverrideSub(np.ndarray):
+ pass
+
+ class OverrideSub(np.ndarray):
+ __array_function__ = _return_not_implemented
+
+ array = np.array([1])
+ other = Other()
+ no_override_sub = array.view(NoOverrideSub)
+ override_sub = array.view(OverrideSub)
+
+ result = array.__array_function__(func=dispatched_two_arg,
+ types=(np.ndarray,),
+ args=(array, 1.), kwargs={})
+ assert_equal(result, 'original')
+
+ result = array.__array_function__(func=dispatched_two_arg,
+ types=(np.ndarray, Other),
+ args=(array, other), kwargs={})
+ assert_(result is NotImplemented)
+
+ result = array.__array_function__(func=dispatched_two_arg,
+ types=(np.ndarray, NoOverrideSub),
+ args=(array, no_override_sub),
+ kwargs={})
+ assert_equal(result, 'original')
+
+ result = array.__array_function__(func=dispatched_two_arg,
+ types=(np.ndarray, OverrideSub),
+ args=(array, override_sub),
+ kwargs={})
+ assert_equal(result, 'original')
+
+ with assert_raises_regex(TypeError, 'no implementation found'):
+ np.concatenate((array, other))
+
+ expected = np.concatenate((array, array))
+ result = np.concatenate((array, no_override_sub))
+ assert_equal(result, expected.view(NoOverrideSub))
+ result = np.concatenate((array, override_sub))
+ assert_equal(result, expected.view(OverrideSub))
+
+
+@requires_array_function
+class TestArrayFunctionDispatch(object):
+
+ def test_pickle(self):
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ roundtripped = pickle.loads(
+ pickle.dumps(dispatched_one_arg, protocol=proto))
+ assert_(roundtripped is dispatched_one_arg)
+
+ def test_name_and_docstring(self):
+ assert_equal(dispatched_one_arg.__name__, 'dispatched_one_arg')
+ if sys.flags.optimize < 2:
+ assert_equal(dispatched_one_arg.__doc__, 'Docstring.')
+
+ def test_interface(self):
+
+ class MyArray(object):
+ def __array_function__(self, func, types, args, kwargs):
+ return (self, func, types, args, kwargs)
+
+ original = MyArray()
+ (obj, func, types, args, kwargs) = dispatched_one_arg(original)
+ assert_(obj is original)
+ assert_(func is dispatched_one_arg)
+ assert_equal(set(types), {MyArray})
+ # assert_equal uses the overloaded np.iscomplexobj() internally
+ assert_(args == (original,))
+ assert_equal(kwargs, {})
+
+ def test_not_implemented(self):
+
+ class MyArray(object):
+ def __array_function__(self, func, types, args, kwargs):
+ return NotImplemented
+
+ array = MyArray()
+ with assert_raises_regex(TypeError, 'no implementation found'):
+ dispatched_one_arg(array)
+
+
+@requires_array_function
+class TestVerifyMatchingSignatures(object):
+
+ def test_verify_matching_signatures(self):
+
+ verify_matching_signatures(lambda x: 0, lambda x: 0)
+ verify_matching_signatures(lambda x=None: 0, lambda x=None: 0)
+ verify_matching_signatures(lambda x=1: 0, lambda x=None: 0)
+
+ with assert_raises(RuntimeError):
+ verify_matching_signatures(lambda a: 0, lambda b: 0)
+ with assert_raises(RuntimeError):
+ verify_matching_signatures(lambda x: 0, lambda x=None: 0)
+ with assert_raises(RuntimeError):
+ verify_matching_signatures(lambda x=None: 0, lambda y=None: 0)
+ with assert_raises(RuntimeError):
+ verify_matching_signatures(lambda x=1: 0, lambda y=1: 0)
+
+ def test_array_function_dispatch(self):
+
+ with assert_raises(RuntimeError):
+ @array_function_dispatch(lambda x: (x,))
+ def f(y):
+ pass
+
+ # should not raise
+ @array_function_dispatch(lambda x: (x,), verify=False)
+ def f(y):
+ pass
+
+
+def _new_duck_type_and_implements():
+ """Create a duck array type and implements functions."""
+ HANDLED_FUNCTIONS = {}
+
+ class MyArray(object):
+ def __array_function__(self, func, types, args, kwargs):
+ if func not in HANDLED_FUNCTIONS:
+ return NotImplemented
+ if not all(issubclass(t, MyArray) for t in types):
+ return NotImplemented
+ return HANDLED_FUNCTIONS[func](*args, **kwargs)
+
+ def implements(numpy_function):
+ """Register an __array_function__ implementations."""
+ def decorator(func):
+ HANDLED_FUNCTIONS[numpy_function] = func
+ return func
+ return decorator
+
+ return (MyArray, implements)
+
+
+@requires_array_function
+class TestArrayFunctionImplementation(object):
+
+ def test_one_arg(self):
+ MyArray, implements = _new_duck_type_and_implements()
+
+ @implements(dispatched_one_arg)
+ def _(array):
+ return 'myarray'
+
+ assert_equal(dispatched_one_arg(1), 'original')
+ assert_equal(dispatched_one_arg(MyArray()), 'myarray')
+
+ def test_optional_args(self):
+ MyArray, implements = _new_duck_type_and_implements()
+
+ @array_function_dispatch(lambda array, option=None: (array,))
+ def func_with_option(array, option='default'):
+ return option
+
+ @implements(func_with_option)
+ def my_array_func_with_option(array, new_option='myarray'):
+ return new_option
+
+ # we don't need to implement every option on __array_function__
+ # implementations
+ assert_equal(func_with_option(1), 'default')
+ assert_equal(func_with_option(1, option='extra'), 'extra')
+ assert_equal(func_with_option(MyArray()), 'myarray')
+ with assert_raises(TypeError):
+ func_with_option(MyArray(), option='extra')
+
+ # but new options on implementations can't be used
+ result = my_array_func_with_option(MyArray(), new_option='yes')
+ assert_equal(result, 'yes')
+ with assert_raises(TypeError):
+ func_with_option(MyArray(), new_option='no')
+
+ def test_not_implemented(self):
+ MyArray, implements = _new_duck_type_and_implements()
+
+ @array_function_dispatch(lambda array: (array,), module='my')
+ def func(array):
+ return array
+
+ array = np.array(1)
+ assert_(func(array) is array)
+ assert_equal(func.__module__, 'my')
+
+ with assert_raises_regex(
+ TypeError, "no implementation found for 'my.func'"):
+ func(MyArray())
+
+
+class TestNDArrayMethods(object):
+
+ def test_repr(self):
+ # gh-12162: should still be defined even if __array_function__ doesn't
+ # implement np.array_repr()
+
+ class MyArray(np.ndarray):
+ def __array_function__(*args, **kwargs):
+ return NotImplemented
+
+ array = np.array(1).view(MyArray)
+ assert_equal(repr(array), 'MyArray(1)')
+ assert_equal(str(array), '1')
+
+
+class TestNumPyFunctions(object):
+
+ def test_set_module(self):
+ assert_equal(np.sum.__module__, 'numpy')
+ assert_equal(np.char.equal.__module__, 'numpy.char')
+ assert_equal(np.fft.fft.__module__, 'numpy.fft')
+ assert_equal(np.linalg.solve.__module__, 'numpy.linalg')
+
+ @pytest.mark.skipif(sys.version_info[0] < 3, reason="Python 3 only")
+ def test_inspect_sum(self):
+ signature = inspect.signature(np.sum)
+ assert_('axis' in signature.parameters)
+
+ @requires_array_function
+ def test_override_sum(self):
+ MyArray, implements = _new_duck_type_and_implements()
+
+ @implements(np.sum)
+ def _(array):
+ return 'yes'
+
+ assert_equal(np.sum(MyArray()), 'yes')
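The machinery this whole new file exercises boils down to a small protocol (NEP 18). A minimal duck array, sketched after _new_duck_type_and_implements above; in 1.16, dispatch must be enabled via NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=1:

    import numpy as np

    HANDLED = {}

    class MyArray(object):
        def __array_function__(self, func, types, args, kwargs):
            if func not in HANDLED:
                return NotImplemented      # NumPy then raises TypeError
            if not all(issubclass(t, MyArray) for t in types):
                return NotImplemented
            return HANDLED[func](*args, **kwargs)

    def implements(np_func):
        """Register a MyArray implementation of a NumPy function."""
        def decorator(func):
            HANDLED[np_func] = func
            return func
        return decorator

    @implements(np.sum)
    def _(array):
        return 'duck sum'

    # with dispatch enabled: np.sum(MyArray()) == 'duck sum'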
diff --git a/numpy/core/tests/test_print.py b/numpy/core/tests/test_print.py
index 433208748..c5c091e13 100644
--- a/numpy/core/tests/test_print.py
+++ b/numpy/core/tests/test_print.py
@@ -2,8 +2,10 @@ from __future__ import division, absolute_import, print_function
import sys
+import pytest
+
import numpy as np
-from numpy.testing import assert_, assert_equal, SkipTest
+from numpy.testing import assert_, assert_equal
from numpy.core.tests._locales import CommaDecimalPointLocale
@@ -15,7 +17,15 @@ else:
_REF = {np.inf: 'inf', -np.inf: '-inf', np.nan: 'nan'}
-def check_float_type(tp):
+@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble])
+def test_float_types(tp):
+ """ Check formatting.
+
+ This is only for the str function, and only for simple types.
+ The precision of np.float32 and np.longdouble aren't the same as the
+ python float precision.
+
+ """
for x in [0, 1, -1, 1e20]:
assert_equal(str(tp(x)), str(float(x)),
err_msg='Failed str formatting for type %s' % tp)
@@ -28,34 +38,30 @@ def check_float_type(tp):
assert_equal(str(tp(1e16)), ref,
err_msg='Failed str formatting for type %s' % tp)
-def test_float_types():
- """ Check formatting.
+
+@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble])
+def test_nan_inf_float(tp):
+ """ Check formatting of nan & inf.
This is only for the str function, and only for simple types.
The precision of np.float32 and np.longdouble aren't the same as the
python float precision.
"""
- for t in [np.float32, np.double, np.longdouble]:
- check_float_type(t)
-
-def check_nan_inf_float(tp):
for x in [np.inf, -np.inf, np.nan]:
assert_equal(str(tp(x)), _REF[x],
err_msg='Failed str formatting for type %s' % tp)
-def test_nan_inf_float():
- """ Check formatting of nan & inf.
+
+@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble])
+def test_complex_types(tp):
+ """Check formatting of complex types.
This is only for the str function, and only for simple types.
The precision of np.float32 and np.longdouble aren't the same as the
python float precision.
"""
- for t in [np.float32, np.double, np.longdouble]:
- check_nan_inf_float(t)
-
-def check_complex_type(tp):
for x in [0, 1, -1, 1e20]:
assert_equal(str(tp(x)), str(complex(x)),
err_msg='Failed str formatting for type %s' % tp)
@@ -72,18 +78,9 @@ def check_complex_type(tp):
assert_equal(str(tp(1e16)), ref,
err_msg='Failed str formatting for type %s' % tp)
-def test_complex_types():
- """Check formatting of complex types.
-
- This is only for the str function, and only for simple types.
- The precision of np.float32 and np.longdouble aren't the same as the
- python float precision.
-
- """
- for t in [np.complex64, np.cdouble, np.clongdouble]:
- check_complex_type(t)
-def test_complex_inf_nan():
+@pytest.mark.parametrize('dtype', [np.complex64, np.cdouble, np.clongdouble])
+def test_complex_inf_nan(dtype):
"""Check inf/nan formatting of complex types."""
TESTS = {
complex(np.inf, 0): "(inf+0j)",
@@ -103,12 +100,9 @@ def test_complex_inf_nan():
complex(-np.nan, 1): "(nan+1j)",
complex(1, -np.nan): "(1+nanj)",
}
- for tp in [np.complex64, np.cdouble, np.clongdouble]:
- for c, s in TESTS.items():
- _check_complex_inf_nan(c, s, tp)
+ for c, s in TESTS.items():
+ assert_equal(str(dtype(c)), s)
-def _check_complex_inf_nan(c, s, dtype):
- assert_equal(str(dtype(c)), s)
# print tests
def _test_redirected_print(x, tp, ref=None):
@@ -129,7 +123,10 @@ def _test_redirected_print(x, tp, ref=None):
assert_equal(file.getvalue(), file_tp.getvalue(),
err_msg='print failed for type%s' % tp)
-def check_float_type_print(tp):
+
+@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble])
+def test_float_type_print(tp):
+ """Check formatting when using print """
for x in [0, 1, -1, 1e20]:
_test_redirected_print(float(x), tp)
@@ -142,7 +139,10 @@ def check_float_type_print(tp):
ref = '1e+16'
_test_redirected_print(float(1e16), tp, ref)
-def check_complex_type_print(tp):
+
+@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble])
+def test_complex_type_print(tp):
+ """Check formatting when using print """
# We do not create complex with inf/nan directly because the feature is
# missing in python < 2.6
for x in [0, 1, -1, 1e20]:
@@ -158,15 +158,6 @@ def check_complex_type_print(tp):
_test_redirected_print(complex(-np.inf, 1), tp, '(-inf+1j)')
_test_redirected_print(complex(-np.nan, 1), tp, '(nan+1j)')
-def test_float_type_print():
- """Check formatting when using print """
- for t in [np.float32, np.double, np.longdouble]:
- check_float_type_print(t)
-
-def test_complex_type_print():
- """Check formatting when using print """
- for t in [np.complex64, np.cdouble, np.clongdouble]:
- check_complex_type_print(t)
def test_scalar_format():
"""Test the str.format method with NumPy scalar types"""
diff --git a/numpy/core/tests/test_records.py b/numpy/core/tests/test_records.py
index d7c7d16e3..c059ef510 100644
--- a/numpy/core/tests/test_records.py
+++ b/numpy/core/tests/test_records.py
@@ -7,17 +7,17 @@ try:
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc
-import pickle
-import warnings
import textwrap
from os import path
import pytest
import numpy as np
+from numpy.compat import Path
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_array_almost_equal,
- assert_raises, assert_warns
+ assert_raises, temppath
)
+from numpy.core.numeric import pickle
class TestFromrecords(object):
@@ -325,6 +325,23 @@ class TestFromrecords(object):
assert_equal(rec['f1'], [b'', b'', b''])
+@pytest.mark.skipif(Path is None, reason="No pathlib.Path")
+class TestPathUsage(object):
+ # Test that pathlib.Path can be used
+ def test_tofile_fromfile(self):
+ with temppath(suffix='.bin') as path:
+ path = Path(path)
+ np.random.seed(123)
+ a = np.random.rand(10).astype('f8,i4,a5')
+ a[5] = (0.5,10,'abcde')
+ with path.open("wb") as fd:
+ a.tofile(fd)
+ x = np.core.records.fromfile(path,
+ formats='f8,i4,a5',
+ shape=10)
+ assert_array_equal(x, a)
+
+
class TestRecord(object):
def setup(self):
self.data = np.rec.fromrecords([(1, 2, 3), (4, 5, 6)],
@@ -361,7 +378,6 @@ class TestRecord(object):
with assert_raises(ValueError):
r.setfield([2,3], *r.dtype.fields['f'])
- @pytest.mark.xfail(reason="See gh-10411, becomes real error in 1.16")
def test_out_of_order_fields(self):
# names in the same order, padding added to descr
x = self.data[['col1', 'col2']]
@@ -378,22 +394,27 @@ class TestRecord(object):
def test_pickle_1(self):
# Issue #1529
a = np.array([(1, [])], dtype=[('a', np.int32), ('b', np.int32, 0)])
- assert_equal(a, pickle.loads(pickle.dumps(a)))
- assert_equal(a[0], pickle.loads(pickle.dumps(a[0])))
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ assert_equal(a, pickle.loads(pickle.dumps(a, protocol=proto)))
+ assert_equal(a[0], pickle.loads(pickle.dumps(a[0],
+ protocol=proto)))
def test_pickle_2(self):
a = self.data
- assert_equal(a, pickle.loads(pickle.dumps(a)))
- assert_equal(a[0], pickle.loads(pickle.dumps(a[0])))
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ assert_equal(a, pickle.loads(pickle.dumps(a, protocol=proto)))
+ assert_equal(a[0], pickle.loads(pickle.dumps(a[0],
+ protocol=proto)))
def test_pickle_3(self):
# Issue #7140
a = self.data
- pa = pickle.loads(pickle.dumps(a[0]))
- assert_(pa.flags.c_contiguous)
- assert_(pa.flags.f_contiguous)
- assert_(pa.flags.writeable)
- assert_(pa.flags.aligned)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ pa = pickle.loads(pickle.dumps(a[0], protocol=proto))
+ assert_(pa.flags.c_contiguous)
+ assert_(pa.flags.f_contiguous)
+ assert_(pa.flags.writeable)
+ assert_(pa.flags.aligned)
def test_objview_record(self):
# https://github.com/numpy/numpy/issues/2599
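The recurring idiom in this file (and in test_regression.py below) runs every pickle round-trip across all protocols NumPy supports rather than only the default; protocol 2 is the floor here. A sketch of the loop, using the same internal pickle shim the tests import:

    import numpy as np
    from numpy.core.numeric import pickle   # numpy 1.16's pickle shim

    a = np.rec.fromrecords([(1, 2.0)], names='x,y')
    for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
        b = pickle.loads(pickle.dumps(a, protocol=proto))
        np.testing.assert_equal(a, b)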
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index 26d79468f..2421a1161 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -1,11 +1,8 @@
from __future__ import division, absolute_import, print_function
import copy
-import pickle
import sys
-import platform
import gc
-import warnings
import tempfile
import pytest
from os import path
@@ -16,9 +13,11 @@ import numpy as np
from numpy.testing import (
assert_, assert_equal, IS_PYPY, assert_almost_equal,
assert_array_equal, assert_array_almost_equal, assert_raises,
- assert_warns, suppress_warnings, _assert_valid_refcount, HAS_REFCOUNT,
+ assert_raises_regex, assert_warns, suppress_warnings,
+ _assert_valid_refcount, HAS_REFCOUNT,
)
from numpy.compat import asbytes, asunicode, long
+from numpy.core.numeric import pickle
try:
RecursionError
@@ -38,17 +37,20 @@ class TestRegression(object):
def test_pickle_transposed(self):
# Ticket #16
a = np.transpose(np.array([[2, 9], [7, 0], [3, 8]]))
- f = BytesIO()
- pickle.dump(a, f)
- f.seek(0)
- b = pickle.load(f)
- f.close()
- assert_array_equal(a, b)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ f = BytesIO()
+ pickle.dump(a, f, protocol=proto)
+ f.seek(0)
+ b = pickle.load(f)
+ f.close()
+ assert_array_equal(a, b)
def test_typeNA(self):
- # Ticket #31
- assert_equal(np.typeNA[np.int64], 'Int64')
- assert_equal(np.typeNA[np.uint64], 'UInt64')
+ # Issue gh-515
+ with suppress_warnings() as sup:
+ sup.filter(np.VisibleDeprecationWarning)
+ assert_equal(np.typeNA[np.int64], 'Int64')
+ assert_equal(np.typeNA[np.uint64], 'UInt64')
def test_dtype_names(self):
# Ticket #35
@@ -92,12 +94,13 @@ class TestRegression(object):
def test_char_dump(self):
# Ticket #50
- f = BytesIO()
ca = np.char.array(np.arange(1000, 1010), itemsize=4)
- ca.dump(f)
- f.seek(0)
- ca = np.load(f)
- f.close()
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ f = BytesIO()
+ pickle.dump(ca, f, protocol=proto)
+ f.seek(0)
+ ca = np.load(f)
+ f.close()
def test_noncontiguous_fill(self):
# Ticket #58.
@@ -356,12 +359,13 @@ class TestRegression(object):
def test_unpickle_dtype_with_object(self):
# Implemented in r2840
dt = np.dtype([('x', int), ('y', np.object_), ('z', 'O')])
- f = BytesIO()
- pickle.dump(dt, f)
- f.seek(0)
- dt_ = pickle.load(f)
- f.close()
- assert_equal(dt, dt_)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ f = BytesIO()
+ pickle.dump(dt, f, protocol=proto)
+ f.seek(0)
+ dt_ = pickle.load(f)
+ f.close()
+ assert_equal(dt, dt_)
def test_mem_array_creation_invalid_specification(self):
# Ticket #196
@@ -471,7 +475,8 @@ class TestRegression(object):
def test_pickle_dtype(self):
# Ticket #251
- pickle.dumps(float)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ pickle.dumps(float, protocol=proto)
def test_swap_real(self):
# Ticket #265
@@ -815,8 +820,9 @@ class TestRegression(object):
# Ticket #600
x = np.array(["DROND", "DROND1"], dtype="U6")
el = x[1]
- new = pickle.loads(pickle.dumps(el))
- assert_equal(new, el)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ new = pickle.loads(pickle.dumps(el, protocol=proto))
+ assert_equal(new, el)
def test_arange_non_native_dtype(self):
# Ticket #616
@@ -1063,11 +1069,12 @@ class TestRegression(object):
def test_dot_alignment_sse2(self):
# Test for ticket #551, changeset r5140
x = np.zeros((30, 40))
- y = pickle.loads(pickle.dumps(x))
- # y is now typically not aligned on a 8-byte boundary
- z = np.ones((1, y.shape[0]))
- # This shouldn't cause a segmentation fault:
- np.dot(z, y)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ y = pickle.loads(pickle.dumps(x, protocol=proto))
+ # y is now typically not aligned on an 8-byte boundary
+ z = np.ones((1, y.shape[0]))
+ # This shouldn't cause a segmentation fault:
+ np.dot(z, y)
def test_astype_copy(self):
# Ticket #788, changeset r5155
@@ -1277,9 +1284,12 @@ class TestRegression(object):
assert_(test_record_void_scalar == test_record)
- #Test pickle and unpickle of void and record scalars
- assert_(pickle.loads(pickle.dumps(test_string)) == test_string)
- assert_(pickle.loads(pickle.dumps(test_record)) == test_record)
+ # Test pickle and unpickle of void and record scalars
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ assert_(pickle.loads(
+ pickle.dumps(test_string, protocol=proto)) == test_string)
+ assert_(pickle.loads(
+ pickle.dumps(test_record, protocol=proto)) == test_record)
def test_blasdot_uninitialized_memory(self):
# Ticket #950
@@ -1307,28 +1317,18 @@ class TestRegression(object):
# Regression test for #1061.
# Set a size which cannot fit into a 64 bits signed integer
sz = 2 ** 64
- good = 'Maximum allowed dimension exceeded'
- try:
+ with assert_raises_regex(ValueError,
+ 'Maximum allowed dimension exceeded'):
np.empty(sz)
- except ValueError as e:
- if not str(e) == good:
- self.fail("Got msg '%s', expected '%s'" % (e, good))
- except Exception as e:
- self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_huge_arange(self):
# Regression test for #1062.
# Set a size which cannot fit into a 64 bits signed integer
sz = 2 ** 64
- good = 'Maximum allowed size exceeded'
- try:
+ with assert_raises_regex(ValueError,
+ 'Maximum allowed size exceeded'):
np.arange(sz)
assert_(np.size == sz)
- except ValueError as e:
- if not str(e) == good:
- self.fail("Got msg '%s', expected '%s'" % (e, good))
- except Exception as e:
- self.fail("Got exception of type %s instead of ValueError" % type(e))
def test_fromiter_bytes(self):
# Ticket #1058
@@ -1826,7 +1826,6 @@ class TestRegression(object):
assert_equal(oct(a), oct(0))
assert_equal(hex(a), hex(0))
-
def test_object_array_self_copy(self):
# An object array being copied into itself DECREF'ed before INCREF'ing
# causing segmentation faults (gh-3787)
@@ -1930,11 +1929,12 @@ class TestRegression(object):
def test_pickle_bytes_overwrite(self):
if sys.version_info[0] >= 3:
- data = np.array([1], dtype='b')
- data = pickle.loads(pickle.dumps(data))
- data[0] = 0xdd
- bytestring = "\x01 ".encode('ascii')
- assert_equal(bytestring[0:1], '\x01'.encode('ascii'))
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ data = np.array([1], dtype='b')
+ data = pickle.loads(pickle.dumps(data, protocol=proto))
+ data[0] = 0xdd
+ bytestring = "\x01 ".encode('ascii')
+ assert_equal(bytestring[0:1], '\x01'.encode('ascii'))
def test_pickle_py2_array_latin1_hack(self):
# Check that unpickling hacks in Py3 that support
@@ -2236,10 +2236,10 @@ class TestRegression(object):
def test_pickle_empty_string(self):
# gh-3926
-
- import pickle
- test_string = np.string_('')
- assert_equal(pickle.loads(pickle.dumps(test_string)), test_string)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ test_string = np.string_('')
+ assert_equal(pickle.loads(
+ pickle.dumps(test_string, protocol=proto)), test_string)
def test_frompyfunc_many_args(self):
# gh-5672
@@ -2407,7 +2407,7 @@ class TestRegression(object):
t = np.dtype([((s, 'f1'), np.float64)])
data = np.zeros(10, t)
for i in range(10):
- v = str(data[['f1']])
+ str(data[['f1']])
if HAS_REFCOUNT:
assert_(base <= sys.getrefcount(s))
diff --git a/numpy/core/tests/test_scalarbuffer.py b/numpy/core/tests/test_scalarbuffer.py
index cb6c521e1..cd520d99b 100644
--- a/numpy/core/tests/test_scalarbuffer.py
+++ b/numpy/core/tests/test_scalarbuffer.py
@@ -28,35 +28,36 @@ scalars_and_codes = [
(np.cdouble, 'Zd'),
(np.clongdouble, 'Zg'),
]
+scalars_only, codes_only = zip(*scalars_and_codes)
@pytest.mark.skipif(sys.version_info.major < 3,
reason="Python 2 scalars lack a buffer interface")
class TestScalarPEP3118(object):
- def test_scalar_match_array(self):
- for scalar, _ in scalars_and_codes:
- x = scalar()
- a = np.array([], dtype=np.dtype(scalar))
- mv_x = memoryview(x)
- mv_a = memoryview(a)
- assert_equal(mv_x.format, mv_a.format)
+ @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only)
+ def test_scalar_match_array(self, scalar):
+ x = scalar()
+ a = np.array([], dtype=np.dtype(scalar))
+ mv_x = memoryview(x)
+ mv_a = memoryview(a)
+ assert_equal(mv_x.format, mv_a.format)
- def test_scalar_dim(self):
- for scalar, _ in scalars_and_codes:
- x = scalar()
- mv_x = memoryview(x)
- assert_equal(mv_x.itemsize, np.dtype(scalar).itemsize)
- assert_equal(mv_x.ndim, 0)
- assert_equal(mv_x.shape, ())
- assert_equal(mv_x.strides, ())
- assert_equal(mv_x.suboffsets, ())
+ @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only)
+ def test_scalar_dim(self, scalar):
+ x = scalar()
+ mv_x = memoryview(x)
+ assert_equal(mv_x.itemsize, np.dtype(scalar).itemsize)
+ assert_equal(mv_x.ndim, 0)
+ assert_equal(mv_x.shape, ())
+ assert_equal(mv_x.strides, ())
+ assert_equal(mv_x.suboffsets, ())
- def test_scalar_known_code(self):
- for scalar, code in scalars_and_codes:
- x = scalar()
- mv_x = memoryview(x)
- assert_equal(mv_x.format, code)
+ @pytest.mark.parametrize('scalar, code', scalars_and_codes, ids=codes_only)
+ def test_scalar_known_code(self, scalar, code):
+ x = scalar()
+ mv_x = memoryview(x)
+ assert_equal(mv_x.format, code)
def test_void_scalar_structured_data(self):
dt = np.dtype([('name', np.unicode_, 16), ('grades', np.float64, (2,))])
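Background for the parametrization above: on Python 3, NumPy scalars expose the PEP 3118 buffer interface, and their memoryview metadata must agree with an array of the same dtype. A minimal sketch:

    import numpy as np

    mv = memoryview(np.float64(1.5))     # Python 3 only
    assert mv.format == 'd'              # same code as a float64 array
    assert mv.ndim == 0 and mv.shape == ()
    assert mv.itemsize == np.dtype(np.float64).itemsize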
diff --git a/numpy/core/tests/test_scalarinherit.py b/numpy/core/tests/test_scalarinherit.py
index 28436f6c7..9e32cf624 100644
--- a/numpy/core/tests/test_scalarinherit.py
+++ b/numpy/core/tests/test_scalarinherit.py
@@ -69,6 +69,7 @@ class TestCharacter(object):
np_s = np.string_('abc')
np_u = np.unicode_('abc')
np_i = np.int(5)
- res_np = np_s * np_i
res_s = b'abc' * 5
- assert_(res_np == res_s)
+ res_u = u'abc' * 5
+ assert_(np_s * np_i == res_s)
+ assert_(np_u * np_i == res_u)
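The added assertion extends the existing bytes check to unicode scalars: both now repeat like their Python counterparts when multiplied by an int. In short:

    import numpy as np

    assert np.string_('abc') * 5 == b'abc' * 5
    assert np.unicode_('abc') * 5 == u'abc' * 5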
diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py
index ab2ef5ce6..51bcf2b8d 100644
--- a/numpy/core/tests/test_scalarmath.py
+++ b/numpy/core/tests/test_scalarmath.py
@@ -9,7 +9,7 @@ import pytest
import numpy as np
from numpy.testing import (
- assert_, assert_equal, assert_raises, assert_almost_equal, assert_allclose,
+ assert_, assert_equal, assert_raises, assert_almost_equal,
assert_array_equal, IS_PYPY, suppress_warnings, _gen_alignment_data,
assert_warns
)
@@ -136,7 +136,7 @@ class TestPower(object):
# 1 ** -1 possible special case
base = [np.array(1, dt)[()] for dt in 'bhilqBHILQ']
for i1, i2 in itertools.product(base, exp):
- if i1.dtype.name != 'uint64':
+ if i1.dtype != np.uint64:
assert_raises(ValueError, operator.pow, i1, i2)
else:
res = operator.pow(i1, i2)
@@ -146,7 +146,7 @@ class TestPower(object):
# -1 ** -1 possible special case
base = [np.array(-1, dt)[()] for dt in 'bhilq']
for i1, i2 in itertools.product(base, exp):
- if i1.dtype.name != 'uint64':
+ if i1.dtype != np.uint64:
assert_raises(ValueError, operator.pow, i1, i2)
else:
res = operator.pow(i1, i2)
@@ -156,7 +156,7 @@ class TestPower(object):
# 2 ** -1 perhaps generic
base = [np.array(2, dt)[()] for dt in 'bhilqBHILQ']
for i1, i2 in itertools.product(base, exp):
- if i1.dtype.name != 'uint64':
+ if i1.dtype != np.uint64:
assert_raises(ValueError, operator.pow, i1, i2)
else:
res = operator.pow(i1, i2)
@@ -184,7 +184,7 @@ class TestPower(object):
a = 5
b = 4
c = 10
- expected = pow(a, b, c)
+ expected = pow(a, b, c) # noqa: F841
for t in (np.int32, np.float32, np.complex64):
# note that 3-operand power only dispatches on the first argument
assert_raises(TypeError, operator.pow, t(a), b, c)
@@ -519,7 +519,7 @@ class TestRepr(object):
storage_bytes = np.dtype(t).itemsize*8
# could add some more types to the list below
for which in ['small denorm', 'small norm']:
- # Values from http://en.wikipedia.org/wiki/IEEE_754
+ # Values from https://en.wikipedia.org/wiki/IEEE_754
constr = np.array([0x00]*storage_bytes, dtype=np.uint8)
if which == 'small denorm':
byte = last_fraction_bit_idx // 8
@@ -565,10 +565,10 @@ class TestMultiply(object):
# Some of this behaviour may be controversial and could be open for
# change.
accepted_types = set(np.typecodes["AllInteger"])
- deprecated_types = set('?')
+ deprecated_types = {'?'}
forbidden_types = (
set(np.typecodes["All"]) - accepted_types - deprecated_types)
- forbidden_types -= set('V') # can't default-construct void scalars
+ forbidden_types -= {'V'} # can't default-construct void scalars
for seq_type in (list, tuple):
seq = seq_type([1, 2, 3])
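The dtype-comparison cleanup above (i1.dtype != np.uint64 instead of matching .name strings) lives in tests for a genuine behavioural fork worth spelling out: a negative integer power raises for every integer dtype except a uint64 base, which has no wider signed type to promote with and so computes in float64. A sketch:

    import operator
    import numpy as np

    try:
        operator.pow(np.int64(2), np.int64(-1))  # ValueError for signed ints
    except ValueError:
        pass
    # uint64 combined with a signed exponent promotes to float64 instead
    assert operator.pow(np.uint64(2), np.int64(-1)) == 0.5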
diff --git a/numpy/core/tests/test_scalarprint.py b/numpy/core/tests/test_scalarprint.py
index 472ff691d..cde1355aa 100644
--- a/numpy/core/tests/test_scalarprint.py
+++ b/numpy/core/tests/test_scalarprint.py
@@ -10,7 +10,7 @@ import pytest
from tempfile import TemporaryFile
import numpy as np
-from numpy.testing import assert_, assert_equal, suppress_warnings, dec
+from numpy.testing import assert_, assert_equal, suppress_warnings
class TestRealScalars(object):
def test_str(self):
diff --git a/numpy/core/tests/test_shape_base.py b/numpy/core/tests/test_shape_base.py
index 72b3451a4..ef5c118ec 100644
--- a/numpy/core/tests/test_shape_base.py
+++ b/numpy/core/tests/test_shape_base.py
@@ -1,14 +1,17 @@
from __future__ import division, absolute_import, print_function
-import warnings
+import pytest
+import sys
import numpy as np
from numpy.core import (
array, arange, atleast_1d, atleast_2d, atleast_3d, block, vstack, hstack,
newaxis, concatenate, stack
)
+from numpy.core.shape_base import (_block_dispatcher, _block_setup,
+ _block_concatenate, _block_slicing)
from numpy.testing import (
assert_, assert_raises, assert_array_equal, assert_equal,
- assert_raises_regex, assert_almost_equal
+ assert_raises_regex, assert_warns
)
from numpy.compat import long
@@ -153,6 +156,14 @@ class TestHstack(object):
desired = array([[1, 1], [2, 2]])
assert_array_equal(res, desired)
+ def test_generator(self):
+ with assert_warns(FutureWarning):
+ hstack((np.arange(3) for _ in range(2)))
+ if sys.version_info.major > 2:
+ # map returns a list on Python 2
+ with assert_warns(FutureWarning):
+ hstack(map(lambda x: x, np.ones((3, 2))))
+
class TestVstack(object):
def test_non_iterable(self):
@@ -189,8 +200,18 @@ class TestVstack(object):
desired = array([[1, 2], [1, 2]])
assert_array_equal(res, desired)
+ def test_generator(self):
+ with assert_warns(FutureWarning):
+ vstack((np.arange(3) for _ in range(2)))
+
class TestConcatenate(object):
+ def test_returns_copy(self):
+ a = np.eye(3)
+ b = np.concatenate([a])
+ b[0, 0] = 2
+ assert b[0, 0] != a[0, 0]
+
def test_exceptions(self):
# test axis must be in bounds
for ndim in [1, 2, 3]:
@@ -346,7 +367,7 @@ def test_stack():
arrays = [np.random.randn(3, 4) for _ in range(10)]
axes = [0, 1, 2, -1, -2, -3]
expected_shapes = [(10, 3, 4), (3, 10, 4), (3, 4, 10),
- (3, 4, 10), (3, 10, 4), (10, 3, 4)]
+ (3, 4, 10), (3, 10, 4), (10, 3, 4)]
for axis, expected_shape in zip(axes, expected_shapes):
assert_equal(np.stack(arrays, axis).shape, expected_shape)
# empty arrays
@@ -364,10 +385,62 @@ def test_stack():
stack, [np.zeros((3, 3)), np.zeros(3)], axis=1)
assert_raises_regex(ValueError, 'must have the same shape',
stack, [np.arange(2), np.arange(3)])
+ # generator is deprecated
+ with assert_warns(FutureWarning):
+ result = stack((x for x in range(3)))
+ assert_array_equal(result, np.array([0, 1, 2]))
class TestBlock(object):
- def test_block_simple_row_wise(self):
+ @pytest.fixture(params=['block', 'force_concatenate', 'force_slicing'])
+ def block(self, request):
+ # Blocking small arrays and large arrays goes through different paths.
+ # The algorithm used depends on the number of element copies required.
+ # We define a test fixture that forces most tests to go through
+ # both code paths.
+ # Ultimately, this should be removed if a single algorithm is found
+ # to be faster for both small and large arrays.
+ def _block_force_concatenate(arrays):
+ arrays, list_ndim, result_ndim, _ = _block_setup(arrays)
+ return _block_concatenate(arrays, list_ndim, result_ndim)
+
+ def _block_force_slicing(arrays):
+ arrays, list_ndim, result_ndim, _ = _block_setup(arrays)
+ return _block_slicing(arrays, list_ndim, result_ndim)
+
+ if request.param == 'force_concatenate':
+ return _block_force_concatenate
+ elif request.param == 'force_slicing':
+ return _block_force_slicing
+ elif request.param == 'block':
+ return block
+ else:
+ raise ValueError('Unknown blocking request. There is a typo in the tests.')
+
+ def test_returns_copy(self, block):
+ a = np.eye(3)
+ b = block(a)
+ b[0, 0] = 2
+ assert b[0, 0] != a[0, 0]
+
+ def test_block_total_size_estimate(self, block):
+ _, _, _, total_size = _block_setup([1])
+ assert total_size == 1
+
+ _, _, _, total_size = _block_setup([[1]])
+ assert total_size == 1
+
+ _, _, _, total_size = _block_setup([[1, 1]])
+ assert total_size == 2
+
+ _, _, _, total_size = _block_setup([[1], [1]])
+ assert total_size == 2
+
+ _, _, _, total_size = _block_setup([[1, 2], [3, 4]])
+ assert total_size == 4
+
+ def test_block_simple_row_wise(self, block):
a_2d = np.ones((2, 2))
b_2d = 2 * a_2d
desired = np.array([[1, 1, 2, 2],
@@ -375,7 +448,7 @@ class TestBlock(object):
result = block([a_2d, b_2d])
assert_equal(desired, result)
- def test_block_simple_column_wise(self):
+ def test_block_simple_column_wise(self, block):
a_2d = np.ones((2, 2))
b_2d = 2 * a_2d
expected = np.array([[1, 1],
@@ -385,7 +458,7 @@ class TestBlock(object):
result = block([[a_2d], [b_2d]])
assert_equal(expected, result)
- def test_block_with_1d_arrays_row_wise(self):
+ def test_block_with_1d_arrays_row_wise(self, block):
# 1-D vectors are treated as row arrays
a = np.array([1, 2, 3])
b = np.array([2, 3, 4])
@@ -393,7 +466,7 @@ class TestBlock(object):
result = block([a, b])
assert_equal(expected, result)
- def test_block_with_1d_arrays_multiple_rows(self):
+ def test_block_with_1d_arrays_multiple_rows(self, block):
a = np.array([1, 2, 3])
b = np.array([2, 3, 4])
expected = np.array([[1, 2, 3, 2, 3, 4],
@@ -401,7 +474,7 @@ class TestBlock(object):
result = block([[a, b], [a, b]])
assert_equal(expected, result)
- def test_block_with_1d_arrays_column_wise(self):
+ def test_block_with_1d_arrays_column_wise(self, block):
# 1-D vectors are treated as row arrays
a_1d = np.array([1, 2, 3])
b_1d = np.array([2, 3, 4])
@@ -410,7 +483,7 @@ class TestBlock(object):
result = block([[a_1d], [b_1d]])
assert_equal(expected, result)
- def test_block_mixed_1d_and_2d(self):
+ def test_block_mixed_1d_and_2d(self, block):
a_2d = np.ones((2, 2))
b_1d = np.array([2, 2])
result = block([[a_2d], [b_1d]])
@@ -419,7 +492,7 @@ class TestBlock(object):
[2, 2]])
assert_equal(expected, result)
- def test_block_complicated(self):
+ def test_block_complicated(self, block):
# a bit more complicated
one_2d = np.array([[1, 1, 1]])
two_2d = np.array([[2, 2, 2]])
@@ -443,7 +516,7 @@ class TestBlock(object):
[zero_2d]])
assert_equal(result, expected)
- def test_nested(self):
+ def test_nested(self, block):
one = np.array([1, 1, 1])
two = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]])
three = np.array([3, 3, 3])
@@ -452,9 +525,9 @@ class TestBlock(object):
six = np.array([6, 6, 6, 6, 6])
zero = np.zeros((2, 6))
- result = np.block([
+ result = block([
[
- np.block([
+ block([
[one],
[three],
[four]
@@ -473,7 +546,7 @@ class TestBlock(object):
assert_equal(result, expected)
- def test_3d(self):
+ def test_3d(self, block):
a000 = np.ones((2, 2, 2), int) * 1
a100 = np.ones((3, 2, 2), int) * 2
@@ -486,7 +559,7 @@ class TestBlock(object):
a111 = np.ones((3, 3, 3), int) * 8
- result = np.block([
+ result = block([
[
[a000, a001],
[a010, a011],
@@ -528,55 +601,102 @@ class TestBlock(object):
assert_array_equal(result, expected)
- def test_block_with_mismatched_shape(self):
+ def test_block_with_mismatched_shape(self, block):
a = np.array([0, 0])
b = np.eye(2)
- assert_raises(ValueError, np.block, [a, b])
- assert_raises(ValueError, np.block, [b, a])
+ assert_raises(ValueError, block, [a, b])
+ assert_raises(ValueError, block, [b, a])
- def test_no_lists(self):
- assert_equal(np.block(1), np.array(1))
- assert_equal(np.block(np.eye(3)), np.eye(3))
+ to_block = [[np.ones((2, 3)), np.ones((2, 2))],
+ [np.ones((2, 2)), np.ones((2, 2))]]
+ assert_raises(ValueError, block, to_block)
+ def test_no_lists(self, block):
+ assert_equal(block(1), np.array(1))
+ assert_equal(block(np.eye(3)), np.eye(3))
- def test_invalid_nesting(self):
+ def test_invalid_nesting(self, block):
msg = 'depths are mismatched'
- assert_raises_regex(ValueError, msg, np.block, [1, [2]])
- assert_raises_regex(ValueError, msg, np.block, [1, []])
- assert_raises_regex(ValueError, msg, np.block, [[1], 2])
- assert_raises_regex(ValueError, msg, np.block, [[], 2])
- assert_raises_regex(ValueError, msg, np.block, [
+ assert_raises_regex(ValueError, msg, block, [1, [2]])
+ assert_raises_regex(ValueError, msg, block, [1, []])
+ assert_raises_regex(ValueError, msg, block, [[1], 2])
+ assert_raises_regex(ValueError, msg, block, [[], 2])
+ assert_raises_regex(ValueError, msg, block, [
[[1], [2]],
[[3, 4]],
[5] # missing brackets
])
- def test_empty_lists(self):
- assert_raises_regex(ValueError, 'empty', np.block, [])
- assert_raises_regex(ValueError, 'empty', np.block, [[]])
- assert_raises_regex(ValueError, 'empty', np.block, [[1], []])
+ def test_empty_lists(self, block):
+ assert_raises_regex(ValueError, 'empty', block, [])
+ assert_raises_regex(ValueError, 'empty', block, [[]])
+ assert_raises_regex(ValueError, 'empty', block, [[1], []])
- def test_tuple(self):
- assert_raises_regex(TypeError, 'tuple', np.block, ([1, 2], [3, 4]))
- assert_raises_regex(TypeError, 'tuple', np.block, [(1, 2), (3, 4)])
+ def test_tuple(self, block):
+ assert_raises_regex(TypeError, 'tuple', block, ([1, 2], [3, 4]))
+ assert_raises_regex(TypeError, 'tuple', block, [(1, 2), (3, 4)])
- def test_different_ndims(self):
+ def test_different_ndims(self, block):
a = 1.
b = 2 * np.ones((1, 2))
c = 3 * np.ones((1, 1, 3))
- result = np.block([a, b, c])
+ result = block([a, b, c])
expected = np.array([[[1., 2., 2., 3., 3., 3.]]])
assert_equal(result, expected)
- def test_different_ndims_depths(self):
+ def test_different_ndims_depths(self, block):
a = 1.
b = 2 * np.ones((1, 2))
c = 3 * np.ones((1, 2, 3))
- result = np.block([[a, b], [c]])
+ result = block([[a, b], [c]])
expected = np.array([[[1., 2., 2.],
[3., 3., 3.],
[3., 3., 3.]]])
assert_equal(result, expected)
+
+ def test_block_memory_order(self, block):
+ # 3D
+ arr_c = np.zeros((3,)*3, order='C')
+ arr_f = np.zeros((3,)*3, order='F')
+
+ b_c = [[[arr_c, arr_c],
+ [arr_c, arr_c]],
+ [[arr_c, arr_c],
+ [arr_c, arr_c]]]
+
+ b_f = [[[arr_f, arr_f],
+ [arr_f, arr_f]],
+ [[arr_f, arr_f],
+ [arr_f, arr_f]]]
+
+ assert block(b_c).flags['C_CONTIGUOUS']
+ assert block(b_f).flags['F_CONTIGUOUS']
+
+ arr_c = np.zeros((3, 3), order='C')
+ arr_f = np.zeros((3, 3), order='F')
+ # 2D
+ b_c = [[arr_c, arr_c],
+ [arr_c, arr_c]]
+
+ b_f = [[arr_f, arr_f],
+ [arr_f, arr_f]]
+
+ assert block(b_c).flags['C_CONTIGUOUS']
+ assert block(b_f).flags['F_CONTIGUOUS']
+
+
+def test_block_dispatcher():
+ class ArrayLike(object):
+ pass
+ a = ArrayLike()
+ b = ArrayLike()
+ c = ArrayLike()
+ assert_equal(list(_block_dispatcher(a)), [a])
+ assert_equal(list(_block_dispatcher([a])), [a])
+ assert_equal(list(_block_dispatcher([a, b])), [a, b])
+ assert_equal(list(_block_dispatcher([[a], [b, [c]]])), [a, b, c])
+ # don't recurse into non-lists
+ assert_equal(list(_block_dispatcher((a, b))), [(a, b)])
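
The tests above pin down two behaviors of this release: the stacking functions still accept generators but emit a FutureWarning, and np.block always returns a copy. A minimal sketch of the deprecation, assuming NumPy >= 1.16:

import warnings
import numpy as np

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    np.hstack(np.arange(3) for _ in range(2))  # deprecated generator input
assert any(issubclass(w.category, FutureWarning) for w in caught)

# The supported spelling materializes the generator first:
res = np.hstack([np.arange(3) for _ in range(2)])
assert res.tolist() == [0, 1, 2, 0, 1, 2]
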
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index ef9ced354..b83b8ccff 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -13,6 +13,7 @@ from numpy.testing import (
assert_almost_equal, assert_array_almost_equal, assert_no_warnings,
assert_allclose,
)
+from numpy.core.numeric import pickle
class TestUfuncKwargs(object):
@@ -43,16 +44,17 @@ class TestUfuncKwargs(object):
class TestUfunc(object):
def test_pickle(self):
- import pickle
- assert_(pickle.loads(pickle.dumps(np.sin)) is np.sin)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ assert_(pickle.loads(pickle.dumps(np.sin,
+ protocol=proto)) is np.sin)
- # Check that ufunc not defined in the top level numpy namespace such as
- # numpy.core._rational_tests.test_add can also be pickled
- res = pickle.loads(pickle.dumps(_rational_tests.test_add))
- assert_(res is _rational_tests.test_add)
+ # Check that ufunc not defined in the top level numpy namespace
+ # such as numpy.core._rational_tests.test_add can also be pickled
+ res = pickle.loads(pickle.dumps(_rational_tests.test_add,
+ protocol=proto))
+ assert_(res is _rational_tests.test_add)
def test_pickle_withstring(self):
- import pickle
astring = (b"cnumpy.core\n_ufunc_reconstruct\np0\n"
b"(S'numpy.core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.")
assert_(pickle.loads(astring) is np.cos)
@@ -286,20 +288,98 @@ class TestUfunc(object):
"""
pass
- def test_signature(self):
+ # from include/numpy/ufuncobject.h
+ size_inferred = 2
+ can_ignore = 4
+ def test_signature0(self):
# the arguments to test_signature are: nin, nout, core_signature
- # pass
- enabled, num_dims, ixs = umt.test_signature(2, 1, "(i),(i)->()")
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 2, 1, "(i),(i)->()")
assert_equal(enabled, 1)
assert_equal(num_dims, (1, 1, 0))
assert_equal(ixs, (0, 0))
+ assert_equal(flags, (self.size_inferred,))
+ assert_equal(sizes, (-1,))
+ def test_signature1(self):
# empty core signature; treat as plain ufunc (with trivial core)
- enabled, num_dims, ixs = umt.test_signature(2, 1, "(),()->()")
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 2, 1, "(),()->()")
assert_equal(enabled, 0)
assert_equal(num_dims, (0, 0, 0))
assert_equal(ixs, ())
+ assert_equal(flags, ())
+ assert_equal(sizes, ())
+ def test_signature2(self):
+ # more complicated names for variables
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 2, 1, "(i1,i2),(J_1)->(_kAB)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (2, 1, 1))
+ assert_equal(ixs, (0, 1, 2, 3))
+ assert_equal(flags, (self.size_inferred,)*4)
+ assert_equal(sizes, (-1, -1, -1, -1))
+
+ def test_signature3(self):
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 2, 1, u"(i1, i12), (J_1)->(i12, i2)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (2, 1, 2))
+ assert_equal(ixs, (0, 1, 2, 1, 3))
+ assert_equal(flags, (self.size_inferred,)*4)
+ assert_equal(sizes, (-1, -1, -1, -1))
+
+ def test_signature4(self):
+ # matrix_multiply signature from _umath_tests
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 2, 1, "(n,k),(k,m)->(n,m)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (2, 2, 2))
+ assert_equal(ixs, (0, 1, 1, 2, 0, 2))
+ assert_equal(flags, (self.size_inferred,)*3)
+ assert_equal(sizes, (-1, -1, -1))
+
+ def test_signature5(self):
+ # matmul signature from _umath_tests
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 2, 1, "(n?,k),(k,m?)->(n?,m?)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (2, 2, 2))
+ assert_equal(ixs, (0, 1, 1, 2, 0, 2))
+ assert_equal(flags, (self.size_inferred | self.can_ignore,
+ self.size_inferred,
+ self.size_inferred | self.can_ignore))
+ assert_equal(sizes, (-1, -1, -1))
+
+ def test_signature6(self):
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 1, 1, "(3)->()")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (1, 0))
+ assert_equal(ixs, (0,))
+ assert_equal(flags, (0,))
+ assert_equal(sizes, (3,))
+
+ def test_signature7(self):
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 3, 1, "(3),(03,3),(n)->(9)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (1, 2, 1, 1))
+ assert_equal(ixs, (0, 0, 0, 1, 2))
+ assert_equal(flags, (0, self.size_inferred, 0))
+ assert_equal(sizes, (3, -1, 9))
+
+ def test_signature8(self):
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 3, 1, "(3?),(3?,3?),(n)->(9)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (1, 2, 1, 1))
+ assert_equal(ixs, (0, 0, 0, 1, 2))
+ assert_equal(flags, (self.can_ignore, self.size_inferred, 0))
+ assert_equal(sizes, (3, -1, 9))
+
+ def test_signature_failure0(self):
# In the following calls, a ValueError should be raised because
# of an error in the core signature.
# FIXME: these should be using assert_raises
@@ -312,6 +392,7 @@ class TestUfunc(object):
except ValueError:
pass
+ def test_signature_failure1(self):
# error: parenthesis matching
msg = "core_sig: parenthesis matching"
try:
@@ -320,6 +401,7 @@ class TestUfunc(object):
except ValueError:
pass
+ def test_signature_failure2(self):
# error: incomplete signature. Letters outside of parentheses are ignored.
msg = "core_sig: incomplete signature"
try:
@@ -328,6 +410,7 @@ class TestUfunc(object):
except ValueError:
pass
+ def test_signature_failure3(self):
# error: incomplete signature. 2 output arguments are specified
msg = "core_sig: incomplete signature"
try:
@@ -336,12 +419,6 @@ class TestUfunc(object):
except ValueError:
pass
- # more complicated names for variables
- enabled, num_dims, ixs = umt.test_signature(2, 1, "(i1,i2),(J_1)->(_kAB)")
- assert_equal(enabled, 1)
- assert_equal(num_dims, (2, 1, 1))
- assert_equal(ixs, (0, 1, 2, 3))
-
def test_get_signature(self):
assert_equal(umt.inner1d.signature, "(i),(i)->()")
@@ -866,6 +943,89 @@ class TestUfunc(object):
w = np.array([], dtype='f8')
assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
+ def test_cross1d(self):
+ """Test with fixed-sized signature."""
+ a = np.eye(3)
+ assert_array_equal(umt.cross1d(a, a), np.zeros((3, 3)))
+ out = np.zeros((3, 3))
+ result = umt.cross1d(a[0], a, out)
+ assert_(result is out)
+ assert_array_equal(result, np.vstack((np.zeros(3), a[2], -a[1])))
+ assert_raises(ValueError, umt.cross1d, np.eye(4), np.eye(4))
+ assert_raises(ValueError, umt.cross1d, a, np.arange(4.))
+ assert_raises(ValueError, umt.cross1d, a, np.arange(3.), np.zeros((3, 4)))
+
+ def test_can_ignore_signature(self):
+ # Comparing the effects of ? in signature:
+ # matrix_multiply: (m,n),(n,p)->(m,p) # all must be there.
+ # matmul: (m?,n),(n,p?)->(m?,p?) # allow missing m, p.
+ mat = np.arange(12).reshape((2, 3, 2))
+ single_vec = np.arange(2)
+ col_vec = single_vec[:, np.newaxis]
+ col_vec_array = np.arange(8).reshape((2, 2, 2, 1)) + 1
+ # matrix @ single column vector with proper dimension
+ mm_col_vec = umt.matrix_multiply(mat, col_vec)
+ # matmul does the same thing
+ matmul_col_vec = umt.matmul(mat, col_vec)
+ assert_array_equal(matmul_col_vec, mm_col_vec)
+ # matrix @ vector without dimension making it a column vector.
+ # matrix multiply fails -> missing core dim.
+ assert_raises(ValueError, umt.matrix_multiply, mat, single_vec)
+ # matmul mimicker passes, and returns a vector.
+ matmul_col = umt.matmul(mat, single_vec)
+ assert_array_equal(matmul_col, mm_col_vec.squeeze())
+ # Now with a column array: same as for column vector,
+ # broadcasting sensibly.
+ mm_col_vec = umt.matrix_multiply(mat, col_vec_array)
+ matmul_col_vec = umt.matmul(mat, col_vec_array)
+ assert_array_equal(matmul_col_vec, mm_col_vec)
+ # As above, but for row vector
+ single_vec = np.arange(3)
+ row_vec = single_vec[np.newaxis, :]
+ row_vec_array = np.arange(24).reshape((4, 2, 1, 1, 3)) + 1
+ # row vector @ matrix
+ mm_row_vec = umt.matrix_multiply(row_vec, mat)
+ matmul_row_vec = umt.matmul(row_vec, mat)
+ assert_array_equal(matmul_row_vec, mm_row_vec)
+ # single row vector @ matrix
+ assert_raises(ValueError, umt.matrix_multiply, single_vec, mat)
+ matmul_row = umt.matmul(single_vec, mat)
+ assert_array_equal(matmul_row, mm_row_vec.squeeze())
+ # row vector array @ matrix
+ mm_row_vec = umt.matrix_multiply(row_vec_array, mat)
+ matmul_row_vec = umt.matmul(row_vec_array, mat)
+ assert_array_equal(matmul_row_vec, mm_row_vec)
+ # Now for vector combinations
+ # row vector @ column vector
+ col_vec = row_vec.T
+ col_vec_array = row_vec_array.swapaxes(-2, -1)
+ mm_row_col_vec = umt.matrix_multiply(row_vec, col_vec)
+ matmul_row_col_vec = umt.matmul(row_vec, col_vec)
+ assert_array_equal(matmul_row_col_vec, mm_row_col_vec)
+ # single row vector @ single col vector
+ assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec)
+ matmul_row_col = umt.matmul(single_vec, single_vec)
+ assert_array_equal(matmul_row_col, mm_row_col_vec.squeeze())
+ # row vector array @ matrix
+ mm_row_col_array = umt.matrix_multiply(row_vec_array, col_vec_array)
+ matmul_row_col_array = umt.matmul(row_vec_array, col_vec_array)
+ assert_array_equal(matmul_row_col_array, mm_row_col_array)
+ # Finally, check that things are *not* squeezed if one gives an
+ # output.
+ out = np.zeros_like(mm_row_col_array)
+ out = umt.matrix_multiply(row_vec_array, col_vec_array, out=out)
+ assert_array_equal(out, mm_row_col_array)
+ out[:] = 0
+ out = umt.matmul(row_vec_array, col_vec_array, out=out)
+ assert_array_equal(out, mm_row_col_array)
+ # And check one cannot put missing dimensions back.
+ out = np.zeros_like(mm_row_col_vec)
+ assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec,
+ out)
+ # But fine for matmul, since it is just a broadcast.
+ out = umt.matmul(single_vec, single_vec, out)
+ assert_array_equal(out, mm_row_col_vec.squeeze())
+
def test_matrix_multiply(self):
self.compare_matrix_multiply_results(np.long)
self.compare_matrix_multiply_results(np.double)
@@ -1643,6 +1803,16 @@ class TestUfunc(object):
target = np.array([ True, False, False, False], dtype=bool)
assert_equal(np.all(target == (mra == ra[0])), True)
+ def test_scalar_equal(self):
+ # Scalar comparisons should always work, without deprecation warnings,
+ # even when the ufunc fails.
+ a = np.array(0.)
+ b = np.array('a')
+ assert_(a != b)
+ assert_(b != a)
+ assert_(not (a == b))
+ assert_(not (b == a))
+
def test_NotImplemented_not_returned(self):
# See gh-5964 and gh-2091. Some of these functions are not operator
# related and were fixed for other reasons in the past.
@@ -1652,17 +1822,16 @@ class TestUfunc(object):
np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
np.logical_and, np.logical_or, np.logical_xor, np.maximum,
- np.minimum, np.mod
- ]
-
- # These functions still return NotImplemented. Will be fixed in
- # future.
- # bad = [np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal]
+ np.minimum, np.mod,
+ np.greater, np.greater_equal, np.less, np.less_equal,
+ np.equal, np.not_equal]
a = np.array('1')
b = 1
+ c = np.array([1., 2.])
for f in binary_funcs:
assert_raises(TypeError, f, a, b)
+ assert_raises(TypeError, f, c, a)
def test_reduce_noncontig_output(self):
# Check that reduction deals with non-contiguous output arrays
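
The test_signature* and test_can_ignore_signature tests above exercise NEP 20's flexible core dimensions, where a '?' marks a dimension that may be absent. The public np.matmul exposes the same semantics; a short sketch:

import numpy as np

mat = np.arange(6.).reshape(2, 3)
vec = np.arange(3.)

# matmul behaves like the "(n?,k),(k,m?)->(n?,m?)" gufunc: with a
# 1-D right operand the 'm' core dimension is absent, so it is not
# reinserted in the result.
assert np.matmul(mat, vec).shape == (2,)
assert np.matmul(mat, vec[:, np.newaxis]).shape == (2, 1)
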
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index 29850108d..2f8edebc0 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -1,6 +1,5 @@
from __future__ import division, absolute_import, print_function
-import sys
import platform
import warnings
import fnmatch
@@ -14,7 +13,7 @@ from numpy.testing import (
assert_, assert_equal, assert_raises, assert_raises_regex,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
assert_allclose, assert_no_warnings, suppress_warnings,
- _gen_alignment_data, assert_warns
+ _gen_alignment_data
)
@@ -685,6 +684,10 @@ class TestLogAddExp(_FilterInvalids):
assert_(np.isnan(np.logaddexp(0, np.nan)))
assert_(np.isnan(np.logaddexp(np.nan, np.nan)))
+ def test_reduce(self):
+ assert_equal(np.logaddexp.identity, -np.inf)
+ assert_equal(np.logaddexp.reduce([]), -np.inf)
+
class TestLog1p(object):
def test_log1p(self):
@@ -1173,7 +1176,6 @@ class TestBitwiseUFuncs(object):
assert_(np.bitwise_xor(zeros, zeros).dtype == dt, msg)
assert_(np.bitwise_and(zeros, zeros).dtype == dt, msg)
-
def test_identity(self):
assert_(np.bitwise_or.identity == 0, 'bitwise_or')
assert_(np.bitwise_xor.identity == 0, 'bitwise_xor')
@@ -1295,6 +1297,7 @@ class TestSign(object):
# In reference to github issue #6229
def test_nan():
foo = np.array([np.nan])
+ # FIXME: 'a' is not used
a = np.sign(foo.astype(object))
assert_raises(TypeError, test_nan)
@@ -1328,20 +1331,18 @@ class TestMinMax(object):
assert_equal(d.max(), d[0])
assert_equal(d.min(), d[0])
- def test_reduce_warns(self):
+ def test_reduce_reorder(self):
# gh-10370, gh-11029: some compilers reorder the call to npy_getfloatstatus
# and put it before the call to an intrinsic function that causes
- # invalid status to be set. Also make sure warnings are emitted
+ # invalid status to be set. Also make sure warnings are not emitted
for n in (2, 4, 8, 16, 32):
- with suppress_warnings() as sup:
- sup.record(RuntimeWarning)
- for r in np.diagflat([np.nan] * n):
+ for dt in (np.float32, np.float16, np.complex64):
+ for r in np.diagflat(np.array([np.nan] * n, dtype=dt)):
assert_equal(np.min(r), np.nan)
- assert_equal(len(sup.log), n)
- def test_minimize_warns(self):
- # gh 11589
- assert_warns(RuntimeWarning, np.minimum, np.nan, 1)
+ def test_minimize_no_warns(self):
+ a = np.minimum(np.nan, 1)
+ assert_equal(a, np.nan)
class TestAbsoluteNegative(object):
@@ -1572,13 +1573,14 @@ class TestSpecialMethods(object):
class A(object):
def __array__(self):
- return np.zeros(1)
+ return np.zeros(2)
def __array_wrap__(self, arr, context):
raise RuntimeError
a = A()
assert_raises(RuntimeError, ncu.maximum, a, a)
+ assert_raises(RuntimeError, ncu.maximum.reduce, a)
def test_failing_out_wrap(self):
@@ -1749,18 +1751,22 @@ class TestSpecialMethods(object):
return "B"
class C(object):
+ def __init__(self):
+ self.count = 0
+
def __array_ufunc__(self, func, method, *inputs, **kwargs):
+ self.count += 1
return NotImplemented
class CSub(C):
def __array_ufunc__(self, func, method, *inputs, **kwargs):
+ self.count += 1
return NotImplemented
a = A()
a_sub = ASub()
b = B()
c = C()
- c_sub = CSub()
# Standard
res = np.multiply(a, a_sub)
@@ -1771,11 +1777,27 @@ class TestSpecialMethods(object):
# With 1 NotImplemented
res = np.multiply(c, a)
assert_equal(res, "A")
+ assert_equal(c.count, 1)
+ # Check our counter works, so we can trust tests below.
+ res = np.multiply(c, a)
+ assert_equal(c.count, 2)
# Both NotImplemented.
+ c = C()
+ c_sub = CSub()
assert_raises(TypeError, np.multiply, c, c_sub)
+ assert_equal(c.count, 1)
+ assert_equal(c_sub.count, 1)
+ c.count = c_sub.count = 0
assert_raises(TypeError, np.multiply, c_sub, c)
+ assert_equal(c.count, 1)
+ assert_equal(c_sub.count, 1)
+ c.count = 0
+ assert_raises(TypeError, np.multiply, c, c)
+ assert_equal(c.count, 1)
+ c.count = 0
assert_raises(TypeError, np.multiply, 2, c)
+ assert_equal(c.count, 1)
# Ternary testing.
assert_equal(three_mul_ufunc(a, 1, 2), "A")
@@ -1787,11 +1809,19 @@ class TestSpecialMethods(object):
assert_equal(three_mul_ufunc(a, 2, b), "A")
assert_equal(three_mul_ufunc(a, 2, a_sub), "ASub")
assert_equal(three_mul_ufunc(a, a_sub, 3), "ASub")
+ c.count = 0
assert_equal(three_mul_ufunc(c, a_sub, 3), "ASub")
+ assert_equal(c.count, 1)
+ c.count = 0
assert_equal(three_mul_ufunc(1, a_sub, c), "ASub")
+ assert_equal(c.count, 0)
+ c.count = 0
assert_equal(three_mul_ufunc(a, b, c), "A")
+ assert_equal(c.count, 0)
+ c_sub.count = 0
assert_equal(three_mul_ufunc(a, b, c_sub), "A")
+ assert_equal(c_sub.count, 0)
assert_equal(three_mul_ufunc(1, 2, b), "B")
assert_raises(TypeError, three_mul_ufunc, 1, 2, c)
@@ -1810,9 +1840,25 @@ class TestSpecialMethods(object):
assert_equal(four_mul_ufunc(a_sub, 1, 2, a), "ASub")
assert_equal(four_mul_ufunc(a, 1, 2, a_sub), "ASub")
+ c = C()
+ c_sub = CSub()
assert_raises(TypeError, four_mul_ufunc, 1, 2, 3, c)
+ assert_equal(c.count, 1)
+ c.count = 0
assert_raises(TypeError, four_mul_ufunc, 1, 2, c_sub, c)
- assert_raises(TypeError, four_mul_ufunc, 1, c, c_sub, c)
+ assert_equal(c_sub.count, 1)
+ assert_equal(c.count, 1)
+ c2 = C()
+ c.count = c_sub.count = 0
+ assert_raises(TypeError, four_mul_ufunc, 1, c, c_sub, c2)
+ assert_equal(c_sub.count, 1)
+ assert_equal(c.count, 1)
+ assert_equal(c2.count, 0)
+ c.count = c2.count = c_sub.count = 0
+ assert_raises(TypeError, four_mul_ufunc, c2, c, c_sub, c)
+ assert_equal(c_sub.count, 1)
+ assert_equal(c.count, 0)
+ assert_equal(c2.count, 1)
def test_ufunc_override_methods(self):
@@ -2402,11 +2448,6 @@ class TestRationalFunctions(object):
assert_equal(np.gcd(2**100, 3**100), 1)
-def is_longdouble_finfo_bogus():
- info = np.finfo(np.longcomplex)
- return not np.isfinite(np.log10(info.tiny/info.eps))
-
-
class TestComplexFunctions(object):
funcs = [np.arcsin, np.arccos, np.arctan, np.arcsinh, np.arccosh,
np.arctanh, np.sin, np.cos, np.tan, np.exp,
@@ -2502,7 +2543,8 @@ class TestComplexFunctions(object):
b = cfunc(p)
assert_(abs(a - b) < atol, "%s %s: %s; cmath: %s" % (fname, p, a, b))
- def check_loss_of_precision(self, dtype):
+ @pytest.mark.parametrize('dtype', [np.complex64, np.complex_, np.longcomplex])
+ def test_loss_of_precision(self, dtype):
"""Check loss of precision in complex arc* functions"""
# Check against known-good functions
@@ -2544,10 +2586,11 @@ class TestComplexFunctions(object):
# It's not guaranteed that the system-provided arc functions
# are accurate down to a few epsilons. (Eg. on Linux 64-bit)
# So, give more leeway for long complex tests here:
- check(x_series, 50*eps)
+ # Could use 2.1*eps on systems newer than Ubuntu LTS Trusty (2014), glibc 2.19.
+ check(x_series, 50.0*eps)
else:
check(x_series, 2.1*eps)
- check(x_basic, 2*eps/1e-3)
+ check(x_basic, 2.0*eps/1e-3)
# Check a few points
@@ -2587,15 +2630,6 @@ class TestComplexFunctions(object):
check(func, pts, 1j)
check(func, pts, 1+1j)
- def test_loss_of_precision(self):
- for dtype in [np.complex64, np.complex_]:
- self.check_loss_of_precision(dtype)
-
- @pytest.mark.skipif(is_longdouble_finfo_bogus(),
- reason="Bogus long double finfo")
- def test_loss_of_precision_longcomplex(self):
- self.check_loss_of_precision(np.longcomplex)
-
class TestAttributes(object):
def test_attributes(self):
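
The new test_reduce above relies on logaddexp having gained an identity, which makes empty reductions well defined. A quick check, assuming NumPy >= 1.16:

import numpy as np

assert np.logaddexp.identity == -np.inf
assert np.logaddexp.reduce([]) == -np.inf
# Consistent with log(exp(-inf) + exp(x)) == x:
assert np.logaddexp(-np.inf, 1.0) == 1.0
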
diff --git a/numpy/core/umath.py b/numpy/core/umath.py
new file mode 100644
index 000000000..a0e8ad427
--- /dev/null
+++ b/numpy/core/umath.py
@@ -0,0 +1,35 @@
+"""
+Create the numpy.core.umath namespace for backward compatibility. In v1.16
+the multiarray and umath c-extension modules were merged into a single
+_multiarray_umath extension module. So we replicate the old namespace
+by importing from the extension module.
+
+"""
+
+from . import _multiarray_umath
+from numpy.core._multiarray_umath import *
+from numpy.core._multiarray_umath import (
+ _UFUNC_API, _add_newdoc_ufunc, _arg, _ones_like
+ )
+
+__all__ = [
+ '_UFUNC_API', 'ERR_CALL', 'ERR_DEFAULT', 'ERR_IGNORE', 'ERR_LOG',
+ 'ERR_PRINT', 'ERR_RAISE', 'ERR_WARN', 'FLOATING_POINT_SUPPORT',
+ 'FPE_DIVIDEBYZERO', 'FPE_INVALID', 'FPE_OVERFLOW', 'FPE_UNDERFLOW', 'NAN',
+ 'NINF', 'NZERO', 'PINF', 'PZERO', 'SHIFT_DIVIDEBYZERO', 'SHIFT_INVALID',
+ 'SHIFT_OVERFLOW', 'SHIFT_UNDERFLOW', 'UFUNC_BUFSIZE_DEFAULT',
+ 'UFUNC_PYVALS_NAME', '_add_newdoc_ufunc', '_arg', 'absolute', 'add',
+ 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh',
+ 'bitwise_and', 'bitwise_or', 'bitwise_xor', 'cbrt', 'ceil', 'conj',
+ 'conjugate', 'copysign', 'cos', 'cosh', 'deg2rad', 'degrees', 'divide',
+ 'divmod', 'e', 'equal', 'euler_gamma', 'exp', 'exp2', 'expm1', 'fabs',
+ 'floor', 'floor_divide', 'float_power', 'fmax', 'fmin', 'fmod', 'frexp',
+ 'frompyfunc', 'gcd', 'geterrobj', 'greater', 'greater_equal', 'heaviside',
+ 'hypot', 'invert', 'isfinite', 'isinf', 'isnan', 'isnat', 'lcm', 'ldexp',
+ 'left_shift', 'less', 'less_equal', 'log', 'log10', 'log1p', 'log2',
+ 'logaddexp', 'logaddexp2', 'logical_and', 'logical_not', 'logical_or',
+ 'logical_xor', 'maximum', 'minimum', 'mod', 'modf', 'multiply', 'negative',
+ 'nextafter', 'not_equal', 'pi', 'positive', 'power', 'rad2deg', 'radians',
+ 'reciprocal', 'remainder', 'right_shift', 'rint', 'seterrobj', 'sign',
+ 'signbit', 'sin', 'sinh', 'spacing', 'sqrt', 'square', 'subtract', 'tan',
+ 'tanh', 'true_divide', 'trunc']
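
Since numpy/core/umath.py is now a pure-Python shim over the merged extension module, both import paths should resolve to the same objects. A sketch of the compatibility this preserves, assuming NumPy >= 1.16:

from numpy.core import umath, _multiarray_umath

# The shim re-exports the extension module's contents unchanged.
assert umath.add is _multiarray_umath.add
assert umath.pi == _multiarray_umath.pi
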
diff --git a/numpy/ctypeslib.py b/numpy/ctypeslib.py
index 9d71adbdb..9eefbc9f4 100644
--- a/numpy/ctypeslib.py
+++ b/numpy/ctypeslib.py
@@ -12,7 +12,7 @@ as_array : Create an ndarray from a ctypes array.
References
----------
-.. [1] "SciPy Cookbook: ctypes", http://www.scipy.org/Cookbook/Ctypes
+.. [1] "SciPy Cookbook: ctypes", https://scipy-cookbook.readthedocs.io/items/Ctypes.html
Examples
--------
@@ -54,8 +54,10 @@ from __future__ import division, absolute_import, print_function
__all__ = ['load_library', 'ndpointer', 'test', 'ctypes_load_library',
'c_intp', 'as_ctypes', 'as_array']
-import sys, os
-from numpy import integer, ndarray, dtype as _dtype, deprecate, array
+import os
+from numpy import (
+ integer, ndarray, dtype as _dtype, deprecate, array, frombuffer
+)
from numpy.core.multiarray import _flagdict, flagsobj
try:
@@ -175,24 +177,6 @@ def _flags_fromnum(num):
class _ndptr(_ndptr_base):
-
- def _check_retval_(self):
- """This method is called when this class is used as the .restype
- attribute for a shared-library function. It constructs a numpy
- array from a void pointer."""
- return array(self)
-
- @property
- def __array_interface__(self):
- return {'descr': self._dtype_.descr,
- '__ref': self,
- 'strides': None,
- 'shape': self._shape_,
- 'version': 3,
- 'typestr': self._dtype_.descr[0][1],
- 'data': (self.value, False),
- }
-
@classmethod
def from_param(cls, obj):
if not isinstance(obj, ndarray):
@@ -213,6 +197,34 @@ class _ndptr(_ndptr_base):
return obj.ctypes
+class _concrete_ndptr(_ndptr):
+ """
+ Like _ndptr, but with `_shape_` and `_dtype_` specified.
+
+ Notably, this means the pointer has enough information to reconstruct
+ the array, which is not generally true.
+ """
+ def _check_retval_(self):
+ """
+ This method is called when this class is used as the .restype
+ attribute for a shared-library function, to automatically wrap the
+ pointer into an array.
+ """
+ return self.contents
+
+ @property
+ def contents(self):
+ """
+ Get an ndarray viewing the data pointed to by this pointer.
+
+ This mirrors the `contents` attribute of a normal ctypes pointer
+ """
+ full_dtype = _dtype((self._dtype_, self._shape_))
+ full_ctype = ctypes.c_char * full_dtype.itemsize
+ buffer = ctypes.cast(self, ctypes.POINTER(full_ctype)).contents
+ return frombuffer(buffer, dtype=full_dtype).squeeze(axis=0)
+
+
# Factory for an array-checking class with from_param defined for
# use with ctypes argtypes mechanism
_pointer_type_cache = {}
@@ -269,8 +281,11 @@ def ndpointer(dtype=None, ndim=None, shape=None, flags=None):
"""
+ # normalize dtype to an Optional[dtype]
if dtype is not None:
dtype = _dtype(dtype)
+
+ # normalize flags to an Optional[int]
num = None
if flags is not None:
if isinstance(flags, str):
@@ -287,10 +302,23 @@ def ndpointer(dtype=None, ndim=None, shape=None, flags=None):
except Exception:
raise TypeError("invalid flags specification")
num = _num_fromflags(flags)
+
+ # normalize shape to an Optional[tuple]
+ if shape is not None:
+ try:
+ shape = tuple(shape)
+ except TypeError:
+ # single integer -> 1-tuple
+ shape = (shape,)
+
+ cache_key = (dtype, ndim, shape, num)
+
try:
- return _pointer_type_cache[(dtype, ndim, shape, num)]
+ return _pointer_type_cache[cache_key]
except KeyError:
pass
+
+ # produce a name for the new type
if dtype is None:
name = 'any'
elif dtype.names:
@@ -300,23 +328,21 @@ def ndpointer(dtype=None, ndim=None, shape=None, flags=None):
if ndim is not None:
name += "_%dd" % ndim
if shape is not None:
- try:
- strshape = [str(x) for x in shape]
- except TypeError:
- strshape = [str(shape)]
- shape = (shape,)
- shape = tuple(shape)
- name += "_"+"x".join(strshape)
+ name += "_"+"x".join(str(x) for x in shape)
if flags is not None:
name += "_"+"_".join(flags)
+
+ if dtype is not None and shape is not None:
+ base = _concrete_ndptr
else:
- flags = []
- klass = type("ndpointer_%s"%name, (_ndptr,),
+ base = _ndptr
+
+ klass = type("ndpointer_%s"%name, (base,),
{"_dtype_": dtype,
"_shape_" : shape,
"_ndim_" : ndim,
"_flags_" : num})
- _pointer_type_cache[(dtype, shape, ndim, num)] = klass
+ _pointer_type_cache[cache_key] = klass
return klass
@@ -375,5 +401,5 @@ if ctypes is not None:
raise TypeError("readonly arrays unsupported")
tp = _ctype_ndarray(_typecodes[ai["typestr"]], ai["shape"])
result = tp.from_address(addr)
- result.__keep = ai
+ result.__keep = obj
return result
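
With shape normalized to a tuple before the cache lookup, equivalent spellings now share one pointer type; previously the store and lookup keys even disagreed on ordering, so the cache could silently miss. A small sketch, assuming NumPy >= 1.16:

import numpy as np
from numpy import ctypeslib

# A bare integer and the equivalent 1-tuple hit the same cache entry.
p1 = ctypeslib.ndpointer(dtype=np.float64, shape=2)
p2 = ctypeslib.ndpointer(dtype=np.float64, shape=(2,))
assert p1 is p2
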
diff --git a/numpy/distutils/__init__.py b/numpy/distutils/__init__.py
index b794bebd7..55514750e 100644
--- a/numpy/distutils/__init__.py
+++ b/numpy/distutils/__init__.py
@@ -1,7 +1,5 @@
from __future__ import division, absolute_import, print_function
-import sys
-
from .__version__ import version as __version__
# Must import local ccompiler ASAP in order to get
# customized CCompiler.spawn effective.
@@ -17,7 +15,7 @@ try:
# Normally numpy is installed if the above import works, but an interrupted
# in-place build could also have left a __config__.py. In that case the
# next import may still fail, so keep it inside the try block.
- from numpy.testing._private.pytesttester import PytestTester
+ from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
except ImportError:
diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py
index b03fb96b2..100d0d069 100644
--- a/numpy/distutils/ccompiler.py
+++ b/numpy/distutils/ccompiler.py
@@ -6,6 +6,7 @@ import sys
import types
import shlex
import time
+import subprocess
from copy import copy
from distutils import ccompiler
from distutils.ccompiler import *
@@ -16,9 +17,11 @@ from distutils.version import LooseVersion
from numpy.distutils import log
from numpy.distutils.compat import get_exception
-from numpy.distutils.exec_command import exec_command
+from numpy.distutils.exec_command import (
+ filepath_from_subprocess_output, forward_bytes_to_stdout
+)
from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \
- quote_args, get_num_build_jobs, \
+ get_num_build_jobs, \
_commandline_dep_string
# globals for parallel build management
@@ -136,20 +139,37 @@ def CCompiler_spawn(self, cmd, display=None):
if is_sequence(display):
display = ' '.join(list(display))
log.info(display)
- s, o = exec_command(cmd)
- if s:
- if is_sequence(cmd):
- cmd = ' '.join(list(cmd))
- try:
- print(o)
- except UnicodeError:
- # When installing through pip, `o` can contain non-ascii chars
- pass
- if re.search('Too many open files', o):
- msg = '\nTry rerunning setup command until build succeeds.'
- else:
- msg = ''
- raise DistutilsExecError('Command "%s" failed with exit status %d%s' % (cmd, s, msg))
+ try:
+ subprocess.check_output(cmd)
+ except subprocess.CalledProcessError as exc:
+ o = exc.output
+ s = exc.returncode
+ except OSError:
+ # OSError doesn't have the same hooks for the exception
+ # output, but exec_command() historically would use an
+ # empty string for EnvironmentError (base class for
+ # OSError)
+ o = b''
+ # status previously used by exec_command() for parent
+ # of OSError
+ s = 127
+ else:
+ # use a convenience return here so that any kind of
+ # caught exception will execute the default code after the
+ # try / except block, which handles various exceptions
+ return None
+
+ if is_sequence(cmd):
+ cmd = ' '.join(list(cmd))
+
+ forward_bytes_to_stdout(o)
+
+ if re.search(b'Too many open files', o):
+ msg = '\nTry rerunning setup command until build succeeds.'
+ else:
+ msg = ''
+ raise DistutilsExecError('Command "%s" failed with exit status %d%s' %
+ (cmd, s, msg))
replace_method(CCompiler, 'spawn', CCompiler_spawn)
@@ -404,10 +424,8 @@ def _compiler_to_string(compiler):
v = getattr(compiler, key)
mx = max(mx, len(key))
props.append((key, repr(v)))
- lines = []
- format = '%-' + repr(mx+1) + 's = %s'
- for prop in props:
- lines.append(format % prop)
+ fmt = '%-' + repr(mx+1) + 's = %s'
+ lines = [fmt % prop for prop in props]
return '\n'.join(lines)
def CCompiler_show_customization(self):
@@ -620,7 +638,21 @@ def CCompiler_get_version(self, force=False, ok_status=[0]):
version = m.group('version')
return version
- status, output = exec_command(version_cmd, use_tee=0)
+ try:
+ output = subprocess.check_output(version_cmd)
+ except subprocess.CalledProcessError as exc:
+ output = exc.output
+ status = exc.returncode
+ except OSError:
+ # match the historical returns for a parent
+ # exception class caught by exec_command()
+ status = 127
+ output = b''
+ else:
+ # output isn't actually a filepath but we do this
+ # for now to match previous distutils behavior
+ output = filepath_from_subprocess_output(output)
+ status = 0
version = None
if status in ok_status:
@@ -738,8 +770,13 @@ ccompiler.new_compiler = new_compiler
_distutils_gen_lib_options = gen_lib_options
def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries):
- library_dirs = quote_args(library_dirs)
- runtime_library_dirs = quote_args(runtime_library_dirs)
+ # the version of this function provided by CPython allows the following
+ # to return lists, which are unpacked automatically:
+ # - compiler.runtime_library_dir_option
+ # our version extends the behavior to:
+ # - compiler.library_dir_option
+ # - compiler.library_option
+ # - compiler.find_library_file
r = _distutils_gen_lib_options(compiler, library_dirs,
runtime_library_dirs, libraries)
lib_opts = []
@@ -759,11 +796,6 @@ for _cc in ['msvc9', 'msvc', '_msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']:
if _m is not None:
setattr(_m, 'gen_lib_options', gen_lib_options)
-_distutils_gen_preprocess_options = gen_preprocess_options
-def gen_preprocess_options (macros, include_dirs):
- include_dirs = quote_args(include_dirs)
- return _distutils_gen_preprocess_options(macros, include_dirs)
-ccompiler.gen_preprocess_options = gen_preprocess_options
##Fix distutils.util.split_quoted:
# NOTE: I removed this fix in revision 4481 (see ticket #619), but it appears
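
The same exec_command() -> subprocess.check_output() migration pattern recurs throughout this patch. Condensed into a hypothetical helper (run_command is illustrative, not part of the patch):

import subprocess

def run_command(cmd):
    # CalledProcessError carries the output and exit status, while
    # OSError maps to 127, the status exec_command() used to return.
    try:
        return 0, subprocess.check_output(cmd)
    except subprocess.CalledProcessError as exc:
        return exc.returncode, exc.output
    except OSError:
        return 127, b''

status, output = run_command(['gcc', '--version'])
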
diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py
index f6bd81b6c..ab9d585a5 100644
--- a/numpy/distutils/command/build_ext.py
+++ b/numpy/distutils/command/build_ext.py
@@ -4,8 +4,7 @@
from __future__ import division, absolute_import, print_function
import os
-import sys
-import shutil
+import subprocess
from glob import glob
from distutils.dep_util import newer_group
@@ -15,7 +14,7 @@ from distutils.errors import DistutilsFileError, DistutilsSetupError,\
from distutils.file_util import copy_file
from numpy.distutils import log
-from numpy.distutils.exec_command import exec_command
+from numpy.distutils.exec_command import filepath_from_subprocess_output
from numpy.distutils.system_info import combine_paths, system_info
from numpy.distutils.misc_util import filter_sources, has_f_sources, \
has_cxx_sources, get_ext_source_files, \
@@ -266,10 +265,10 @@ class build_ext (old_build_ext):
# we blindly assume that both packages need all of the libraries,
# resulting in a larger wheel than is required. This should be fixed,
# but it's so rare that I won't bother to handle it.
- pkg_roots = set(
+ pkg_roots = {
self.get_ext_fullname(ext.name).split('.')[0]
for ext in self.extensions
- )
+ }
for pkg_root in pkg_roots:
shared_lib_dir = os.path.join(pkg_root, '.libs')
if not self.inplace:
@@ -558,9 +557,12 @@ class build_ext (old_build_ext):
# correct path when compiling in Cygwin but with normal Win
# Python
if dir.startswith('/usr/lib'):
- s, o = exec_command(['cygpath', '-w', dir], use_tee=False)
- if not s:
- dir = o
+ try:
+ dir = subprocess.check_output(['cygpath', '-w', dir])
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ dir = filepath_from_subprocess_output(dir)
f_lib_dirs.append(dir)
c_library_dirs.extend(f_lib_dirs)
diff --git a/numpy/distutils/command/build_src.py b/numpy/distutils/command/build_src.py
index 9def37822..668bc23fe 100644
--- a/numpy/distutils/command/build_src.py
+++ b/numpy/distutils/command/build_src.py
@@ -204,7 +204,6 @@ class build_src(build_ext.build_ext):
def _build_npy_pkg_config(self, info, gd):
- import shutil
template, install_dir, subst_dict = info
template_dir = os.path.dirname(template)
for k, v in gd.items():
@@ -239,7 +238,6 @@ class build_src(build_ext.build_ext):
if not install_cmd.finalized == 1:
install_cmd.finalize_options()
build_npkg = False
- gd = {}
if self.inplace == 1:
top_prefix = '.'
build_npkg = True
diff --git a/numpy/distutils/command/config.py b/numpy/distutils/command/config.py
index 47bc496cf..d9b1e8488 100644
--- a/numpy/distutils/command/config.py
+++ b/numpy/distutils/command/config.py
@@ -7,6 +7,7 @@ from __future__ import division, absolute_import, print_function
import os, signal
import warnings
import sys
+import subprocess
from distutils.command.config import config as old_config
from distutils.command.config import LANG_EXT
@@ -14,7 +15,7 @@ from distutils import log
from distutils.file_util import copy_file
from distutils.ccompiler import CompileError, LinkError
import distutils
-from numpy.distutils.exec_command import exec_command
+from numpy.distutils.exec_command import filepath_from_subprocess_output
from numpy.distutils.mingw32ccompiler import generate_manifest
from numpy.distutils.command.autodist import (check_gcc_function_attribute,
check_gcc_variable_attribute,
@@ -94,7 +95,7 @@ Original exception was: %s, and the Compiler class was %s
try:
ret = mth(*((self,)+args))
except (DistutilsExecError, CompileError):
- msg = str(get_exception())
+ str(get_exception())
self.compiler = save_compiler
raise CompileError
self.compiler = save_compiler
@@ -121,9 +122,13 @@ Original exception was: %s, and the Compiler class was %s
# correct path when compiling in Cygwin but with
# normal Win Python
if d.startswith('/usr/lib'):
- s, o = exec_command(['cygpath', '-w', d],
- use_tee=False)
- if not s: d = o
+ try:
+ d = subprocess.check_output(['cygpath',
+ '-w', d])
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ d = filepath_from_subprocess_output(d)
library_dirs.append(d)
for libname in self.fcompiler.libraries or []:
if libname not in libraries:
@@ -436,7 +441,6 @@ int main (void)
"involving running executable on the target machine.\n" \
"+++++++++++++++++++++++++++++++++++++++++++++++++\n",
DeprecationWarning, stacklevel=2)
- from distutils.ccompiler import CompileError, LinkError
self._check_compiler()
exitcode, output = 255, ''
try:
@@ -450,8 +454,24 @@ int main (void)
grabber.restore()
raise
exe = os.path.join('.', exe)
- exitstatus, output = exec_command(exe, execute_in='.',
- use_tee=use_tee)
+ try:
+ # specify cwd arg for consistency with
+ # historic usage pattern of exec_command()
+ # also, note that exe appears to be a string,
+ # which exec_command() handled, but we now
+ # use a list for check_output() -- this assumes
+ # that exe is always a single command
+ output = subprocess.check_output([exe], cwd='.')
+ except subprocess.CalledProcessError as exc:
+ exitstatus = exc.returncode
+ output = ''
+ except OSError:
+ # preserve the EnvironmentError exit status
+ # used historically in exec_command()
+ exitstatus = 127
+ output = ''
+ else:
+ output = filepath_from_subprocess_output(output)
if hasattr(os, 'WEXITSTATUS'):
exitcode = os.WEXITSTATUS(exitstatus)
if os.WIFSIGNALED(exitstatus):
diff --git a/numpy/distutils/command/config_compiler.py b/numpy/distutils/command/config_compiler.py
index 5e638fecc..bf170063e 100644
--- a/numpy/distutils/command/config_compiler.py
+++ b/numpy/distutils/command/config_compiler.py
@@ -5,9 +5,12 @@ from numpy.distutils import log
#XXX: Linker flags
-def show_fortran_compilers(_cache=[]):
- # Using cache to prevent infinite recursion
- if _cache: return
+def show_fortran_compilers(_cache=None):
+ # Using cache to prevent infinite recursion.
+ if _cache:
+ return
+ elif _cache is None:
+ _cache = []
_cache.append(1)
from numpy.distutils.fcompiler import show_fcompilers
import distutils.core
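
The rewritten signature sidesteps Python's mutable-default-argument behavior, which the old `_cache=[]` spelling exploited as a cross-call cache. A minimal illustration of that behavior:

def remember(value, _cache=[]):
    # The default list is created once, at definition time, and is
    # shared by every call that does not pass its own _cache.
    _cache.append(value)
    return len(_cache)

assert remember('a') == 1
assert remember('b') == 2  # same list as the first call
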
diff --git a/numpy/distutils/conv_template.py b/numpy/distutils/conv_template.py
index 4a8746236..b33e315b4 100644
--- a/numpy/distutils/conv_template.py
+++ b/numpy/distutils/conv_template.py
@@ -206,10 +206,8 @@ def parse_loop_header(loophead) :
dlist = []
if nsub is None :
raise ValueError("No substitution variables found")
- for i in range(nsub) :
- tmp = {}
- for name, vals in names :
- tmp[name] = vals[i]
+ for i in range(nsub):
+ tmp = {name: vals[i] for name, vals in names}
dlist.append(tmp)
return dlist
diff --git a/numpy/distutils/core.py b/numpy/distutils/core.py
index d9e125368..70cc37caa 100644
--- a/numpy/distutils/core.py
+++ b/numpy/distutils/core.py
@@ -71,12 +71,14 @@ def _dict_append(d, **kws):
else:
raise TypeError(repr(type(dv)))
-def _command_line_ok(_cache=[]):
+def _command_line_ok(_cache=None):
""" Return True if command line does not contain any
help or display requests.
"""
if _cache:
return _cache[0]
+ elif _cache is None:
+ _cache = []
ok = True
display_opts = ['--'+n for n in Distribution.display_option_names]
for o in Distribution.display_options:
diff --git a/numpy/distutils/exec_command.py b/numpy/distutils/exec_command.py
index 8118e2fc3..ede347b03 100644
--- a/numpy/distutils/exec_command.py
+++ b/numpy/distutils/exec_command.py
@@ -61,6 +61,49 @@ import locale
from numpy.distutils.misc_util import is_sequence, make_temp_file
from numpy.distutils import log
+def filepath_from_subprocess_output(output):
+ """
+ Convert `bytes` in the encoding used by a subprocess into a filesystem-appropriate `str`.
+
+ Inherited from `exec_command`, and possibly incorrect.
+ """
+ mylocale = locale.getpreferredencoding(False)
+ if mylocale is None:
+ mylocale = 'ascii'
+ output = output.decode(mylocale, errors='replace')
+ output = output.replace('\r\n', '\n')
+ # Another historical oddity
+ if output[-1:] == '\n':
+ output = output[:-1]
+ # stdio uses bytes in python 2, so to avoid issues, we simply
+ # replace all non-ascii characters with '?'
+ if sys.version_info < (3, 0):
+ output = output.encode('ascii', errors='replace')
+ return output
+
+
+def forward_bytes_to_stdout(val):
+ """
+ Forward bytes from a subprocess call to the console, without attempting to
+ decode them.
+
+ The assumption is that the subprocess call already returned bytes in
+ a suitable encoding.
+ """
+ if sys.version_info.major < 3:
+ # python 2 has binary output anyway
+ sys.stdout.write(val)
+ elif hasattr(sys.stdout, 'buffer'):
+ # use the underlying binary output if there is one
+ sys.stdout.buffer.write(val)
+ elif hasattr(sys.stdout, 'encoding'):
+ # round-trip the encoding if necessary
+ sys.stdout.write(val.decode(sys.stdout.encoding))
+ else:
+ # make a best-guess at the encoding
+ sys.stdout.write(val.decode('utf8', errors='replace'))
+
+
def temp_file_name():
fo, name = make_temp_file()
fo.close()
@@ -128,9 +171,7 @@ def find_executable(exe, path=None, _cache={}):
def _preserve_environment( names ):
log.debug('_preserve_environment(%r)' % (names))
- env = {}
- for name in names:
- env[name] = os.environ.get(name)
+ env = {name: os.environ.get(name) for name in names}
return env
def _update_environment( **env ):
@@ -260,9 +301,10 @@ def _exec_command(command, use_shell=None, use_tee = None, **env):
return 127, ''
text, err = proc.communicate()
- text = text.decode(locale.getpreferredencoding(False),
- errors='replace')
-
+ mylocale = locale.getpreferredencoding(False)
+ if mylocale is None:
+ mylocale = 'ascii'
+ text = text.decode(mylocale, errors='replace')
text = text.replace('\r\n', '\n')
# Another historical oddity
if text[-1:] == '\n':
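
forward_bytes_to_stdout() picks the first binary-capable channel rather than guessing an encoding up front. The core of that decision as a standalone sketch:

import sys

data = b"build output in an unknown encoding\n"
if hasattr(sys.stdout, 'buffer'):
    # Python 3 text streams normally expose the underlying binary
    # buffer; writing there needs no decoding at all.
    sys.stdout.buffer.write(data)
else:
    # Otherwise fall back to a best-guess decode.
    sys.stdout.write(data.decode('utf8', errors='replace'))
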
diff --git a/numpy/distutils/fcompiler/__init__.py b/numpy/distutils/fcompiler/__init__.py
index c926e7378..12b32832e 100644
--- a/numpy/distutils/fcompiler/__init__.py
+++ b/numpy/distutils/fcompiler/__init__.py
@@ -22,6 +22,7 @@ import os
import sys
import re
import types
+import shlex
from numpy.compat import open_latin1
@@ -35,10 +36,11 @@ from numpy.distutils.ccompiler import CCompiler, gen_lib_options
from numpy.distutils import log
from numpy.distutils.misc_util import is_string, all_strings, is_sequence, \
make_temp_file, get_shared_lib_extension
-from numpy.distutils.environment import EnvironmentConfig
from numpy.distutils.exec_command import find_executable
from numpy.distutils.compat import get_exception
+from .environment import EnvironmentConfig
+
__metaclass__ = type
class CompilerNotFound(Exception):
@@ -91,7 +93,7 @@ class FCompiler(CCompiler):
# These are the environment variables and distutils keys used.
# Each configuration description is
- # (<hook name>, <environment variable>, <key in distutils.cfg>, <convert>)
+ # (<hook name>, <environment variable>, <key in distutils.cfg>, <convert>, <append>)
# The hook names are handled by the self._environment_hook method.
# - names starting with 'self.' call methods in this class
# - names starting with 'exe.' return the key in the executables dict
@@ -101,43 +103,43 @@ class FCompiler(CCompiler):
distutils_vars = EnvironmentConfig(
distutils_section='config_fc',
- noopt = (None, None, 'noopt', str2bool),
- noarch = (None, None, 'noarch', str2bool),
- debug = (None, None, 'debug', str2bool),
- verbose = (None, None, 'verbose', str2bool),
+ noopt = (None, None, 'noopt', str2bool, False),
+ noarch = (None, None, 'noarch', str2bool, False),
+ debug = (None, None, 'debug', str2bool, False),
+ verbose = (None, None, 'verbose', str2bool, False),
)
command_vars = EnvironmentConfig(
distutils_section='config_fc',
- compiler_f77 = ('exe.compiler_f77', 'F77', 'f77exec', None),
- compiler_f90 = ('exe.compiler_f90', 'F90', 'f90exec', None),
- compiler_fix = ('exe.compiler_fix', 'F90', 'f90exec', None),
- version_cmd = ('exe.version_cmd', None, None, None),
- linker_so = ('exe.linker_so', 'LDSHARED', 'ldshared', None),
- linker_exe = ('exe.linker_exe', 'LD', 'ld', None),
- archiver = (None, 'AR', 'ar', None),
- ranlib = (None, 'RANLIB', 'ranlib', None),
+ compiler_f77 = ('exe.compiler_f77', 'F77', 'f77exec', None, False),
+ compiler_f90 = ('exe.compiler_f90', 'F90', 'f90exec', None, False),
+ compiler_fix = ('exe.compiler_fix', 'F90', 'f90exec', None, False),
+ version_cmd = ('exe.version_cmd', None, None, None, False),
+ linker_so = ('exe.linker_so', 'LDSHARED', 'ldshared', None, False),
+ linker_exe = ('exe.linker_exe', 'LD', 'ld', None, False),
+ archiver = (None, 'AR', 'ar', None, False),
+ ranlib = (None, 'RANLIB', 'ranlib', None, False),
)
flag_vars = EnvironmentConfig(
distutils_section='config_fc',
- f77 = ('flags.f77', 'F77FLAGS', 'f77flags', flaglist),
- f90 = ('flags.f90', 'F90FLAGS', 'f90flags', flaglist),
- free = ('flags.free', 'FREEFLAGS', 'freeflags', flaglist),
- fix = ('flags.fix', None, None, flaglist),
- opt = ('flags.opt', 'FOPT', 'opt', flaglist),
- opt_f77 = ('flags.opt_f77', None, None, flaglist),
- opt_f90 = ('flags.opt_f90', None, None, flaglist),
- arch = ('flags.arch', 'FARCH', 'arch', flaglist),
- arch_f77 = ('flags.arch_f77', None, None, flaglist),
- arch_f90 = ('flags.arch_f90', None, None, flaglist),
- debug = ('flags.debug', 'FDEBUG', 'fdebug', flaglist),
- debug_f77 = ('flags.debug_f77', None, None, flaglist),
- debug_f90 = ('flags.debug_f90', None, None, flaglist),
- flags = ('self.get_flags', 'FFLAGS', 'fflags', flaglist),
- linker_so = ('flags.linker_so', 'LDFLAGS', 'ldflags', flaglist),
- linker_exe = ('flags.linker_exe', 'LDFLAGS', 'ldflags', flaglist),
- ar = ('flags.ar', 'ARFLAGS', 'arflags', flaglist),
+ f77 = ('flags.f77', 'F77FLAGS', 'f77flags', flaglist, True),
+ f90 = ('flags.f90', 'F90FLAGS', 'f90flags', flaglist, True),
+ free = ('flags.free', 'FREEFLAGS', 'freeflags', flaglist, True),
+ fix = ('flags.fix', None, None, flaglist, False),
+ opt = ('flags.opt', 'FOPT', 'opt', flaglist, True),
+ opt_f77 = ('flags.opt_f77', None, None, flaglist, False),
+ opt_f90 = ('flags.opt_f90', None, None, flaglist, False),
+ arch = ('flags.arch', 'FARCH', 'arch', flaglist, False),
+ arch_f77 = ('flags.arch_f77', None, None, flaglist, False),
+ arch_f90 = ('flags.arch_f90', None, None, flaglist, False),
+ debug = ('flags.debug', 'FDEBUG', 'fdebug', flaglist, True),
+ debug_f77 = ('flags.debug_f77', None, None, flaglist, False),
+ debug_f90 = ('flags.debug_f90', None, None, flaglist, False),
+ flags = ('self.get_flags', 'FFLAGS', 'fflags', flaglist, True),
+ linker_so = ('flags.linker_so', 'LDFLAGS', 'ldflags', flaglist, True),
+ linker_exe = ('flags.linker_exe', 'LDFLAGS', 'ldflags', flaglist, True),
+ ar = ('flags.ar', 'ARFLAGS', 'arflags', flaglist, True),
)
language_map = {'.f': 'f77',
@@ -464,8 +466,10 @@ class FCompiler(CCompiler):
noarch = self.distutils_vars.get('noarch', noopt)
debug = self.distutils_vars.get('debug', False)
- f77 = self.command_vars.compiler_f77
- f90 = self.command_vars.compiler_f90
+ f77 = shlex.split(self.command_vars.compiler_f77,
+ posix=(os.name == 'posix'))
+ f90 = shlex.split(self.command_vars.compiler_f90,
+ posix=(os.name == 'posix'))
f77flags = []
f90flags = []
@@ -479,6 +483,14 @@ class FCompiler(CCompiler):
freeflags = self.flag_vars.free
# XXX Assuming that free format is default for f90 compiler.
fix = self.command_vars.compiler_fix
+ # NOTE: this is probably just excluding the --coverage flag
+ # when F90 = "gfortran --coverage", instead of putting that
+ # flag somewhere more appropriate. Cases like this, where a
+ # Fortran compiler environment variable has been customized
+ # by CI or a user, should perhaps eventually be more
+ # thoroughly tested and more robustly handled.
+ fix = shlex.split(fix, posix=(os.name == 'posix'))
if fix:
fixflags = self.flag_vars.fix + f90flags
@@ -505,11 +517,11 @@ class FCompiler(CCompiler):
fflags = self.flag_vars.flags + dflags + oflags + aflags
if f77:
- self.set_commands(compiler_f77=[f77]+f77flags+fflags)
+ self.set_commands(compiler_f77=f77+f77flags+fflags)
if f90:
- self.set_commands(compiler_f90=[f90]+freeflags+f90flags+fflags)
+ self.set_commands(compiler_f90=f90+freeflags+f90flags+fflags)
if fix:
- self.set_commands(compiler_fix=[fix]+fixflags+fflags)
+ self.set_commands(compiler_fix=fix+fixflags+fflags)
#XXX: Do we need LDSHARED->SOSHARED, LDFLAGS->SOFLAGS
diff --git a/numpy/distutils/environment.py b/numpy/distutils/fcompiler/environment.py
index 3798e16f5..489784580 100644
--- a/numpy/distutils/environment.py
+++ b/numpy/distutils/fcompiler/environment.py
@@ -14,7 +14,7 @@ class EnvironmentConfig(object):
def dump_variable(self, name):
conf_desc = self._conf_keys[name]
- hook, envvar, confvar, convert = conf_desc
+ hook, envvar, confvar, convert, append = conf_desc
if not convert:
convert = lambda x : x
print('%s.%s:' % (self._distutils_section, name))
@@ -49,10 +49,15 @@ class EnvironmentConfig(object):
return var
def _get_var(self, name, conf_desc):
- hook, envvar, confvar, convert = conf_desc
+ hook, envvar, confvar, convert, append = conf_desc
var = self._hook_handler(name, hook)
if envvar is not None:
- var = os.environ.get(envvar, var)
+ envvar_contents = os.environ.get(envvar)
+ if envvar_contents is not None:
+ if var and append and os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '0') == '1':
+ var = var + [envvar_contents]
+ else:
+ var = envvar_contents
if confvar is not None and self._conf:
var = self._conf.get(confvar, (None, var))[1]
if convert is not None:
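
The new branch in _get_var() makes environment flags additive only when the variable allows appending and the user opts in via NPY_DISTUTILS_APPEND_FLAGS=1. A hypothetical helper (resolve is illustrative only) mirroring that logic:

import os

def resolve(base_flags, envvar, append):
    # Environment contents extend the defaults only when appending is
    # both allowed for this variable and opted into; otherwise they
    # replace the defaults, as before.
    contents = os.environ.get(envvar)
    if contents is None:
        return base_flags
    if base_flags and append and os.environ.get(
            'NPY_DISTUTILS_APPEND_FLAGS', '0') == '1':
        return base_flags + [contents]
    return contents

os.environ['NPY_DISTUTILS_APPEND_FLAGS'] = '1'
os.environ['FFLAGS'] = '-fno-second-underscore'
assert resolve(['-O2'], 'FFLAGS', True) == ['-O2', '-fno-second-underscore']
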
diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py
index 0ebbe79dc..81769e562 100644
--- a/numpy/distutils/fcompiler/gnu.py
+++ b/numpy/distutils/fcompiler/gnu.py
@@ -8,10 +8,10 @@ import platform
import tempfile
import hashlib
import base64
+import subprocess
from subprocess import Popen, PIPE, STDOUT
-from copy import copy
+from numpy.distutils.exec_command import filepath_from_subprocess_output
from numpy.distutils.fcompiler import FCompiler
-from numpy.distutils.exec_command import exec_command
from numpy.distutils.compat import get_exception
from numpy.distutils.system_info import system_info
@@ -160,9 +160,13 @@ class GnuFCompiler(FCompiler):
return opt
def get_libgcc_dir(self):
- status, output = exec_command(
- self.compiler_f77 + ['-print-libgcc-file-name'], use_tee=0)
- if not status:
+ try:
+ output = subprocess.check_output(self.compiler_f77 +
+ ['-print-libgcc-file-name'])
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ output = filepath_from_subprocess_output(output)
return os.path.dirname(output)
return None
@@ -177,9 +181,13 @@ class GnuFCompiler(FCompiler):
libgfortran_dir = None
if libgfortran_name:
find_lib_arg = ['-print-file-name={0}'.format(libgfortran_name)]
- status, output = exec_command(
- self.compiler_f77 + find_lib_arg, use_tee=0)
- if not status:
+ try:
+ output = subprocess.check_output(
+ self.compiler_f77 + find_lib_arg)
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ output = filepath_from_subprocess_output(output)
libgfortran_dir = os.path.dirname(output)
return libgfortran_dir
@@ -257,6 +265,10 @@ class GnuFCompiler(FCompiler):
return []
def runtime_library_dir_option(self, dir):
+ if sys.platform[:3] == 'aix' or sys.platform == 'win32':
+ # Linux/Solaris/Unix support RPATH; Windows and AIX do not
+ raise NotImplementedError
+
sep = ',' if sys.platform == 'darwin' else '='
return '-Wl,-rpath%s"%s"' % (sep, dir)
@@ -303,6 +315,12 @@ class Gnu95FCompiler(GnuFCompiler):
module_dir_switch = '-J'
module_include_switch = '-I'
+ if sys.platform[:3] == 'aix':
+ executables['linker_so'].append('-lpthread')
+ if platform.architecture()[0][:2] == '64':
+ for key in ['compiler_f77', 'compiler_f90','compiler_fix','linker_so', 'linker_exe']:
+ executables[key].append('-maix64')
+
g2c = 'gfortran'
def _universal_flags(self, cmd):
@@ -373,8 +391,12 @@ class Gnu95FCompiler(GnuFCompiler):
return opt
def get_target(self):
- status, output = exec_command(self.compiler_f77 + ['-v'], use_tee=0)
- if not status:
+ try:
+ output = subprocess.check_output(self.compiler_f77 + ['-v'])
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ output = filepath_from_subprocess_output(output)
m = TARGET_R.search(output)
if m:
return m.group(1)
diff --git a/numpy/distutils/fcompiler/ibm.py b/numpy/distutils/fcompiler/ibm.py
index d0c2202d4..c4cb2fca7 100644
--- a/numpy/distutils/fcompiler/ibm.py
+++ b/numpy/distutils/fcompiler/ibm.py
@@ -3,9 +3,10 @@ from __future__ import division, absolute_import, print_function
import os
import re
import sys
+import subprocess
from numpy.distutils.fcompiler import FCompiler
-from numpy.distutils.exec_command import exec_command, find_executable
+from numpy.distutils.exec_command import find_executable
from numpy.distutils.misc_util import make_temp_file
from distutils import log
@@ -35,9 +36,13 @@ class IBMFCompiler(FCompiler):
lslpp = find_executable('lslpp')
xlf = find_executable('xlf')
if os.path.exists(xlf) and os.path.exists(lslpp):
- s, o = exec_command(lslpp + ' -Lc xlfcmp')
- m = re.search(r'xlfcmp:(?P<version>\d+([.]\d+)+)', o)
- if m: version = m.group('version')
+ try:
+ o = subprocess.check_output([lslpp, '-Lc', 'xlfcmp'])
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ m = re.search(r'xlfcmp:(?P<version>\d+([.]\d+)+)', o)
+ if m: version = m.group('version')
xlf_dir = '/etc/opt/ibmcmp/xlf'
if version is None and os.path.isdir(xlf_dir):
diff --git a/numpy/distutils/fcompiler/pg.py b/numpy/distutils/fcompiler/pg.py
index e6c816baa..99071800a 100644
--- a/numpy/distutils/fcompiler/pg.py
+++ b/numpy/distutils/fcompiler/pg.py
@@ -2,7 +2,6 @@
from __future__ import division, absolute_import, print_function
import sys
-import os
from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file
from sys import platform
@@ -62,8 +61,6 @@ class PGroupFCompiler(FCompiler):
if sys.version_info >= (3, 5):
- import subprocess
- import shlex
import functools
class PGroupFlangCompiler(FCompiler):
diff --git a/numpy/distutils/lib2def.py b/numpy/distutils/lib2def.py
index 0a5364566..2d013a1e3 100644
--- a/numpy/distutils/lib2def.py
+++ b/numpy/distutils/lib2def.py
@@ -2,7 +2,6 @@ from __future__ import division, absolute_import, print_function
import re
import sys
-import os
import subprocess
__doc__ = """This module generates a DEF file from the symbols in
diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py
index e7fa7bc0d..e6bbe1996 100644
--- a/numpy/distutils/mingw32ccompiler.py
+++ b/numpy/distutils/mingw32ccompiler.py
@@ -71,7 +71,6 @@ class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler):
# we need to support 3.2 which doesn't match the standard
# get_versions methods regex
if self.gcc_version is None:
- import re
p = subprocess.Popen(['gcc', '-dumpversion'], shell=True,
stdout=subprocess.PIPE)
out_string = p.stdout.read()
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index 091114f98..67a5f7234 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -13,7 +13,6 @@ import multiprocessing
import distutils
from distutils.errors import DistutilsError
-from distutils.msvccompiler import get_build_architecture
try:
from threading import local as tlocal
except ImportError:
@@ -260,7 +259,7 @@ def minrelpath(path):
return os.sep.join(l)
def sorted_glob(fileglob):
- """sorts output of python glob for http://bugs.python.org/issue30461
+ """sorts output of python glob for https://bugs.python.org/issue30461
to allow extensions to have reproducible build results"""
return sorted(glob.glob(fileglob))
@@ -320,7 +319,7 @@ def make_temp_file(suffix='', prefix='', text=True):
return fo, name
# Hooks for colored terminal output.
-# See also http://www.livinglogic.de/Python/ansistyle
+# See also https://web.archive.org/web/20100314204946/http://www.livinglogic.de/Python/ansistyle
def terminal_has_colors():
if sys.platform=='cygwin' and 'USE_COLOR' not in os.environ:
# Avoid importing curses that causes illegal operation
@@ -1567,7 +1566,6 @@ class Configuration(object):
"""Common implementation for add_library and add_installed_library. Do
not use directly"""
build_info = copy.copy(build_info)
- name = name #+ '__OF__' + self.name
build_info['sources'] = sources
# Sometimes, depends is not set up to an empty list by default, and if
@@ -1693,7 +1691,6 @@ class Configuration(object):
"""
if subst_dict is None:
subst_dict = {}
- basename = os.path.splitext(template)[0]
template = os.path.join(self.package_path, template)
if self.name in self.installed_pkg_config:
@@ -2012,7 +2009,6 @@ class Configuration(object):
f.write('version = %r\n' % (version))
f.close()
- import atexit
def rm_file(f=target,p=self.info):
if delete:
try: os.remove(f); p('removed '+f)
@@ -2054,7 +2050,6 @@ class Configuration(object):
f.write('version = %r\n' % (version))
f.close()
- import atexit
def rm_file(f=target,p=self.info):
if delete:
try: os.remove(f); p('removed '+f)
@@ -2229,7 +2224,6 @@ def is_bootstrapping():
return True
except AttributeError:
return False
- __NUMPY_SETUP__ = False
#########################
@@ -2339,3 +2333,9 @@ def msvc_version(compiler):
raise ValueError("Compiler instance is not msvc (%s)"\
% compiler.compiler_type)
return compiler._MSVCCompiler__version
+
+def get_build_architecture():
+ # Importing distutils.msvccompiler triggers a warning on non-Windows
+ # systems, so delay the import to here.
+ from distutils.msvccompiler import get_build_architecture
+ return get_build_architecture()
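The wrapper added above is the usual lazy-import pattern: the import that would warn at module load time happens only on first call. A generic sketch, with ``make_lazy`` as a hypothetical helper; the actual change simply inlines the import::

    import importlib

    def make_lazy(module_name, attr):
        # Defer importing ``module_name`` until the wrapper is called,
        # keeping this module's own import side-effect free.
        def wrapper(*args, **kwargs):
            mod = importlib.import_module(module_name)
            return getattr(mod, attr)(*args, **kwargs)
        return wrapper

    get_build_architecture = make_lazy('distutils.msvccompiler',
                                       'get_build_architecture')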
diff --git a/numpy/distutils/npy_pkg_config.py b/numpy/distutils/npy_pkg_config.py
index 6fe517659..bfe8b9f77 100644
--- a/numpy/distutils/npy_pkg_config.py
+++ b/numpy/distutils/npy_pkg_config.py
@@ -5,9 +5,9 @@ import re
import os
if sys.version_info[0] < 3:
- from ConfigParser import RawConfigParser, NoOptionError
+ from ConfigParser import RawConfigParser
else:
- from configparser import RawConfigParser, NoOptionError
+ from configparser import RawConfigParser
__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet',
'read_config', 'parse_flags']
@@ -222,9 +222,7 @@ def parse_meta(config):
if not config.has_section('meta'):
raise FormatError("No meta section found !")
- d = {}
- for name, value in config.items('meta'):
- d[name] = value
+ d = dict(config.items('meta'))
for k in ['name', 'description', 'version']:
if not k in d:
@@ -414,7 +412,6 @@ if __name__ == '__main__':
print("%s\t%s - %s" % (info.name, info.name, info.description))
pkg_name = args[1]
- import os
d = os.environ.get('NPY_PKG_CONFIG_PATH')
if d:
info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.', d])
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index 65d7de316..cd63cc849 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -126,7 +126,6 @@ import os
import re
import copy
import warnings
-import atexit
from glob import glob
from functools import reduce
if sys.version_info[0] < 3:
@@ -147,7 +146,8 @@ from distutils import log
from distutils.util import get_platform
from numpy.distutils.exec_command import (
- find_executable, exec_command, get_pythonexe)
+ find_executable, filepath_from_subprocess_output,
+ get_pythonexe)
from numpy.distutils.misc_util import (is_sequence, is_string,
get_shared_lib_extension)
from numpy.distutils.command.config import config as cmd_config
@@ -487,7 +487,7 @@ class FFTWNotFoundError(NotFoundError):
class DJBFFTNotFoundError(NotFoundError):
"""
- DJBFFT (http://cr.yp.to/djbfft.html) libraries not found.
+ DJBFFT (https://cr.yp.to/djbfft.html) libraries not found.
Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [djbfft]) or by setting
the DJBFFT environment variable."""
@@ -495,7 +495,7 @@ class DJBFFTNotFoundError(NotFoundError):
class NumericNotFoundError(NotFoundError):
"""
- Numeric (http://www.numpy.org/) module not found.
+ Numeric (https://www.numpy.org/) module not found.
Get it from above location, install it, and retry setup.py."""
@@ -505,7 +505,7 @@ class X11NotFoundError(NotFoundError):
class UmfpackNotFoundError(NotFoundError):
"""
- UMFPACK sparse solver (http://www.cise.ufl.edu/research/sparse/umfpack/)
+ UMFPACK sparse solver (https://www.cise.ufl.edu/research/sparse/umfpack/)
not found. Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [umfpack]) or by setting
the UMFPACK environment variable."""
@@ -1124,8 +1124,9 @@ class atlas_info(system_info):
lapack = None
atlas_1 = None
for d in lib_dirs:
- atlas = self.check_libs2(d, atlas_libs, [])
+ # FIXME: lapack_atlas is unused
lapack_atlas = self.check_libs2(d, ['lapack_atlas'], [])
+ atlas = self.check_libs2(d, atlas_libs, [])
if atlas is not None:
lib_dirs2 = [d] + self.combine_paths(d, ['atlas*', 'ATLAS*'])
lapack = self.check_libs2(lib_dirs2, lapack_libs, [])
@@ -2107,17 +2108,17 @@ class numerix_info(system_info):
if which[0] is None:
which = "numpy", "defaulted"
try:
- import numpy
+ import numpy # noqa: F401
which = "numpy", "defaulted"
except ImportError:
msg1 = str(get_exception())
try:
- import Numeric
+ import Numeric # noqa: F401
which = "numeric", "defaulted"
except ImportError:
msg2 = str(get_exception())
try:
- import numarray
+ import numarray # noqa: F401
which = "numarray", "defaulted"
except ImportError:
msg3 = str(get_exception())
@@ -2243,8 +2244,12 @@ class _pkg_config_info(system_info):
def get_config_output(self, config_exe, option):
cmd = config_exe + ' ' + self.append_config_exe + ' ' + option
- s, o = exec_command(cmd, use_tee=0)
- if not s:
+ try:
+ o = subprocess.check_output(cmd)
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ o = filepath_from_subprocess_output(o)
return o
def calc_info(self):
@@ -2433,7 +2438,6 @@ class umfpack_info(system_info):
define_macros=[('SCIPY_UMFPACK_H', None)],
swig_opts=['-I' + inc_dir])
- amd = get_info('amd')
dict_append(info, **get_info('amd'))
self.set_info(**info)
@@ -2529,6 +2533,7 @@ def show_all(argv=None):
del show_only[show_only.index(name)]
conf = c()
conf.verbosity = 2
+ # FIXME: r not used
r = conf.get_info()
if show_only:
log.info('Info classes not defined: %s', ','.join(show_only))
diff --git a/numpy/distutils/tests/test_fcompiler.py b/numpy/distutils/tests/test_fcompiler.py
new file mode 100644
index 000000000..95e44b051
--- /dev/null
+++ b/numpy/distutils/tests/test_fcompiler.py
@@ -0,0 +1,44 @@
+from __future__ import division, absolute_import, print_function
+
+from numpy.testing import assert_
+import numpy.distutils.fcompiler
+
+customizable_flags = [
+ ('f77', 'F77FLAGS'),
+ ('f90', 'F90FLAGS'),
+ ('free', 'FREEFLAGS'),
+ ('arch', 'FARCH'),
+ ('debug', 'FDEBUG'),
+ ('flags', 'FFLAGS'),
+ ('linker_so', 'LDFLAGS'),
+]
+
+
+def test_fcompiler_flags(monkeypatch):
+ monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '0')
+ fc = numpy.distutils.fcompiler.new_fcompiler(compiler='none')
+ flag_vars = fc.flag_vars.clone(lambda *args, **kwargs: None)
+
+ for opt, envvar in customizable_flags:
+ new_flag = '-dummy-{}-flag'.format(opt)
+ prev_flags = getattr(flag_vars, opt)
+
+ monkeypatch.setenv(envvar, new_flag)
+ new_flags = getattr(flag_vars, opt)
+ monkeypatch.delenv(envvar)
+ assert_(new_flags == [new_flag])
+
+ monkeypatch.setenv('NPY_DISTUTILS_APPEND_FLAGS', '1')
+
+ for opt, envvar in customizable_flags:
+ new_flag = '-dummy-{}-flag'.format(opt)
+ prev_flags = getattr(flag_vars, opt)
+
+ monkeypatch.setenv(envvar, new_flag)
+ new_flags = getattr(flag_vars, opt)
+ monkeypatch.delenv(envvar)
+ if prev_flags is None:
+ assert_(new_flags == [new_flag])
+ else:
+ assert_(new_flags == prev_flags + [new_flag])
+
diff --git a/numpy/distutils/tests/test_misc_util.py b/numpy/distutils/tests/test_misc_util.py
index 2a3294ddf..3e239cf48 100644
--- a/numpy/distutils/tests/test_misc_util.py
+++ b/numpy/distutils/tests/test_misc_util.py
@@ -79,3 +79,6 @@ def test_installed_npymath_ini():
# Regression test for gh-7707. If npymath.ini wasn't installed, then this
# will give an error.
info = get_info('npymath')
+
+ assert isinstance(info, dict)
+ assert "define_macros" in info
diff --git a/numpy/doc/basics.py b/numpy/doc/basics.py
index 4d3ab046e..c87a40ccd 100644
--- a/numpy/doc/basics.py
+++ b/numpy/doc/basics.py
@@ -9,39 +9,157 @@ Array types and conversions between types
NumPy supports a much greater variety of numerical types than Python does.
This section shows which are available, and how to modify an array's data-type.
-============ ==========================================================
-Data type Description
-============ ==========================================================
-``bool_`` Boolean (True or False) stored as a byte
-``int_`` Default integer type (same as C ``long``; normally either
- ``int64`` or ``int32``)
-intc Identical to C ``int`` (normally ``int32`` or ``int64``)
-intp Integer used for indexing (same as C ``ssize_t``; normally
- either ``int32`` or ``int64``)
-int8 Byte (-128 to 127)
-int16 Integer (-32768 to 32767)
-int32 Integer (-2147483648 to 2147483647)
-int64 Integer (-9223372036854775808 to 9223372036854775807)
-uint8 Unsigned integer (0 to 255)
-uint16 Unsigned integer (0 to 65535)
-uint32 Unsigned integer (0 to 4294967295)
-uint64 Unsigned integer (0 to 18446744073709551615)
-``float_`` Shorthand for ``float64``.
-float16 Half precision float: sign bit, 5 bits exponent,
- 10 bits mantissa
-float32 Single precision float: sign bit, 8 bits exponent,
- 23 bits mantissa
-float64 Double precision float: sign bit, 11 bits exponent,
- 52 bits mantissa
-``complex_`` Shorthand for ``complex128``.
-complex64 Complex number, represented by two 32-bit floats (real
- and imaginary components)
-complex128 Complex number, represented by two 64-bit floats (real
- and imaginary components)
-============ ==========================================================
-
-Additionally to ``intc`` the platform dependent C integer types ``short``,
-``long``, ``longlong`` and their unsigned versions are defined.
+The primitive types supported are tied closely to those in C:
+
+.. list-table::
+ :header-rows: 1
+
+ * - Numpy type
+ - C type
+ - Description
+
+ * - `np.bool`
+ - ``bool``
+ - Boolean (True or False) stored as a byte
+
+ * - `np.byte`
+ - ``signed char``
+ - Platform-defined
+
+ * - `np.ubyte`
+ - ``unsigned char``
+ - Platform-defined
+
+ * - `np.short`
+ - ``short``
+ - Platform-defined
+
+ * - `np.ushort`
+ - ``unsigned short``
+ - Platform-defined
+
+ * - `np.intc`
+ - ``int``
+ - Platform-defined
+
+ * - `np.uintc`
+ - ``unsigned int``
+ - Platform-defined
+
+ * - `np.int_`
+ - ``long``
+ - Platform-defined
+
+ * - `np.uint`
+ - ``unsigned long``
+ - Platform-defined
+
+ * - `np.longlong`
+ - ``long long``
+ - Platform-defined
+
+ * - `np.ulonglong`
+ - ``unsigned long long``
+ - Platform-defined
+
+ * - `np.half` / `np.float16`
+ -
+ - Half precision float:
+ sign bit, 5 bits exponent, 10 bits mantissa
+
+ * - `np.single`
+ - ``float``
+ - Platform-defined single precision float:
+ typically sign bit, 8 bits exponent, 23 bits mantissa
+
+ * - `np.double`
+ - ``double``
+ - Platform-defined double precision float:
+ typically sign bit, 11 bits exponent, 52 bits mantissa.
+
+ * - `np.longdouble`
+ - ``long double``
+ - Platform-defined extended-precision float
+
+ * - `np.csingle`
+ - ``float complex``
+ - Complex number, represented by two single-precision floats (real and imaginary components)
+
+ * - `np.cdouble`
+ - ``double complex``
+ - Complex number, represented by two double-precision floats (real and imaginary components).
+
+ * - `np.clongdouble`
+ - ``long double complex``
+ - Complex number, represented by two extended-precision floats (real and imaginary components).
+
+
+Since many of these have platform-dependent definitions, a set of fixed-size
+aliases are provided:
+
+.. list-table::
+ :header-rows: 1
+
+ * - Numpy type
+ - C type
+ - Description
+
+ * - `np.int8`
+ - ``int8_t``
+ - Byte (-128 to 127)
+
+ * - `np.int16`
+ - ``int16_t``
+ - Integer (-32768 to 32767)
+
+ * - `np.int32`
+ - ``int32_t``
+ - Integer (-2147483648 to 2147483647)
+
+ * - `np.int64`
+ - ``int64_t``
+ - Integer (-9223372036854775808 to 9223372036854775807)
+
+ * - `np.uint8`
+ - ``uint8_t``
+ - Unsigned integer (0 to 255)
+
+ * - `np.uint16`
+ - ``uint16_t``
+ - Unsigned integer (0 to 65535)
+
+ * - `np.uint32`
+ - ``uint32_t``
+ - Unsigned integer (0 to 4294967295)
+
+ * - `np.uint64`
+ - ``uint64_t``
+ - Unsigned integer (0 to 18446744073709551615)
+
+ * - `np.intp`
+ - ``intptr_t``
+ - Integer used for indexing, typically the same as ``ssize_t``
+
+ * - `np.uintp`
+ - ``uintptr_t``
+ - Integer large enough to hold a pointer
+
+ * - `np.float32`
+ - ``float``
+ -
+
+ * - `np.float64` / `np.float_`
+ - ``double``
+ - Note that this matches the precision of the builtin python `float`.
+
+ * - `np.complex64`
+ - ``float complex``
+ - Complex number, represented by two 32-bit floats (real and imaginary components)
+
+ * - `np.complex128` / `np.complex_`
+ - ``double complex``
+ - Note that this matches the precision of the builtin python `complex`.
+
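The platform-defined/fixed-size distinction in the tables above can be checked interactively; the itemsizes shown here are for a typical 64-bit Linux build and will differ on other platforms::

    >>> import numpy as np
    >>> np.dtype(np.int_).itemsize        # platform-defined: C long
    8
    >>> np.dtype(np.intc).itemsize        # platform-defined: C int
    4
    >>> np.dtype(np.int64).itemsize       # fixed-size alias: always 8
    8
    >>> np.dtype(np.longdouble).itemsize  # extended precision, varies
    16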
NumPy numerical types are instances of ``dtype`` (data-type) objects, each
having unique characteristics. Once you have imported NumPy using
diff --git a/numpy/doc/broadcasting.py b/numpy/doc/broadcasting.py
index 717914cda..0bdb6ae7d 100644
--- a/numpy/doc/broadcasting.py
+++ b/numpy/doc/broadcasting.py
@@ -3,6 +3,12 @@
Broadcasting over arrays
========================
+.. note::
+ See `this article
+ <https://numpy.org/devdocs/user/theory.broadcasting.html>`_
+ for illustrations of broadcasting concepts.
+
+
The term broadcasting describes how numpy treats arrays with different
shapes during arithmetic operations. Subject to certain constraints,
the smaller array is "broadcast" across the larger array so that they
@@ -53,9 +59,10 @@ dimensions are compatible when
2) one of them is 1
If these conditions are not met, a
-``ValueError: frames are not aligned`` exception is thrown, indicating that
-the arrays have incompatible shapes. The size of the resulting array
-is the maximum size along each dimension of the input arrays.
+``ValueError: operands could not be broadcast together`` exception is
+thrown, indicating that the arrays have incompatible shapes. The size of
+the resulting array is the maximum size along each dimension of the input
+arrays.
Arrays do not need to have the same *number* of dimensions. For example,
if you have a ``256x256x3`` array of RGB values, and you want to scale
@@ -124,7 +131,7 @@ An example of broadcasting in practice::
(5,)
>>> x + y
- <type 'exceptions.ValueError'>: shape mismatch: objects cannot be broadcast to a single shape
+ ValueError: operands could not be broadcast together with shapes (4,) (5,)
>>> xx.shape
(4, 1)
@@ -171,8 +178,5 @@ Here the ``newaxis`` index operator inserts a new axis into ``a``,
making it a two-dimensional ``4x1`` array. Combining the ``4x1`` array
with ``b``, which has shape ``(3,)``, yields a ``4x3`` array.
-See `this article <http://wiki.scipy.org/EricsBroadcastingDoc>`_
-for illustrations of broadcasting concepts.
-
"""
from __future__ import division, absolute_import, print_function
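The corrected error message is straightforward to reproduce; a short check, with output from a recent numpy (the exact wording can vary slightly between versions)::

    >>> import numpy as np
    >>> np.ones((2, 3)) + np.ones((3,))   # trailing dimensions match
    array([[2., 2., 2.],
           [2., 2., 2.]])
    >>> np.ones((2, 3)) + np.ones((4,))
    Traceback (most recent call last):
        ...
    ValueError: operands could not be broadcast together with shapes (2,3) (4,)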
diff --git a/numpy/doc/glossary.py b/numpy/doc/glossary.py
index 0e1df495b..a3b9423a8 100644
--- a/numpy/doc/glossary.py
+++ b/numpy/doc/glossary.py
@@ -69,7 +69,7 @@ Glossary
micro-processors and used for transmission of data over network protocols.
BLAS
- `Basic Linear Algebra Subprograms <http://en.wikipedia.org/wiki/BLAS>`_
+ `Basic Linear Algebra Subprograms <https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms>`_
broadcast
NumPy can do operations on arrays whose shapes are mismatched::
@@ -155,7 +155,7 @@ Glossary
be used as keys.
For more information on dictionaries, read the
- `Python tutorial <http://docs.python.org/tut>`_.
+ `Python tutorial <https://docs.python.org/tutorial/>`_.
field
In a :term:`structured data type`, each sub-type is called a `field`.
@@ -238,7 +238,7 @@ Glossary
[3, 4]])
For more information, read the section on lists in the `Python
- tutorial <http://docs.python.org/tut>`_. For a mapping
+ tutorial <https://docs.python.org/tutorial/>`_. For a mapping
type (key-value), see *dictionary*.
little-endian
diff --git a/numpy/doc/indexing.py b/numpy/doc/indexing.py
index 5f5033117..087a688bc 100644
--- a/numpy/doc/indexing.py
+++ b/numpy/doc/indexing.py
@@ -93,7 +93,7 @@ well. A few examples illustrates best: ::
[21, 24, 27]])
Note that slices of arrays do not copy the internal array data but
-also produce new views of the original data.
+only produce new views of the original data.
It is possible to index arrays with other arrays for the purposes of
selecting lists of values out of arrays into new arrays. There are
diff --git a/numpy/doc/misc.py b/numpy/doc/misc.py
index 24369871c..a76abe164 100644
--- a/numpy/doc/misc.py
+++ b/numpy/doc/misc.py
@@ -209,7 +209,7 @@ Only a survey of the choices. Little detail on how each works.
Interfacing to Fortran:
-----------------------
The clear choice to wrap Fortran code is
-`f2py <http://docs.scipy.org/doc/numpy/f2py/>`_.
+`f2py <https://docs.scipy.org/doc/numpy/f2py/>`_.
Pyfort is an older alternative, but not supported any longer.
Fwrap is a newer project that looked promising but isn't being developed any
diff --git a/numpy/doc/structured_arrays.py b/numpy/doc/structured_arrays.py
index ab97c5df6..e92a06124 100644
--- a/numpy/doc/structured_arrays.py
+++ b/numpy/doc/structured_arrays.py
@@ -35,26 +35,24 @@ with the field name::
array([('Rex', 5, 81.0), ('Fido', 5, 27.0)],
dtype=[('name', 'S10'), ('age', '<i4'), ('weight', '<f4')])
-Structured arrays are designed for low-level manipulation of structured data,
-for example, for interpreting binary blobs. Structured datatypes are
-designed to mimic 'structs' in the C language, making them also useful for
-interfacing with C code. For these purposes, numpy supports specialized
-features such as subarrays and nested datatypes, and allows manual control over
-the memory layout of the structure.
-
-For simple manipulation of tabular data other pydata projects, such as pandas,
-xarray, or DataArray, provide higher-level interfaces that may be more
-suitable. These projects may also give better performance for tabular data
-analysis because the C-struct-like memory layout of structured arrays can lead
-to poor cache behavior.
+Structured datatypes are designed to be able to mimic 'structs' in the C
+language, and share a similar memory layout. They are meant for interfacing with
+C code and for low-level manipulation of structured buffers, for example for
+interpreting binary blobs. For these purposes they support specialized features
+such as subarrays, nested datatypes, and unions, and allow control over the
+memory layout of the structure.
+
+Users looking to manipulate tabular data, such as data stored in csv files, may find
+other pydata projects more suitable, such as xarray, pandas, or DataArray.
+These provide a high-level interface for tabular data analysis and are better
+optimized for that use. For instance, the C-struct-like memory layout of
+structured arrays in numpy can lead to poor cache behavior in comparison.
.. _defining-structured-types:
Structured Datatypes
====================
-To use structured arrays one first needs to define a structured datatype.
-
A structured datatype can be thought of as a sequence of bytes of a certain
length (the structure's :term:`itemsize`) which is interpreted as a collection
of fields. Each field has a name, a datatype, and a byte offset within the
@@ -180,7 +178,9 @@ values are tuples containing the dtype and byte offset of each field. ::
mappingproxy({'x': (dtype('int64'), 0), 'y': (dtype('float32'), 8)})
Both the ``names`` and ``fields`` attributes will equal ``None`` for
-unstructured arrays.
+unstructured arrays. The recommended way to test if a dtype is structured is
+with `if dt.names is not None` rather than `if dt.names`, to account for dtypes
+with 0 fields.
The string representation of a structured datatype is shown in the "list of
tuples" form if possible, otherwise numpy falls back to using the more general
@@ -397,6 +397,15 @@ typically a non-structured array, except in the case of nested structures.
>>> y.dtype, y.shape, y.strides
(dtype('float32'), (2,), (12,))
+If the accessed field is a subarray, the dimensions of the subarray
+are appended to the shape of the result::
+
+ >>> x = np.zeros((2,2), dtype=[('a', np.int32), ('b', np.float64, (3,3))])
+ >>> x['a'].shape
+ (2, 2)
+ >>> x['b'].shape
+ (2, 2, 3, 3)
+
Accessing Multiple Fields
```````````````````````````
@@ -404,11 +413,10 @@ One can index and assign to a structured array with a multi-field index, where
the index is a list of field names.
.. warning::
- The behavior of multi-field indexes will change from Numpy 1.15 to Numpy
- 1.16.
+ The behavior of multi-field indexes changed from Numpy 1.15 to Numpy 1.16.
-In Numpy 1.16, the result of indexing with a multi-field index will be a view
-into the original array, as follows::
+The result of indexing with a multi-field index is a view into the original
+array, as follows::
>>> a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'i4'), ('c', 'f4')])
>>> a[['a', 'c']]
@@ -420,32 +428,58 @@ in the order they were indexed. Note that unlike for single-field indexing, the
view's dtype has the same itemsize as the original array, and has fields at the
same offsets as in the original array, and unindexed fields are merely missing.
-In Numpy 1.15, indexing an array with a multi-field index returns a copy of
-the result above for 1.16, but with fields packed together in memory as if
-passed through :func:`numpy.lib.recfunctions.repack_fields`. This is the
-behavior since Numpy 1.7.
-
.. warning::
- The new behavior in Numpy 1.16 leads to extra "padding" bytes at the
- location of unindexed fields. You will need to update any code which depends
- on the data having a "packed" layout. For instance code such as::
+ In Numpy 1.15, indexing an array with a multi-field index returned a copy of
+ the result above, but with fields packed together in memory as if
+ passed through :func:`numpy.lib.recfunctions.repack_fields`.
+
+ The new behavior as of Numpy 1.16 leads to extra "padding" bytes at the
+ location of unindexed fields compared to 1.15. You will need to update any
+ code which depends on the data having a "packed" layout. For instance code
+ such as::
+
+ >>> a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'i4'), ('c', 'f4')])
+ >>> a[['a','c']].view('i8') # Fails in Numpy 1.16
+ ValueError: When changing to a smaller dtype, its size must be a divisor of the size of original dtype
+
+ will need to be changed. This code has raised a ``FutureWarning`` since
+ Numpy 1.12, and similar code has raised ``FutureWarning`` since 1.7.
+
+    In 1.16 a number of functions have been introduced in the
+    :mod:`numpy.lib.recfunctions` module to help users account for this
+    change. These are
+    :func:`numpy.lib.recfunctions.repack_fields`,
+ :func:`numpy.lib.recfunctions.structured_to_unstructured`,
+ :func:`numpy.lib.recfunctions.unstructured_to_structured`,
+ :func:`numpy.lib.recfunctions.apply_along_fields`,
+ :func:`numpy.lib.recfunctions.assign_fields_by_name`, and
+ :func:`numpy.lib.recfunctions.require_fields`.
+
+ The function :func:`numpy.lib.recfunctions.repack_fields` can always be
+ used to reproduce the old behavior, as it will return a packed copy of the
+ structured array. The code above, for example, can be replaced with:
+
+ >>> repack_fields(a[['a','c']]).view('i8') # supported in 1.16
+ array([0, 0, 0])
+
+ Furthermore, numpy now provides a new function
+ :func:`numpy.lib.recfunctions.structured_to_unstructured` which is a safer
+ and more efficient alternative for users who wish to convert structured
+    arrays to unstructured arrays, as the view above is often intended to do.
+ This function allows safe conversion to an unstructured type taking into
+ account padding, often avoids a copy, and also casts the datatypes
+ as needed, unlike the view. Code such as:
- >>> a[['a','c']].view('i8') # will fail in Numpy 1.16
- ValueError: When changing to a smaller dtype, its size must be a divisor of the size of original dtype
+ >>> a = np.zeros(3, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
+ >>> a[['x', 'z']].view('f4')
- will need to be changed. This code has raised a ``FutureWarning`` since
- Numpy 1.12.
+ can be made safer by replacing with:
- The following is a recommended fix, which will behave identically in Numpy
- 1.15 and Numpy 1.16::
+ >>> structured_to_unstructured(a[['x', 'z']])
+ array([0, 0, 0])
- >>> from numpy.lib.recfunctions import repack_fields
- >>> repack_fields(a[['a','c']]).view('i8') # supported 1.15 and 1.16
- array([0, 0, 0])
-Assigning to an array with a multi-field index will behave the same in Numpy
-1.15 and Numpy 1.16. In both versions the assignment will modify the original
-array::
+Assignment to an array with a multi-field index modifies the original array::
>>> a[['a', 'c']] = (2, 3)
>>> a
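Taken together, the recommendations in this file amount to roughly the following session (requires numpy >= 1.16 for ``structured_to_unstructured``; output dtypes follow the usual casting rules)::

    >>> import numpy as np
    >>> from numpy.lib.recfunctions import (
    ...     repack_fields, structured_to_unstructured)
    >>> a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'i4'), ('c', 'f4')])
    >>> a.dtype.names is not None          # recommended structured test
    True
    >>> repack_fields(a[['a', 'c']]).view('i8')    # old packed behavior
    array([0, 0, 0])
    >>> structured_to_unstructured(a[['a', 'c']])  # safe conversion
    array([[0., 0.],
           [0., 0.],
           [0., 0.]])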
diff --git a/numpy/doc/subclassing.py b/numpy/doc/subclassing.py
index 3be3d94b3..4b983893a 100644
--- a/numpy/doc/subclassing.py
+++ b/numpy/doc/subclassing.py
@@ -108,7 +108,7 @@ A brief Python primer on ``__new__`` and ``__init__``
``__new__`` is a standard Python method, and, if present, is called
before ``__init__`` when we create a class instance. See the `python
__new__ documentation
-<http://docs.python.org/reference/datamodel.html#object.__new__>`_ for more detail.
+<https://docs.python.org/reference/datamodel.html#object.__new__>`_ for more detail.
For example, consider the following Python code:
diff --git a/numpy/dual.py b/numpy/dual.py
index 8b91da262..3a16a8ec5 100644
--- a/numpy/dual.py
+++ b/numpy/dual.py
@@ -7,7 +7,7 @@ developers to transparently support these accelerated functions when
scipy is available but still support users who have only installed
NumPy.
-.. _Scipy : http://www.scipy.org
+.. _Scipy : https://www.scipy.org
"""
from __future__ import division, absolute_import, print_function
diff --git a/numpy/f2py/__init__.py b/numpy/f2py/__init__.py
index 5075c682d..23a4b7c41 100644
--- a/numpy/f2py/__init__.py
+++ b/numpy/f2py/__init__.py
@@ -7,6 +7,10 @@ from __future__ import division, absolute_import, print_function
__all__ = ['run_main', 'compile', 'f2py_testing']
import sys
+import subprocess
+import os
+
+import numpy as np
from . import f2py2e
from . import f2py_testing
@@ -32,8 +36,12 @@ def compile(source,
Fortran source of module / subroutine to compile
modulename : str, optional
The name of the compiled python module
- extra_args : str, optional
+ extra_args : str or list, optional
Additional parameters passed to f2py
+
+ .. versionchanged:: 1.16.0
+ A list of args may also be provided.
+
verbose : bool, optional
Print f2py output to screen
source_fn : str, optional
@@ -48,27 +56,50 @@ def compile(source,
.. versionadded:: 1.11.0
"""
- from numpy.distutils.exec_command import exec_command
import tempfile
+ import shlex
+
if source_fn is None:
- f = tempfile.NamedTemporaryFile(suffix=extension)
+ f, fname = tempfile.mkstemp(suffix=extension)
+        # f is a file descriptor, so we need to close it
+ # carefully -- not with .close() directly
+ os.close(f)
else:
- f = open(source_fn, 'w')
+ fname = source_fn
try:
- f.write(source)
- f.flush()
+ with open(fname, 'w') as f:
+ f.write(str(source))
+
+ args = ['-c', '-m', modulename, f.name]
+
+ if isinstance(extra_args, np.compat.basestring):
+ is_posix = (os.name == 'posix')
+ extra_args = shlex.split(extra_args, posix=is_posix)
+
+ args.extend(extra_args)
- args = ' -c -m {} {} {}'.format(modulename, f.name, extra_args)
- c = '{} -c "import numpy.f2py as f2py2e;f2py2e.main()" {}'
- c = c.format(sys.executable, args)
- status, output = exec_command(c)
+ c = [sys.executable,
+ '-c',
+ 'import numpy.f2py as f2py2e;f2py2e.main()'] + args
+ try:
+ output = subprocess.check_output(c)
+ except subprocess.CalledProcessError as exc:
+ status = exc.returncode
+ output = ''
+ except OSError:
+ # preserve historic status code used by exec_command()
+ status = 127
+ output = ''
+ else:
+ status = 0
if verbose:
print(output)
finally:
- f.close()
+ if source_fn is None:
+ os.remove(fname)
return status
-from numpy.testing._private.pytesttester import PytestTester
+from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
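A usage sketch for the reworked ``compile`` entry point shown above; this needs a Fortran compiler at runtime, and the module name is arbitrary::

    import numpy.f2py

    fsource = '''
          integer function foo()
          foo = 42
          end
    '''
    # extra_args may now be a list as well as a shlex-style string
    status = numpy.f2py.compile(fsource, modulename='demo',
                                extra_args=['--noopt'], verbose=False)
    # 0 on success; 127 if the interpreter could not be run (see the
    # OSError branch above), matching exec_command()'s historic codes
    assert status == 0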
diff --git a/numpy/f2py/__main__.py b/numpy/f2py/__main__.py
index cb8f261c1..708f7f362 100644
--- a/numpy/f2py/__main__.py
+++ b/numpy/f2py/__main__.py
@@ -1,27 +1,6 @@
# See http://cens.ioc.ee/projects/f2py2e/
from __future__ import division, print_function
-import os
-import sys
-for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]:
- try:
- i = sys.argv.index("--" + mode)
- del sys.argv[i]
- break
- except ValueError:
- pass
-os.environ["NO_SCIPY_IMPORT"] = "f2py"
-if mode == "g3-numpy":
- sys.stderr.write("G3 f2py support is not implemented, yet.\\n")
- sys.exit(1)
-elif mode == "2e-numeric":
- from f2py2e import main
-elif mode == "2e-numarray":
- sys.argv.append("-DNUMARRAY")
- from f2py2e import main
-elif mode == "2e-numpy":
- from numpy.f2py import main
-else:
- sys.stderr.write("Unknown mode: " + repr(mode) + "\\n")
- sys.exit(1)
+from numpy.f2py.f2py2e import main
+
main()
diff --git a/numpy/f2py/capi_maps.py b/numpy/f2py/capi_maps.py
index 8e63d3cff..c41dd77c6 100644
--- a/numpy/f2py/capi_maps.py
+++ b/numpy/f2py/capi_maps.py
@@ -718,10 +718,7 @@ def modsign2map(m):
def cb_sign2map(a, var, index=None):
ret = {'varname': a}
- if index is None or 1: # disable 7712 patch
- ret['varname_i'] = ret['varname']
- else:
- ret['varname_i'] = ret['varname'] + '_' + str(index)
+ ret['varname_i'] = ret['varname']
ret['ctype'] = getctype(var)
if ret['ctype'] in c2capi_map:
ret['atype'] = c2capi_map[ret['ctype']]
diff --git a/numpy/f2py/common_rules.py b/numpy/f2py/common_rules.py
index 1940d4211..62c1ba207 100644
--- a/numpy/f2py/common_rules.py
+++ b/numpy/f2py/common_rules.py
@@ -31,11 +31,9 @@ from .crackfortran import rmbadname
def findcommonblocks(block, top=1):
ret = []
if hascommon(block):
- for n in block['common'].keys():
- vars = {}
- for v in block['common'][n]:
- vars[v] = block['vars'][v]
- ret.append((n, block['common'][n], vars))
+ for key, value in block['common'].items():
+ vars_ = {v: block['vars'][v] for v in value}
+ ret.append((key, value, vars_))
elif hasbody(block):
for b in block['body']:
ret = ret + findcommonblocks(b, 0)
diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py
index 2510e18a0..c4a650585 100755
--- a/numpy/f2py/crackfortran.py
+++ b/numpy/f2py/crackfortran.py
@@ -33,7 +33,7 @@ Note: f2py directive: <commentchar>f2py<line> is read as <line>
Note: pythonmodule is introduced to represent Python module
Usage:
- `postlist=crackfortran(files,funcs)`
+ `postlist=crackfortran(files)`
`postlist` contains declaration information read from the list of files `files`.
`crack2fortran(postlist)` returns a fortran code to be saved to pyf-file
@@ -346,8 +346,6 @@ def readfortrancode(ffile, dowithline=show, istop=1):
cont = 0
finalline = ''
ll = ''
- commentline = re.compile(
- r'(?P<line>([^"]*["][^"]*["][^"!]*|[^\']*\'[^\']*\'[^\'!]*|[^!\'"]*))!{1}(?P<rest>.*)')
includeline = re.compile(
r'\s*include\s*(\'|")(?P<name>[^\'"]*)(\'|")', re.I)
cont1 = re.compile(r'(?P<line>.*)&\s*\Z')
@@ -391,17 +389,10 @@ def readfortrancode(ffile, dowithline=show, istop=1):
break
l = l[:-1]
if not strictf77:
- r = commentline.match(l)
- if r:
- l = r.group('line') + ' ' # Strip comments starting with `!'
- rl = r.group('rest')
- if rl[:4].lower() == 'f2py': # f2py directive
- l = l + 4 * ' '
- r = commentline.match(rl[4:])
- if r:
- l = l + r.group('line')
- else:
- l = l + rl[4:]
+ (l, rl) = split_by_unquoted(l, '!')
+ l += ' '
+ if rl[:5].lower() == '!f2py': # f2py directive
+ l, _ = split_by_unquoted(l + 4 * ' ' + rl[5:], '!')
if l.strip() == '': # Skip empty line
cont = 0
continue
@@ -618,6 +609,25 @@ multilinepattern = re.compile(
r"\s*(?P<before>''')(?P<this>.*?)(?P<after>''')\s*\Z", re.S), 'multiline'
##
+def split_by_unquoted(line, characters):
+ """
+ Splits the line into (line[:i], line[i:]),
+    where i is the index of the first occurrence of one of the characters
+ not within quotes, or len(line) if no such index exists
+ """
+ assert not (set('"\'') & set(characters)), "cannot split by unquoted quotes"
+ r = re.compile(
+ r"\A(?P<before>({single_quoted}|{double_quoted}|{not_quoted})*)"
+ r"(?P<after>{char}.*)\Z".format(
+ not_quoted="[^\"'{}]".format(re.escape(characters)),
+ char="[{}]".format(re.escape(characters)),
+ single_quoted=r"('([^'\\]|(\\.))*')",
+ double_quoted=r'("([^"\\]|(\\.))*")'))
+ m = r.match(line)
+ if m:
+ d = m.groupdict()
+ return (d["before"], d["after"])
+ return (line, "")
def _simplifyargs(argsline):
a = []
@@ -642,12 +652,17 @@ def crackline(line, reset=0):
global filepositiontext, currentfilename, neededmodule, expectbegin
global skipblocksuntil, skipemptyends, previous_context, gotnextfile
- if ';' in line and not (f2pyenhancementspattern[0].match(line) or
- multilinepattern[0].match(line)):
- for l in line.split(';'):
- # XXX: non-zero reset values need testing
- assert reset == 0, repr(reset)
- crackline(l, reset)
+ _, has_semicolon = split_by_unquoted(line, ";")
+ if has_semicolon and not (f2pyenhancementspattern[0].match(line) or
+ multilinepattern[0].match(line)):
+ # XXX: non-zero reset values need testing
+ assert reset == 0, repr(reset)
+ # split line on unquoted semicolons
+ line, semicolon_line = split_by_unquoted(line, ";")
+ while semicolon_line:
+ crackline(line, reset)
+ line, semicolon_line = split_by_unquoted(semicolon_line[1:], ";")
+ crackline(line, reset)
return
if reset < 0:
groupcounter = 0
@@ -802,26 +817,22 @@ def markouterparen(line):
def markoutercomma(line, comma=','):
l = ''
f = 0
- cc = ''
- for c in line:
- if (not cc or cc == ')') and c == '(':
- f = f + 1
- cc = ')'
- elif not cc and c == '\'' and (not l or l[-1] != '\\'):
- f = f + 1
- cc = '\''
- elif c == cc:
- f = f - 1
- if f == 0:
- cc = ''
- elif c == comma and f == 0:
- l = l + '@' + comma + '@'
- continue
- l = l + c
- assert not f, repr((f, line, l, cc))
+ before, after = split_by_unquoted(line, comma + '()')
+ l += before
+ while after:
+ if (after[0] == comma) and (f == 0):
+ l += '@' + comma + '@'
+ else:
+ l += after[0]
+ if after[0] == '(':
+ f += 1
+ elif after[0] == ')':
+ f -= 1
+ before, after = split_by_unquoted(after[1:], comma + '()')
+ l += before
+ assert not f, repr((f, line, l))
return l
-
def unmarkouterparen(line):
r = line.replace('@(@', '(').replace('@)@', ')')
return r
@@ -1838,10 +1849,8 @@ def postcrack2(block, tab='', param_map=None):
if not f90modulevars:
return block
if isinstance(block, list):
- ret = []
- for g in block:
- g = postcrack2(g, tab=tab + '\t', param_map=param_map)
- ret.append(g)
+ ret = [postcrack2(g, tab=tab + '\t', param_map=param_map)
+ for g in block]
return ret
setmesstext(block)
outmess('%sBlock: %s\n' % (tab, block['name']), 0)
@@ -1859,10 +1868,8 @@ def postcrack2(block, tab='', param_map=None):
val = kind['kind']
if val in param_map:
kind['kind'] = param_map[val]
- new_body = []
- for b in block['body']:
- b = postcrack2(b, tab=tab + '\t', param_map=param_map)
- new_body.append(b)
+ new_body = [postcrack2(b, tab=tab + '\t', param_map=param_map)
+ for b in block['body']]
block['body'] = new_body
return block
@@ -2392,7 +2399,7 @@ def _selected_real_kind_func(p, r=0, radix=0):
if p < 16:
return 8
machine = platform.machine().lower()
- if machine.startswith(('aarch64', 'power', 'ppc64', 's390x')):
+ if machine.startswith(('aarch64', 'power', 'ppc64', 's390x', 'sparc')):
if p <= 20:
return 16
else:
@@ -3200,10 +3207,8 @@ def vars2fortran(block, vars, args, tab='', as_interface=False):
vardef = '%s(kind=%s)' % (vardef, selector['kind'])
c = ' '
if 'attrspec' in vars[a]:
- attr = []
- for l in vars[a]['attrspec']:
- if l not in ['external']:
- attr.append(l)
+ attr = [l for l in vars[a]['attrspec']
+ if l not in ['external']]
if attr:
vardef = '%s, %s' % (vardef, ','.join(attr))
c = ','
@@ -3330,7 +3335,7 @@ if __name__ == "__main__":
and also be sure that the files do not contain programs without program statement).
""", 0)
- postlist = crackfortran(files, funcs)
+ postlist = crackfortran(files)
if pyffilename:
outmess('Writing fortran code to file %s\n' % repr(pyffilename), 0)
pyf = crack2fortran(postlist)
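The behavior of the new ``split_by_unquoted`` helper on representative inputs, inferred from its docstring above (quoted occurrences are skipped; the split character is kept in the second part)::

    >>> from numpy.f2py.crackfortran import split_by_unquoted
    >>> split_by_unquoted("write(*,*) 'a!b' ! comment", '!')
    ("write(*,*) 'a!b' ", '! comment')
    >>> split_by_unquoted('x = 1; y = 2', ';')
    ('x = 1', '; y = 2')
    >>> split_by_unquoted('no split characters here', ';')
    ('no split characters here', '')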
diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py
index 254f99966..8750ed0b3 100755
--- a/numpy/f2py/f2py2e.py
+++ b/numpy/f2py/f2py2e.py
@@ -644,13 +644,25 @@ def main():
from numpy.distutils.system_info import show_all
show_all()
return
+
+ # Probably outdated options that were not working before 1.16
+ if '--g3-numpy' in sys.argv[1:]:
+ sys.stderr.write("G3 f2py support is not implemented, yet.\\n")
+ sys.exit(1)
+ elif '--2e-numeric' in sys.argv[1:]:
+ sys.argv.remove('--2e-numeric')
+ elif '--2e-numarray' in sys.argv[1:]:
+        # Note that this errors because the -DNUMARRAY argument is
+        # not recognized. Just here for backward compatibility and the
+        # error message.
+ sys.argv.append("-DNUMARRAY")
+ sys.argv.remove('--2e-numarray')
+ elif '--2e-numpy' in sys.argv[1:]:
+ sys.argv.remove('--2e-numpy')
+ else:
+ pass
+
if '-c' in sys.argv[1:]:
run_compile()
else:
run_main(sys.argv[1:])
-
-# if __name__ == "__main__":
-# main()
-
-
-# EOF
diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py
index 36e2222ea..23d36b2c2 100644
--- a/numpy/f2py/rules.py
+++ b/numpy/f2py/rules.py
@@ -1452,7 +1452,7 @@ def buildapi(rout):
['\\begin{description}'] + rd[k][1:] +\
['\\end{description}']
- # Workaround for Python 2.6, 2.6.1 bug: http://bugs.python.org/issue4720
+ # Workaround for Python 2.6, 2.6.1 bug: https://bugs.python.org/issue4720
if rd['keyformat'] or rd['xaformat']:
argformat = rd['argformat']
if isinstance(argformat, list):
diff --git a/numpy/f2py/setup.py b/numpy/f2py/setup.py
index 73cb3b8bf..c0c50ce54 100644
--- a/numpy/f2py/setup.py
+++ b/numpy/f2py/setup.py
@@ -18,69 +18,25 @@ Pearu Peterson
"""
from __future__ import division, print_function
-__version__ = "$Id: setup.py,v 1.32 2005/01/30 17:22:14 pearu Exp $"
-
-import os
-import sys
-from distutils.dep_util import newer
-from numpy.distutils import log
from numpy.distutils.core import setup
from numpy.distutils.misc_util import Configuration
-from __version__ import version
-
-
-def _get_f2py_shebang():
- """ Return shebang line for f2py script
- If we are building a binary distribution format, then the shebang line
- should be ``#!python`` rather than ``#!`` followed by the contents of
- ``sys.executable``.
- """
- if set(('bdist_wheel', 'bdist_egg', 'bdist_wininst',
- 'bdist_rpm')).intersection(sys.argv):
- return '#!python'
- return '#!' + sys.executable
+from __version__ import version
def configuration(parent_package='', top_path=None):
config = Configuration('f2py', parent_package, top_path)
-
config.add_data_dir('tests')
-
- config.add_data_files('src/fortranobject.c',
- 'src/fortranobject.h',
- )
-
- config.make_svn_version_py()
-
- def generate_f2py_py(build_dir):
- f2py_exe = 'f2py' + os.path.basename(sys.executable)[6:]
- if f2py_exe[-4:] == '.exe':
- f2py_exe = f2py_exe[:-4] + '.py'
- if 'bdist_wininst' in sys.argv and f2py_exe[-3:] != '.py':
- f2py_exe = f2py_exe + '.py'
- target = os.path.join(build_dir, f2py_exe)
- if newer(__file__, target):
- log.info('Creating %s', target)
- f = open(target, 'w')
- f.write(_get_f2py_shebang() + '\n')
- mainloc = os.path.join(os.path.dirname(__file__), "__main__.py")
- with open(mainloc) as mf:
- f.write(mf.read())
- f.close()
- return target
-
- config.add_scripts(generate_f2py_py)
-
- log.info('F2PY Version %s', config.get_version())
-
+ config.add_data_files(
+ 'src/fortranobject.c',
+ 'src/fortranobject.h')
return config
+
if __name__ == "__main__":
config = configuration(top_path='')
- print('F2PY Version', version)
config = config.todict()
config['download_url'] = "http://cens.ioc.ee/projects/f2py2e/2.x"\
diff --git a/numpy/f2py/src/test/foomodule.c b/numpy/f2py/src/test/foomodule.c
index d7ecc2519..733fab0be 100644
--- a/numpy/f2py/src/test/foomodule.c
+++ b/numpy/f2py/src/test/foomodule.c
@@ -116,8 +116,6 @@ static PyMethodDef foo_module_methods[] = {
void initfoo() {
int i;
PyObject *m, *d, *s;
- PyTypeObject *t;
- PyObject *f;
import_array();
m = Py_InitModule("foo", foo_module_methods);
diff --git a/numpy/f2py/tests/test_array_from_pyobj.py b/numpy/f2py/tests/test_array_from_pyobj.py
index 8b021491f..a80090185 100644
--- a/numpy/f2py/tests/test_array_from_pyobj.py
+++ b/numpy/f2py/tests/test_array_from_pyobj.py
@@ -1,14 +1,14 @@
from __future__ import division, absolute_import, print_function
-import unittest
import os
import sys
import copy
+import pytest
from numpy import (
array, alltrue, ndarray, zeros, dtype, intp, clongdouble
)
-from numpy.testing import assert_, assert_equal, SkipTest
+from numpy.testing import assert_, assert_equal
from numpy.core.multiarray import typeinfo
from . import util
@@ -24,7 +24,7 @@ def setup_module():
# Check compiler availability first
if not util.has_c_compiler():
- raise SkipTest("No C compiler available")
+ pytest.skip("No C compiler available")
if wrap is None:
config_code = """
@@ -304,10 +304,16 @@ class TestIntent(object):
assert_(not intent.in_.is_intent('c'))
-class _test_shared_memory(object):
+class TestSharedMemory(object):
num2seq = [1, 2]
num23seq = [[1, 2, 3], [4, 5, 6]]
+ @pytest.fixture(autouse=True, scope='class', params=_type_names)
+ def setup_type(self, request):
+ request.cls.type = Type(request.param)
+ request.cls.array = lambda self, dims, intent, obj: \
+ Array(Type(request.param), dims, intent, obj)
+
def test_in_from_2seq(self):
a = self.array([2], intent.in_, self.num2seq)
assert_(not a.has_shared_memory())
@@ -573,12 +579,3 @@ class _test_shared_memory(object):
assert_(obj.flags['FORTRAN']) # obj attributes changed inplace!
assert_(not obj.flags['CONTIGUOUS'])
assert_(obj.dtype.type is self.type.dtype) # obj changed inplace!
-
-
-for t in _type_names:
- exec('''\
-class TestGen_%s(_test_shared_memory):
- def setup(self):
- self.type = Type(%r)
- array = lambda self,dims,intent,obj: Array(Type(%r),dims,intent,obj)
-''' % (t, t, t))
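The ``exec``-generated per-type classes above are replaced by a class-scoped, parametrized, autouse fixture. The pattern in isolation, with illustrative names::

    import pytest

    _type_names = ['float32', 'float64']

    class TestPerType(object):
        # Every test method in the class runs once per parameter; the
        # fixture attaches the current parameter to the class first.
        @pytest.fixture(autouse=True, scope='class', params=_type_names)
        def setup_type(self, request):
            request.cls.type_name = request.param

        def test_sees_param(self):
            assert self.type_name in _type_names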
diff --git a/numpy/f2py/tests/test_block_docstring.py b/numpy/f2py/tests/test_block_docstring.py
index b2981aa82..8fc072a5e 100644
--- a/numpy/f2py/tests/test_block_docstring.py
+++ b/numpy/f2py/tests/test_block_docstring.py
@@ -1,6 +1,5 @@
from __future__ import division, absolute_import, print_function
-import textwrap
import sys
import pytest
from . import util
diff --git a/numpy/f2py/tests/test_callback.py b/numpy/f2py/tests/test_callback.py
index 4e74947b0..824ef7b0c 100644
--- a/numpy/f2py/tests/test_callback.py
+++ b/numpy/f2py/tests/test_callback.py
@@ -62,9 +62,9 @@ cf2py intent(out) a
"""
@pytest.mark.slow
- def test_all(self):
- for name in "t,t2".split(","):
- self.check_function(name)
+ @pytest.mark.parametrize('name', 't,t2'.split(','))
+ def test_all(self, name):
+ self.check_function(name)
@pytest.mark.slow
def test_docstring(self):
diff --git a/numpy/f2py/tests/test_compile_function.py b/numpy/f2py/tests/test_compile_function.py
new file mode 100644
index 000000000..74e0804e2
--- /dev/null
+++ b/numpy/f2py/tests/test_compile_function.py
@@ -0,0 +1,108 @@
+"""See https://github.com/numpy/numpy/pull/11937.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import sys
+import os
+import uuid
+from importlib import import_module
+import pytest
+
+import numpy.f2py
+
+from numpy.testing import assert_equal
+from . import util
+
+
+def setup_module():
+ if sys.platform == 'win32' and sys.version_info[0] < 3:
+ pytest.skip('Fails with MinGW64 Gfortran (Issue #9673)')
+ if not util.has_c_compiler():
+ pytest.skip("Needs C compiler")
+ if not util.has_f77_compiler():
+ pytest.skip('Needs FORTRAN 77 compiler')
+
+
+# extra_args can be a list (since gh-11937) or string.
+# also test absence of extra_args
+@pytest.mark.parametrize(
+ "extra_args", [['--noopt', '--debug'], '--noopt --debug', '']
+ )
+def test_f2py_init_compile(extra_args):
+ # flush through the f2py __init__ compile() function code path as a
+ # crude test for input handling following migration from
+ # exec_command() to subprocess.check_output() in gh-11937
+
+ # the Fortran 77 syntax requires 6 spaces before any commands, but
+    # more space may be added.
+ fsource = """
+ integer function foo()
+ foo = 10 + 5
+ return
+ end
+ """
+ # use various helper functions in util.py to enable robust build /
+ # compile and reimport cycle in test suite
+ moddir = util.get_module_dir()
+ modname = util.get_temp_module_name()
+
+ cwd = os.getcwd()
+ target = os.path.join(moddir, str(uuid.uuid4()) + '.f')
+ # try running compile() with and without a source_fn provided so
+ # that the code path where a temporary file for writing Fortran
+ # source is created is also explored
+ for source_fn in [target, None]:
+ # mimic the path changing behavior used by build_module() in
+ # util.py, but don't actually use build_module() because it has
+ # its own invocation of subprocess that circumvents the
+ # f2py.compile code block under test
+ try:
+ os.chdir(moddir)
+ ret_val = numpy.f2py.compile(
+ fsource,
+ modulename=modname,
+ extra_args=extra_args,
+ source_fn=source_fn
+ )
+ finally:
+ os.chdir(cwd)
+
+ # check for compile success return value
+ assert_equal(ret_val, 0)
+
+ # we are not currently able to import the Python-Fortran
+ # interface module on Windows / Appveyor, even though we do get
+ # successful compilation on that platform with Python 3.x
+ if sys.platform != 'win32':
+ # check for sensible result of Fortran function; that means
+ # we can import the module name in Python and retrieve the
+ # result of the sum operation
+ return_check = import_module(modname)
+ calc_result = return_check.foo()
+ assert_equal(calc_result, 15)
+
+
+def test_f2py_init_compile_failure():
+ # verify an appropriate integer status value returned by
+ # f2py.compile() when invalid Fortran is provided
+ ret_val = numpy.f2py.compile(b"invalid")
+ assert_equal(ret_val, 1)
+
+
+def test_f2py_init_compile_bad_cmd():
+ # verify that usage of invalid command in f2py.compile() returns
+ # status value of 127 for historic consistency with exec_command()
+ # error handling
+
+ # patch the sys Python exe path temporarily to induce an OSError
+    # downstream. NOTE: how bad of an idea is this patching?
+ try:
+ temp = sys.executable
+ sys.executable = 'does not exist'
+
+ # the OSError should take precedence over invalid Fortran
+ ret_val = numpy.f2py.compile(b"invalid")
+ assert_equal(ret_val, 127)
+ finally:
+ sys.executable = temp
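As the NOTE in the test admits, hand-patching ``sys.executable`` is fragile; pytest's ``monkeypatch`` fixture restores the attribute automatically even when the assertion fails. A sketch of the equivalent test::

    import sys
    import numpy.f2py
    from numpy.testing import assert_equal

    def test_f2py_init_compile_bad_cmd(monkeypatch):
        # monkeypatch reverts sys.executable at teardown, pass or fail
        monkeypatch.setattr(sys, 'executable', 'does not exist')
        ret_val = numpy.f2py.compile(b"invalid")
        assert_equal(ret_val, 127)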
diff --git a/numpy/f2py/tests/test_parameter.py b/numpy/f2py/tests/test_parameter.py
index a0bbd9460..6a378687a 100644
--- a/numpy/f2py/tests/test_parameter.py
+++ b/numpy/f2py/tests/test_parameter.py
@@ -1,7 +1,6 @@
from __future__ import division, absolute_import, print_function
import os
-import math
import pytest
import numpy as np
diff --git a/numpy/f2py/tests/test_quoted_character.py b/numpy/f2py/tests/test_quoted_character.py
new file mode 100644
index 000000000..c9a1c36f5
--- /dev/null
+++ b/numpy/f2py/tests/test_quoted_character.py
@@ -0,0 +1,35 @@
+"""See https://github.com/numpy/numpy/pull/10676.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import sys
+from importlib import import_module
+import pytest
+
+from numpy.testing import assert_equal
+from . import util
+
+
+class TestQuotedCharacter(util.F2PyTest):
+ code = """
+ SUBROUTINE FOO(OUT1, OUT2, OUT3, OUT4, OUT5, OUT6)
+ CHARACTER SINGLE, DOUBLE, SEMICOL, EXCLA, OPENPAR, CLOSEPAR
+ PARAMETER (SINGLE="'", DOUBLE='"', SEMICOL=';', EXCLA="!",
+ 1 OPENPAR="(", CLOSEPAR=")")
+ CHARACTER OUT1, OUT2, OUT3, OUT4, OUT5, OUT6
+Cf2py intent(out) OUT1, OUT2, OUT3, OUT4, OUT5, OUT6
+ OUT1 = SINGLE
+ OUT2 = DOUBLE
+ OUT3 = SEMICOL
+ OUT4 = EXCLA
+ OUT5 = OPENPAR
+ OUT6 = CLOSEPAR
+ RETURN
+ END
+ """
+
+ @pytest.mark.skipif(sys.platform=='win32',
+ reason='Fails with MinGW64 Gfortran (Issue #9673)')
+ def test_quoted_character(self):
+ assert_equal(self.module.foo(), (b"'", b'"', b';', b'!', b'(', b')'))
diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py
index d695de61b..3adae635d 100644
--- a/numpy/f2py/tests/test_regression.py
+++ b/numpy/f2py/tests/test_regression.py
@@ -1,7 +1,6 @@
from __future__ import division, absolute_import, print_function
import os
-import math
import pytest
import numpy as np
diff --git a/numpy/f2py/tests/test_return_character.py b/numpy/f2py/tests/test_return_character.py
index 4a94c5b98..fc3a58d36 100644
--- a/numpy/f2py/tests/test_return_character.py
+++ b/numpy/f2py/tests/test_return_character.py
@@ -82,9 +82,9 @@ cf2py intent(out) ts
"""
@pytest.mark.slow
- def test_all(self):
- for name in "t0,t1,t5,s0,s1,s5,ss".split(","):
- self.check_function(getattr(self.module, name))
+ @pytest.mark.parametrize('name', 't0,t1,t5,s0,s1,s5,ss'.split(','))
+ def test_all(self, name):
+ self.check_function(getattr(self.module, name))
class TestF90ReturnCharacter(TestReturnCharacter):
@@ -141,6 +141,6 @@ end module f90_return_char
"""
@pytest.mark.slow
- def test_all(self):
- for name in "t0,t1,t5,ts,s0,s1,s5,ss".split(","):
- self.check_function(getattr(self.module.f90_return_char, name))
+ @pytest.mark.parametrize('name', 't0,t1,t5,ts,s0,s1,s5,ss'.split(','))
+ def test_all(self, name):
+ self.check_function(getattr(self.module.f90_return_char, name))
diff --git a/numpy/f2py/tests/test_return_complex.py b/numpy/f2py/tests/test_return_complex.py
index 152cfc960..43c884dfb 100644
--- a/numpy/f2py/tests/test_return_complex.py
+++ b/numpy/f2py/tests/test_return_complex.py
@@ -105,9 +105,9 @@ cf2py intent(out) td
"""
@pytest.mark.slow
- def test_all(self):
- for name in "t0,t8,t16,td,s0,s8,s16,sd".split(","):
- self.check_function(getattr(self.module, name))
+ @pytest.mark.parametrize('name', 't0,t8,t16,td,s0,s8,s16,sd'.split(','))
+ def test_all(self, name):
+ self.check_function(getattr(self.module, name))
class TestF90ReturnComplex(TestReturnComplex):
@@ -164,6 +164,6 @@ end module f90_return_complex
"""
@pytest.mark.slow
- def test_all(self):
- for name in "t0,t8,t16,td,s0,s8,s16,sd".split(","):
- self.check_function(getattr(self.module.f90_return_complex, name))
+ @pytest.mark.parametrize('name', 't0,t8,t16,td,s0,s8,s16,sd'.split(','))
+ def test_all(self, name):
+ self.check_function(getattr(self.module.f90_return_complex, name))
diff --git a/numpy/f2py/tests/test_return_integer.py b/numpy/f2py/tests/test_return_integer.py
index 7a4b07c4f..22f4acfdf 100644
--- a/numpy/f2py/tests/test_return_integer.py
+++ b/numpy/f2py/tests/test_return_integer.py
@@ -104,9 +104,10 @@ cf2py intent(out) t8
"""
@pytest.mark.slow
- def test_all(self):
- for name in "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(","):
- self.check_function(getattr(self.module, name))
+ @pytest.mark.parametrize('name',
+ 't0,t1,t2,t4,t8,s0,s1,s2,s4,s8'.split(','))
+ def test_all(self, name):
+ self.check_function(getattr(self.module, name))
class TestF90ReturnInteger(TestReturnInteger):
@@ -174,6 +175,7 @@ end module f90_return_integer
"""
@pytest.mark.slow
- def test_all(self):
- for name in "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(","):
- self.check_function(getattr(self.module.f90_return_integer, name))
+ @pytest.mark.parametrize('name',
+ 't0,t1,t2,t4,t8,s0,s1,s2,s4,s8'.split(','))
+ def test_all(self, name):
+ self.check_function(getattr(self.module.f90_return_integer, name))
diff --git a/numpy/f2py/tests/test_return_logical.py b/numpy/f2py/tests/test_return_logical.py
index 403f4e205..96f215a91 100644
--- a/numpy/f2py/tests/test_return_logical.py
+++ b/numpy/f2py/tests/test_return_logical.py
@@ -113,9 +113,9 @@ c end
"""
@pytest.mark.slow
- def test_all(self):
- for name in "t0,t1,t2,t4,s0,s1,s2,s4".split(","):
- self.check_function(getattr(self.module, name))
+ @pytest.mark.parametrize('name', 't0,t1,t2,t4,s0,s1,s2,s4'.split(','))
+ def test_all(self, name):
+ self.check_function(getattr(self.module, name))
class TestF90ReturnLogical(TestReturnLogical):
@@ -183,6 +183,7 @@ end module f90_return_logical
"""
@pytest.mark.slow
- def test_all(self):
- for name in "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(","):
- self.check_function(getattr(self.module.f90_return_logical, name))
+ @pytest.mark.parametrize('name',
+ 't0,t1,t2,t4,t8,s0,s1,s2,s4,s8'.split(','))
+ def test_all(self, name):
+ self.check_function(getattr(self.module.f90_return_logical, name))
diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py
index 466fd4970..5fa5dadd2 100644
--- a/numpy/f2py/tests/util.py
+++ b/numpy/f2py/tests/util.py
@@ -15,18 +15,16 @@ import shutil
import atexit
import textwrap
import re
-import random
import pytest
-import numpy.f2py
from numpy.compat import asbytes, asstr
-from numpy.testing import SkipTest, temppath
+from numpy.testing import temppath
from importlib import import_module
try:
from hashlib import md5
except ImportError:
- from md5 import new as md5
+ from md5 import new as md5 # noqa: F401
#
# Maintaining a temporary module directory
@@ -322,14 +320,14 @@ class F2PyTest(object):
def setup(self):
if sys.platform == 'win32':
- raise SkipTest('Fails with MinGW64 Gfortran (Issue #9673)')
+ pytest.skip('Fails with MinGW64 Gfortran (Issue #9673)')
if self.module is not None:
return
# Check compiler availability first
if not has_c_compiler():
- raise SkipTest("No C compiler available")
+ pytest.skip("No C compiler available")
codes = []
if self.sources:
@@ -345,9 +343,9 @@ class F2PyTest(object):
elif fn.endswith('.f90'):
needs_f90 = True
if needs_f77 and not has_f77_compiler():
- raise SkipTest("No Fortran 77 compiler available")
+ pytest.skip("No Fortran 77 compiler available")
if needs_f90 and not has_f90_compiler():
- raise SkipTest("No Fortran 90 compiler available")
+ pytest.skip("No Fortran 90 compiler available")
# Build the module
if self.code is not None:
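Swapping `raise SkipTest(...)` for `pytest.skip(...)` is part of moving the suite onto pytest's native skip machinery: `pytest.skip` can be called from inside a test or its setup and is recorded as a skip, without relying on the exception re-exported by `numpy.testing` from the nose era. A sketch of the intended behavior under a plain pytest run:

    import sys
    import pytest

    def setup():
        # Recorded by pytest as a skip, not an error or failure.
        if sys.platform == 'win32':
            pytest.skip('Fails with MinGW64 Gfortran (Issue #9673)')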
diff --git a/numpy/fft/__init__.py b/numpy/fft/__init__.py
index bbb6ec8c7..44243b483 100644
--- a/numpy/fft/__init__.py
+++ b/numpy/fft/__init__.py
@@ -6,6 +6,6 @@ from .info import __doc__
from .fftpack import *
from .helper import *
-from numpy.testing._private.pytesttester import PytestTester
+from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
diff --git a/numpy/fft/fftpack.py b/numpy/fft/fftpack.py
index e17e1cb34..de675936f 100644
--- a/numpy/fft/fftpack.py
+++ b/numpy/fft/fftpack.py
@@ -35,8 +35,12 @@ from __future__ import division, absolute_import, print_function
__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
+import functools
+
from numpy.core import (array, asarray, zeros, swapaxes, shape, conjugate,
take, sqrt)
+from numpy.core.multiarray import normalize_axis_index
+from numpy.core import overrides
from . import fftpack_lite as fftpack
from .helper import _FFTCache
@@ -44,9 +48,14 @@ _fft_cache = _FFTCache(max_size_in_mb=100, max_item_count=32)
_real_fft_cache = _FFTCache(max_size_in_mb=100, max_item_count=32)
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy.fft')
+
+
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
work_function=fftpack.cfftf, fft_cache=_fft_cache):
a = asarray(a)
+ axis = normalize_axis_index(axis, a.ndim)
if n is None:
n = a.shape[axis]
@@ -78,10 +87,10 @@ def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
z[tuple(index)] = a
a = z
- if axis != -1:
+ if axis != a.ndim - 1:
a = swapaxes(a, axis, -1)
r = work_function(a, wsave)
- if axis != -1:
+ if axis != a.ndim - 1:
r = swapaxes(r, axis, -1)
# As soon as we put wsave back into the cache, another thread could pick it
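The two-sided change in this hunk is deliberate: after `normalize_axis_index` (added above), `axis` is always non-negative, so the old `axis != -1` test would be true even when `axis` already named the last dimension, forcing a needless pair of `swapaxes` calls. Comparing against `a.ndim - 1` restores the intended fast path. Roughly:

    import numpy as np
    from numpy.core.multiarray import normalize_axis_index

    a = np.empty((4, 5, 6))
    axis = normalize_axis_index(-1, a.ndim)  # -> 2, never -1
    assert axis == a.ndim - 1                # already last axis: no swap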
@@ -99,6 +108,11 @@ def _unitary(norm):
return norm is not None
+def _fft_dispatcher(a, n=None, axis=None, norm=None):
+ return (a,)
+
+
+@array_function_dispatch(_fft_dispatcher)
def fft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform.
@@ -195,6 +209,7 @@ def fft(a, n=None, axis=-1, norm=None):
return output
+@array_function_dispatch(_fft_dispatcher)
def ifft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
@@ -288,6 +303,8 @@ def ifft(a, n=None, axis=-1, norm=None):
return output * (1 / (sqrt(n) if unitary else n))
+
+@array_function_dispatch(_fft_dispatcher)
def rfft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
@@ -377,6 +394,7 @@ def rfft(a, n=None, axis=-1, norm=None):
return output
+@array_function_dispatch(_fft_dispatcher)
def irfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse of the n-point DFT for real input.
@@ -467,6 +485,7 @@ def irfft(a, n=None, axis=-1, norm=None):
return output * (1 / (sqrt(n) if unitary else n))
+@array_function_dispatch(_fft_dispatcher)
def hfft(a, n=None, axis=-1, norm=None):
"""
Compute the FFT of a signal that has Hermitian symmetry, i.e., a real
@@ -549,6 +568,7 @@ def hfft(a, n=None, axis=-1, norm=None):
return irfft(conjugate(a), n, axis) * (sqrt(n) if unitary else n)
+@array_function_dispatch(_fft_dispatcher)
def ihfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse FFT of a signal that has Hermitian symmetry.
@@ -639,6 +659,11 @@ def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None):
return a
+def _fftn_dispatcher(a, s=None, axes=None, norm=None):
+ return (a,)
+
+
+@array_function_dispatch(_fftn_dispatcher)
def fftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform.
@@ -736,6 +761,7 @@ def fftn(a, s=None, axes=None, norm=None):
return _raw_fftnd(a, s, axes, fft, norm)
+@array_function_dispatch(_fftn_dispatcher)
def ifftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
@@ -833,6 +859,7 @@ def ifftn(a, s=None, axes=None, norm=None):
return _raw_fftnd(a, s, axes, ifft, norm)
+@array_function_dispatch(_fftn_dispatcher)
def fft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional discrete Fourier Transform
@@ -923,6 +950,7 @@ def fft2(a, s=None, axes=(-2, -1), norm=None):
return _raw_fftnd(a, s, axes, fft, norm)
+@array_function_dispatch(_fftn_dispatcher)
def ifft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse discrete Fourier Transform.
@@ -1010,6 +1038,7 @@ def ifft2(a, s=None, axes=(-2, -1), norm=None):
return _raw_fftnd(a, s, axes, ifft, norm)
+@array_function_dispatch(_fftn_dispatcher)
def rfftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform for real input.
@@ -1102,6 +1131,7 @@ def rfftn(a, s=None, axes=None, norm=None):
return a
+@array_function_dispatch(_fftn_dispatcher)
def rfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional FFT of a real array.
@@ -1139,6 +1169,7 @@ def rfft2(a, s=None, axes=(-2, -1), norm=None):
return rfftn(a, s, axes, norm)
+@array_function_dispatch(_fftn_dispatcher)
def irfftn(a, s=None, axes=None, norm=None):
"""
Compute the inverse of the N-dimensional FFT of real input.
@@ -1233,6 +1264,7 @@ def irfftn(a, s=None, axes=None, norm=None):
return a
+@array_function_dispatch(_fftn_dispatcher)
def irfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse FFT of a real array.
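Every public function in `numpy.fft` now goes through `array_function_dispatch`: the tiny `_fft_dispatcher`/`_fftn_dispatcher` helpers only report which arguments are array-like, and the decorator uses that to let array-likes implementing `__array_function__` (NEP 18) intercept the call. A minimal sketch of what this enables (`MyArray` is hypothetical, and at the time of this change the protocol was still opt-in via the `NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=1` environment variable):

    import numpy as np

    class MyArray(object):
        # Duck array opting in to NEP 18 dispatch.
        def __array_function__(self, func, types, args, kwargs):
            if func is np.fft.fft:
                return 'fft handled by MyArray'
            return NotImplemented

    np.fft.fft(MyArray())   # -> 'fft handled by MyArray'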
diff --git a/numpy/fft/helper.py b/numpy/fft/helper.py
index 729121f31..864768df5 100644
--- a/numpy/fft/helper.py
+++ b/numpy/fft/helper.py
@@ -11,6 +11,7 @@ except ImportError:
import dummy_threading as threading
from numpy.compat import integer_types
from numpy.core import integer, empty, arange, asarray, roll
+from numpy.core.overrides import array_function_dispatch, set_module
# Created by Pearu Peterson, September 2002
@@ -19,6 +20,11 @@ __all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq']
integer_types = integer_types + (integer,)
+def _fftshift_dispatcher(x, axes=None):
+ return (x,)
+
+
+@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft')
def fftshift(x, axes=None):
"""
Shift the zero-frequency component to the center of the spectrum.
@@ -75,6 +81,7 @@ def fftshift(x, axes=None):
return roll(x, shift, axes)
+@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft')
def ifftshift(x, axes=None):
"""
The inverse of `fftshift`. Although identical for even-length `x`, the
@@ -121,6 +128,7 @@ def ifftshift(x, axes=None):
return roll(x, shift, axes)
+@set_module('numpy.fft')
def fftfreq(n, d=1.0):
"""
Return the Discrete Fourier Transform sample frequencies.
@@ -167,9 +175,9 @@ def fftfreq(n, d=1.0):
p2 = arange(-(n//2), 0, dtype=int)
results[N:] = p2
return results * val
- #return hstack((arange(0,(n-1)/2 + 1), arange(-(n/2),0))) / (n*d)
+@set_module('numpy.fft')
def rfftfreq(n, d=1.0):
"""
Return the Discrete Fourier Transform sample frequencies
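For functions such as `fftfreq` and `rfftfreq` that take no array arguments there is nothing to dispatch on, so `set_module` is used instead; it only rewrites the function's reported `__module__`, making introspection and error messages name the public location rather than the private submodule. Roughly:

    import numpy as np

    # With @set_module('numpy.fft') applied:
    assert np.fft.fftfreq.__module__ == 'numpy.fft'   # not 'numpy.fft.helper'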
diff --git a/numpy/lib/__init__.py b/numpy/lib/__init__.py
index d764cdc7e..c1757150e 100644
--- a/numpy/lib/__init__.py
+++ b/numpy/lib/__init__.py
@@ -26,7 +26,7 @@ from .financial import *
from .arrayterator import Arrayterator
from .arraypad import *
from ._version import *
-from numpy.core.multiarray import tracemalloc_domain
+from numpy.core._multiarray_umath import tracemalloc_domain
__all__ = ['emath', 'math', 'tracemalloc_domain']
__all__ += type_check.__all__
@@ -46,6 +46,6 @@ __all__ += financial.__all__
__all__ += nanfunctions.__all__
__all__ += histograms.__all__
-from numpy.testing._private.pytesttester import PytestTester
+from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
diff --git a/numpy/lib/_datasource.py b/numpy/lib/_datasource.py
index ab00b1444..30237b76f 100644
--- a/numpy/lib/_datasource.py
+++ b/numpy/lib/_datasource.py
@@ -41,8 +41,12 @@ import warnings
import shutil
import io
+from numpy.core.overrides import set_module
+
+
_open = open
+
def _check_mode(mode, encoding, newline):
"""Check mode and that encoding and newline are compatible.
@@ -262,7 +266,8 @@ def open(path, mode='r', destpath=os.curdir, encoding=None, newline=None):
return ds.open(path, mode, encoding=encoding, newline=newline)
-class DataSource (object):
+@set_module('numpy')
+class DataSource(object):
"""
DataSource(destpath='.')
@@ -323,7 +328,7 @@ class DataSource (object):
def __del__(self):
# Remove temp directories
- if self._istmpdest:
+ if hasattr(self, '_istmpdest') and self._istmpdest:
shutil.rmtree(self._destpath)
def _iszip(self, filename):
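The `hasattr` guard covers a half-constructed `DataSource`: if `__init__` raises before `_istmpdest` is assigned, Python still runs `__del__` when the instance is collected, and the unguarded attribute lookup would add a spurious error during teardown. The failure mode in miniature (`Fragile` is illustrative only):

    class Fragile(object):
        def __init__(self):
            raise RuntimeError('init failed')   # _istmpdest never assigned
        def __del__(self):
            # Without the hasattr() guard this lookup raises AttributeError,
            # which Python 3 reports as an ignored exception in __del__.
            if hasattr(self, '_istmpdest') and self._istmpdest:
                pass

    try:
        Fragile()
    except RuntimeError:
        pass   # __del__ still ran on the partially-built instance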
diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py
index b604b8c52..8a042f190 100644
--- a/numpy/lib/_iotools.py
+++ b/numpy/lib/_iotools.py
@@ -8,7 +8,7 @@ __docformat__ = "restructuredtext en"
import sys
import numpy as np
import numpy.core.numeric as nx
-from numpy.compat import asbytes, asunicode, bytes, asbytes_nested, basestring
+from numpy.compat import asbytes, asunicode, bytes, basestring
if sys.version_info[0] >= 3:
from builtins import bool, int, float, complex, object, str
@@ -693,7 +693,7 @@ class StringConverter(object):
self.func = lambda x: int(float(x))
# Store the list of strings corresponding to missing values.
if missing_values is None:
- self.missing_values = set([''])
+ self.missing_values = {''}
else:
if isinstance(missing_values, basestring):
missing_values = missing_values.split(",")
diff --git a/numpy/lib/arraypad.py b/numpy/lib/arraypad.py
index e9ca9de4d..4f6371058 100644
--- a/numpy/lib/arraypad.py
+++ b/numpy/lib/arraypad.py
@@ -6,6 +6,7 @@ of an n-dimensional array.
from __future__ import division, absolute_import, print_function
import numpy as np
+from numpy.core.overrides import array_function_dispatch
__all__ = ['pad']
@@ -885,111 +886,82 @@ def _pad_wrap(arr, pad_amt, axis=-1):
return np.concatenate((wrap_chunk1, arr, wrap_chunk2), axis=axis)
-def _normalize_shape(ndarray, shape, cast_to_int=True):
+def _as_pairs(x, ndim, as_index=False):
"""
- Private function which does some checks and normalizes the possibly
- much simpler representations of 'pad_width', 'stat_length',
- 'constant_values', 'end_values'.
+ Broadcast `x` to an array with the shape (`ndim`, 2).
- Parameters
- ----------
- narray : ndarray
- Input ndarray
- shape : {sequence, array_like, float, int}, optional
- The width of padding (pad_width), the number of elements on the
- edge of the narray used for statistics (stat_length), the constant
- value(s) to use when filling padded regions (constant_values), or the
- endpoint target(s) for linear ramps (end_values).
- ((before_1, after_1), ... (before_N, after_N)) unique number of
- elements for each axis where `N` is rank of `narray`.
- ((before, after),) yields same before and after constants for each
- axis.
- (constant,) or val is a shortcut for before = after = constant for
- all axes.
- cast_to_int : bool, optional
- Controls if values in ``shape`` will be rounded and cast to int
- before being returned.
-
- Returns
- -------
- normalized_shape : tuple of tuples
- val => ((val, val), (val, val), ...)
- [[val1, val2], [val3, val4], ...] => ((val1, val2), (val3, val4), ...)
- ((val1, val2), (val3, val4), ...) => no change
- [[val1, val2], ] => ((val1, val2), (val1, val2), ...)
- ((val1, val2), ) => ((val1, val2), (val1, val2), ...)
- [[val , ], ] => ((val, val), (val, val), ...)
- ((val , ), ) => ((val, val), (val, val), ...)
-
- """
- ndims = ndarray.ndim
-
- # Shortcut shape=None
- if shape is None:
- return ((None, None), ) * ndims
-
- # Convert any input `info` to a NumPy array
- shape_arr = np.asarray(shape)
-
- try:
- shape_arr = np.broadcast_to(shape_arr, (ndims, 2))
- except ValueError:
- fmt = "Unable to create correctly shaped tuple from %s"
- raise ValueError(fmt % (shape,))
-
- # Cast if necessary
- if cast_to_int is True:
- shape_arr = np.round(shape_arr).astype(int)
-
- # Convert list of lists to tuple of tuples
- return tuple(tuple(axis) for axis in shape_arr.tolist())
-
-
-def _validate_lengths(narray, number_elements):
- """
- Private function which does some checks and reformats pad_width and
- stat_length using _normalize_shape.
+ A helper function for `pad` that prepares and validates arguments like
+ `pad_width` for iteration in pairs.
Parameters
----------
- narray : ndarray
- Input ndarray
- number_elements : {sequence, int}, optional
- The width of padding (pad_width) or the number of elements on the edge
- of the narray used for statistics (stat_length).
- ((before_1, after_1), ... (before_N, after_N)) unique number of
- elements for each axis.
- ((before, after),) yields same before and after constants for each
- axis.
- (constant,) or int is a shortcut for before = after = constant for all
- axes.
+ x : {None, scalar, array-like}
+ The object to broadcast to the shape (`ndim`, 2).
+ ndim : int
+ Number of pairs the broadcasted `x` will have.
+ as_index : bool, optional
+ If `x` is not None, try to round each element of `x` to an integer
+ (dtype `np.intp`) and ensure every element is positive.
Returns
-------
- _validate_lengths : tuple of tuples
- int => ((int, int), (int, int), ...)
- [[int1, int2], [int3, int4], ...] => ((int1, int2), (int3, int4), ...)
- ((int1, int2), (int3, int4), ...) => no change
- [[int1, int2], ] => ((int1, int2), (int1, int2), ...)
- ((int1, int2), ) => ((int1, int2), (int1, int2), ...)
- [[int , ], ] => ((int, int), (int, int), ...)
- ((int , ), ) => ((int, int), (int, int), ...)
-
+ pairs : nested iterables, shape (`ndim`, 2)
+ The broadcasted version of `x`.
+
+ Raises
+ ------
+ ValueError
+ If `as_index` is True and `x` contains negative elements.
+ Or if `x` is not broadcastable to the shape (`ndim`, 2).
"""
- normshp = _normalize_shape(narray, number_elements)
- for i in normshp:
- chk = [1 if x is None else x for x in i]
- chk = [1 if x >= 0 else -1 for x in chk]
- if (chk[0] < 0) or (chk[1] < 0):
- fmt = "%s cannot contain negative values."
- raise ValueError(fmt % (number_elements,))
- return normshp
+ if x is None:
+ # Pass through None as a special case, otherwise np.round(x) fails
+ # with an AttributeError
+ return ((None, None),) * ndim
+
+ x = np.array(x)
+ if as_index:
+ x = np.round(x).astype(np.intp, copy=False)
+
+ if x.ndim < 3:
+ # Optimization: Possibly use faster paths for cases where `x` has
+ # only 1 or 2 elements. `np.broadcast_to` could handle these as well
+ # but is currently slower
+
+ if x.size == 1:
+ # x was supplied as a single value
+ x = x.ravel() # Ensure x[0] works for x.ndim == 0, 1, 2
+ if as_index and x < 0:
+ raise ValueError("index can't contain negative values")
+ return ((x[0], x[0]),) * ndim
+
+ if x.size == 2 and x.shape != (2, 1):
+            # x was supplied with a single value for each side,
+            # except the case where each dimension has a single value
+            # that should be broadcast to a pair,
+            # e.g. [[1], [2]] -> [[1, 1], [2, 2]], not [[1, 2], [1, 2]]
+ x = x.ravel() # Ensure x[0], x[1] works
+ if as_index and (x[0] < 0 or x[1] < 0):
+ raise ValueError("index can't contain negative values")
+ return ((x[0], x[1]),) * ndim
+
+ if as_index and x.min() < 0:
+ raise ValueError("index can't contain negative values")
+
+ # Converting the array with `tolist` seems to improve performance
+ # when iterating and indexing the result (see usage in `pad`)
+ return np.broadcast_to(x, (ndim, 2)).tolist()
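`_as_pairs` replaces both `_normalize_shape` and `_validate_lengths` with one broadcast-based helper: every accepted `pad_width`-style spelling normalizes to an (`ndim`, 2) structure. Illustrative calls (a sketch importing the private helper directly; returned elements are NumPy scalars, shown here as plain ints):

    from numpy.lib.arraypad import _as_pairs

    _as_pairs(2, ndim=3, as_index=True)   # ((2, 2), (2, 2), (2, 2))
    _as_pairs((1, 3), ndim=2)             # ((1, 3), (1, 3)): same pair per axis
    _as_pairs([[1], [2]], ndim=2)         # [[1, 1], [2, 2]]: one value per axis
    _as_pairs(None, ndim=2)               # ((None, None), (None, None))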
###############################################################################
# Public functions
+def _pad_dispatcher(array, pad_width, mode, **kwargs):
+ return (array,)
+
+
+@array_function_dispatch(_pad_dispatcher, module='numpy')
def pad(array, pad_width, mode, **kwargs):
"""
Pads an array.
@@ -1197,7 +1169,7 @@ def pad(array, pad_width, mode, **kwargs):
raise TypeError('`pad_width` must be of integral type.')
narray = np.array(array)
- pad_width = _validate_lengths(narray, pad_width)
+ pad_width = _as_pairs(pad_width, narray.ndim, as_index=True)
allowedkwargs = {
'constant': ['constant_values'],
@@ -1233,10 +1205,9 @@ def pad(array, pad_width, mode, **kwargs):
# Need to only normalize particular keywords.
for i in kwargs:
if i == 'stat_length':
- kwargs[i] = _validate_lengths(narray, kwargs[i])
+ kwargs[i] = _as_pairs(kwargs[i], narray.ndim, as_index=True)
if i in ['end_values', 'constant_values']:
- kwargs[i] = _normalize_shape(narray, kwargs[i],
- cast_to_int=False)
+ kwargs[i] = _as_pairs(kwargs[i], narray.ndim)
else:
# Drop back to old, slower np.apply_along_axis mode for user-supplied
# vector function
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py
index c1f9f5bea..fd64ecbd6 100644
--- a/numpy/lib/arraysetops.py
+++ b/numpy/lib/arraysetops.py
@@ -27,7 +27,14 @@ To do: Optionally return indices analogously to unique for all functions.
"""
from __future__ import division, absolute_import, print_function
+import functools
+
import numpy as np
+from numpy.core import overrides
+
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
__all__ = [
@@ -36,6 +43,11 @@ __all__ = [
]
+def _ediff1d_dispatcher(ary, to_end=None, to_begin=None):
+ return (ary, to_end, to_begin)
+
+
+@array_function_dispatch(_ediff1d_dispatcher)
def ediff1d(ary, to_end=None, to_begin=None):
"""
The differences between consecutive elements of an array.
@@ -82,6 +94,11 @@ def ediff1d(ary, to_end=None, to_begin=None):
# force a 1d array
ary = np.asanyarray(ary).ravel()
+ # we have unit tests enforcing
+ # propagation of the dtype of input
+ # ary to returned result
+ dtype_req = ary.dtype
+
# fast track default case
if to_begin is None and to_end is None:
return ary[1:] - ary[:-1]
@@ -89,13 +106,23 @@ def ediff1d(ary, to_end=None, to_begin=None):
if to_begin is None:
l_begin = 0
else:
- to_begin = np.asanyarray(to_begin).ravel()
+ to_begin = np.asanyarray(to_begin)
+ if not np.can_cast(to_begin, dtype_req):
+ raise TypeError("dtype of to_begin must be compatible "
+ "with input ary")
+
+ to_begin = to_begin.ravel()
l_begin = len(to_begin)
if to_end is None:
l_end = 0
else:
- to_end = np.asanyarray(to_end).ravel()
+ to_end = np.asanyarray(to_end)
+ if not np.can_cast(to_end, dtype_req):
+ raise TypeError("dtype of to_end must be compatible "
+ "with input ary")
+
+ to_end = to_end.ravel()
l_end = len(to_end)
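With the `np.can_cast` checks, `to_begin`/`to_end` values that cannot be safely cast to the dtype of `ary` now fail loudly instead of being silently truncated when copied into the result buffer. For example, under this change:

    import numpy as np

    ary = np.array([1, 2, 4], dtype=np.int64)
    np.ediff1d(ary, to_begin=0)     # array([0, 1, 2])
    np.ediff1d(ary, to_begin=1.5)   # TypeError: dtype of to_begin must be
                                    #            compatible with input ary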
# do the calculation in place and copy to_begin and to_end
@@ -118,6 +145,12 @@ def _unpack_tuple(x):
return x
+def _unique_dispatcher(ar, return_index=None, return_inverse=None,
+ return_counts=None, axis=None):
+ return (ar,)
+
+
+@array_function_dispatch(_unique_dispatcher)
def unique(ar, return_index=False, return_inverse=False,
return_counts=False, axis=None):
"""
@@ -298,6 +331,12 @@ def _unique1d(ar, return_index=False, return_inverse=False,
return ret
+def _intersect1d_dispatcher(
+ ar1, ar2, assume_unique=None, return_indices=None):
+ return (ar1, ar2)
+
+
+@array_function_dispatch(_intersect1d_dispatcher)
def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
"""
Find the intersection of two arrays.
@@ -393,6 +432,11 @@ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
return int1d
+def _setxor1d_dispatcher(ar1, ar2, assume_unique=None):
+ return (ar1, ar2)
+
+
+@array_function_dispatch(_setxor1d_dispatcher)
def setxor1d(ar1, ar2, assume_unique=False):
"""
Find the set exclusive-or of two arrays.
@@ -435,6 +479,11 @@ def setxor1d(ar1, ar2, assume_unique=False):
return aux[flag[1:] & flag[:-1]]
+def _in1d_dispatcher(ar1, ar2, assume_unique=None, invert=None):
+ return (ar1, ar2)
+
+
+@array_function_dispatch(_in1d_dispatcher)
def in1d(ar1, ar2, assume_unique=False, invert=False):
"""
Test whether each element of a 1-D array is also present in a second array.
@@ -547,6 +596,11 @@ def in1d(ar1, ar2, assume_unique=False, invert=False):
return ret[rev_idx]
+def _isin_dispatcher(element, test_elements, assume_unique=None, invert=None):
+ return (element, test_elements)
+
+
+@array_function_dispatch(_isin_dispatcher)
def isin(element, test_elements, assume_unique=False, invert=False):
"""
Calculates `element in test_elements`, broadcasting over `element` only.
@@ -611,6 +665,14 @@ def isin(element, test_elements, assume_unique=False, invert=False):
[ True, False]])
>>> element[mask]
array([2, 4])
+
+ The indices of the matched values can be obtained with `nonzero`:
+
+ >>> np.nonzero(mask)
+ (array([0, 1]), array([1, 0]))
+
+ The test can also be inverted:
+
>>> mask = np.isin(element, test_elements, invert=True)
>>> mask
array([[ True, False],
@@ -637,6 +699,11 @@ def isin(element, test_elements, assume_unique=False, invert=False):
invert=invert).reshape(element.shape)
+def _union1d_dispatcher(ar1, ar2):
+ return (ar1, ar2)
+
+
+@array_function_dispatch(_union1d_dispatcher)
def union1d(ar1, ar2):
"""
Find the union of two arrays.
@@ -672,11 +739,17 @@ def union1d(ar1, ar2):
"""
return unique(np.concatenate((ar1, ar2), axis=None))
+
+def _setdiff1d_dispatcher(ar1, ar2, assume_unique=None):
+ return (ar1, ar2)
+
+
+@array_function_dispatch(_setdiff1d_dispatcher)
def setdiff1d(ar1, ar2, assume_unique=False):
"""
Find the set difference of two arrays.
- Return the sorted, unique values in `ar1` that are not in `ar2`.
+ Return the unique values in `ar1` that are not in `ar2`.
Parameters
----------
@@ -691,7 +764,9 @@ def setdiff1d(ar1, ar2, assume_unique=False):
Returns
-------
setdiff1d : ndarray
- Sorted 1D array of values in `ar1` that are not in `ar2`.
+ 1D array of values in `ar1` that are not in `ar2`. The result
+ is sorted when `assume_unique=False`, but otherwise only sorted
+ if the input is sorted.
See Also
--------
diff --git a/numpy/lib/financial.py b/numpy/lib/financial.py
index 06fa1bd92..e1e297492 100644
--- a/numpy/lib/financial.py
+++ b/numpy/lib/financial.py
@@ -13,8 +13,15 @@ otherwise stated.
from __future__ import division, absolute_import, print_function
from decimal import Decimal
+import functools
import numpy as np
+from numpy.core import overrides
+
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
__all__ = ['fv', 'pmt', 'nper', 'ipmt', 'ppmt', 'pv', 'rate',
'irr', 'npv', 'mirr']
@@ -36,6 +43,12 @@ def _convert_when(when):
except (KeyError, TypeError):
return [_when_to_num[x] for x in when]
+
+def _fv_dispatcher(rate, nper, pmt, pv, when=None):
+ return (rate, nper, pmt, pv)
+
+
+@array_function_dispatch(_fv_dispatcher)
def fv(rate, nper, pmt, pv, when='end'):
"""
Compute the future value.
@@ -124,6 +137,12 @@ def fv(rate, nper, pmt, pv, when='end'):
(1 + rate*when)*(temp - 1)/rate)
return -(pv*temp + pmt*fact)
+
+def _pmt_dispatcher(rate, nper, pv, fv=None, when=None):
+ return (rate, nper, pv, fv)
+
+
+@array_function_dispatch(_pmt_dispatcher)
def pmt(rate, nper, pv, fv=0, when='end'):
"""
Compute the payment against loan principal plus interest.
@@ -216,6 +235,12 @@ def pmt(rate, nper, pv, fv=0, when='end'):
(1 + masked_rate*when)*(temp - 1)/masked_rate)
return -(fv + pv*temp) / fact
+
+def _nper_dispatcher(rate, pmt, pv, fv=None, when=None):
+ return (rate, pmt, pv, fv)
+
+
+@array_function_dispatch(_nper_dispatcher)
def nper(rate, pmt, pv, fv=0, when='end'):
"""
Compute the number of periodic payments.
@@ -284,6 +309,12 @@ def nper(rate, pmt, pv, fv=0, when='end'):
B = np.log((-fv+z) / (pv+z))/np.log(1+rate)
return np.where(rate == 0, A, B)
+
+def _ipmt_dispatcher(rate, per, nper, pv, fv=None, when=None):
+ return (rate, per, nper, pv, fv)
+
+
+@array_function_dispatch(_ipmt_dispatcher)
def ipmt(rate, per, nper, pv, fv=0, when='end'):
"""
Compute the interest portion of a payment.
@@ -379,6 +410,7 @@ def ipmt(rate, per, nper, pv, fv=0, when='end'):
pass
return ipmt
+
def _rbl(rate, per, pmt, pv, when):
"""
This function is here to simply have a different name for the 'fv'
@@ -388,6 +420,12 @@ def _rbl(rate, per, pmt, pv, when):
"""
return fv(rate, (per - 1), pmt, pv, when)
+
+def _ppmt_dispatcher(rate, per, nper, pv, fv=None, when=None):
+ return (rate, per, nper, pv, fv)
+
+
+@array_function_dispatch(_ppmt_dispatcher)
def ppmt(rate, per, nper, pv, fv=0, when='end'):
"""
Compute the payment against loan principal.
@@ -416,6 +454,12 @@ def ppmt(rate, per, nper, pv, fv=0, when='end'):
total = pmt(rate, nper, pv, fv, when)
return total - ipmt(rate, per, nper, pv, fv, when)
+
+def _pv_dispatcher(rate, nper, pmt, fv=None, when=None):
+    return (rate, nper, pmt, fv)
+
+
+@array_function_dispatch(_pv_dispatcher)
def pv(rate, nper, pmt, fv=0, when='end'):
"""
Compute the present value.
@@ -520,6 +564,12 @@ def _g_div_gp(r, n, p, x, y, w):
(n*t2*x - p*(t1 - 1)*(r*w + 1)/(r**2) + n*p*t2*(r*w + 1)/r +
p*(t1 - 1)*w/r))
+
+def _rate_dispatcher(nper, pmt, pv, fv, when=None, guess=None, tol=None,
+ maxiter=None):
+ return (nper, pmt, pv, fv)
+
+
# Use Newton's iteration until the change is less than 1e-6
# for all values or a maximum of 100 iterations is reached.
# Newton's rule is
@@ -527,6 +577,7 @@ def _g_div_gp(r, n, p, x, y, w):
# where
# g(r) is the formula
# g'(r) is the derivative with respect to r.
+@array_function_dispatch(_rate_dispatcher)
def rate(nper, pmt, pv, fv, when='end', guess=None, tol=None, maxiter=100):
"""
Compute the rate of interest per period.
@@ -598,6 +649,12 @@ def rate(nper, pmt, pv, fv, when='end', guess=None, tol=None, maxiter=100):
else:
return rn
+
+def _irr_dispatcher(values):
+ return (values,)
+
+
+@array_function_dispatch(_irr_dispatcher)
def irr(values):
"""
Return the Internal Rate of Return (IRR).
@@ -677,6 +734,12 @@ def irr(values):
rate = rate.item(np.argmin(np.abs(rate)))
return rate
+
+def _npv_dispatcher(rate, values):
+ return (values,)
+
+
+@array_function_dispatch(_npv_dispatcher)
def npv(rate, values):
"""
Returns the NPV (Net Present Value) of a cash flow series.
@@ -722,6 +785,12 @@ def npv(rate, values):
values = np.asarray(values)
return (values / (1+rate)**np.arange(0, len(values))).sum(axis=0)
+
+def _mirr_dispatcher(values, finance_rate, reinvest_rate):
+ return (values,)
+
+
+@array_function_dispatch(_mirr_dispatcher)
def mirr(values, finance_rate, reinvest_rate):
"""
Modified internal rate of return.
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index 23eac7e7d..10945e5e8 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -150,7 +150,7 @@ Notes
-----
The ``.npy`` format, including motivation for creating it and a comparison of
alternatives, is described in the `"npy-format" NEP
-<http://www.numpy.org/neps/nep-0001-npy-format.html>`_, however details have
+<https://www.numpy.org/neps/nep-0001-npy-format.html>`_, however details have
evolved with time and this document is more current.
"""
@@ -161,12 +161,11 @@ import sys
import io
import warnings
from numpy.lib.utils import safe_eval
-from numpy.compat import asbytes, asstr, isfileobj, long, basestring
+from numpy.compat import (
+ asbytes, asstr, isfileobj, long, os_fspath
+ )
+from numpy.core.numeric import pickle
-if sys.version_info[0] >= 3:
- import pickle
-else:
- import cPickle as pickle
MAGIC_PREFIX = b'\x93NUMPY'
MAGIC_LEN = len(MAGIC_PREFIX) + 2
@@ -260,6 +259,43 @@ def dtype_to_descr(dtype):
else:
return dtype.str
+def descr_to_dtype(descr):
+ '''
+ descr may be stored as dtype.descr, which is a list of
+ (name, format, [shape]) tuples. Offsets are not explicitly saved, rather
+    empty fields with name, format == '', '|Vn' are added as padding.
+
+ This function reverses the process, eliminating the empty padding fields.
+ '''
+ if isinstance(descr, (str, dict)):
+ # No padding removal needed
+ return numpy.dtype(descr)
+
+ fields = []
+ offset = 0
+ for field in descr:
+ if len(field) == 2:
+ name, descr_str = field
+ dt = descr_to_dtype(descr_str)
+ else:
+ name, descr_str, shape = field
+ dt = numpy.dtype((descr_to_dtype(descr_str), shape))
+
+ # Ignore padding bytes, which will be void bytes with '' as name
+        # Once support for blank names is removed, only "if name == ''" is needed
+ is_pad = (name == '' and dt.type is numpy.void and dt.names is None)
+ if not is_pad:
+ fields.append((name, dt, offset))
+
+ offset += dt.itemsize
+
+ names, formats, offsets = zip(*fields)
+ # names may be (title, names) tuples
+ nametups = (n if isinstance(n, tuple) else (None, n) for n in names)
+ titles, names = zip(*nametups)
+ return numpy.dtype({'names': names, 'formats': formats, 'titles': titles,
+ 'offsets': offsets, 'itemsize': offset})
+
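`descr_to_dtype` is the missing inverse of `dtype_to_descr` for structured dtypes with offsets: `dtype.descr` fills the gaps with unnamed void padding fields, and this helper strips them back out so saved headers round-trip exactly. A quick illustration using the new helper directly:

    import numpy as np
    from numpy.lib.format import descr_to_dtype

    dt = np.dtype({'names': ['a', 'b'], 'formats': ['u1', '<i8'],
                   'offsets': [0, 8]})
    dt.descr                          # [('a', '|u1'), ('', '|V7'), ('b', '<i8')]
    descr_to_dtype(dt.descr) == dt    # True: padding field eliminated again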
def header_data_from_array_1_0(array):
""" Get the dictionary of header metadata from a numpy.ndarray.
@@ -524,7 +560,7 @@ def _read_array_header(fp, version):
msg = "fortran_order is not a valid bool: %r"
raise ValueError(msg % (d['fortran_order'],))
try:
- dtype = numpy.dtype(d['descr'])
+ dtype = descr_to_dtype(d['descr'])
except TypeError as e:
msg = "descr is not a valid dtype descriptor: %r"
raise ValueError(msg % (d['descr'],))
@@ -709,7 +745,7 @@ def open_memmap(filename, mode='r+', dtype=None, shape=None,
Parameters
----------
- filename : str
+ filename : str or path-like
The name of the file on disk. This may *not* be a file-like
object.
mode : str, optional
@@ -750,9 +786,9 @@ def open_memmap(filename, mode='r+', dtype=None, shape=None,
memmap
"""
- if not isinstance(filename, basestring):
- raise ValueError("Filename must be a string. Memmap cannot use"
- " existing file handles.")
+ if isfileobj(filename):
+ raise ValueError("Filename must be a string or a path-like object."
+ " Memmap cannot use existing file handles.")
if 'w' in mode:
# We are creating the file, not reading it.
@@ -770,7 +806,7 @@ def open_memmap(filename, mode='r+', dtype=None, shape=None,
shape=shape,
)
# If we got here, then it should be safe to create the file.
- fp = open(filename, mode+'b')
+ fp = open(os_fspath(filename), mode+'b')
try:
used_ver = _write_array_header(fp, d, version)
# this warning can be removed when 1.9 has aged enough
@@ -782,7 +818,7 @@ def open_memmap(filename, mode='r+', dtype=None, shape=None,
fp.close()
else:
# Read the header of the file first.
- fp = open(filename, 'rb')
+ fp = open(os_fspath(filename), 'rb')
try:
version = read_magic(fp)
_check_version(version)
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 95edb95fa..5f87c8b2c 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -6,36 +6,38 @@ try:
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc
+import functools
import re
import sys
import warnings
-import operator
import numpy as np
import numpy.core.numeric as _nx
-from numpy.core import linspace, atleast_1d, atleast_2d, transpose
+from numpy.core import atleast_1d, transpose
from numpy.core.numeric import (
ones, zeros, arange, concatenate, array, asarray, asanyarray, empty,
empty_like, ndarray, around, floor, ceil, take, dot, where, intp,
- integer, isscalar, absolute, AxisError
+ integer, isscalar, absolute
)
from numpy.core.umath import (
- pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,
- mod, exp, log10, not_equal, subtract
+ pi, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,
+ mod, exp, not_equal, subtract
)
from numpy.core.fromnumeric import (
- ravel, nonzero, sort, partition, mean, any, sum
+ ravel, nonzero, partition, mean, any, sum
)
-from numpy.core.numerictypes import typecodes, number
+from numpy.core.numerictypes import typecodes
+from numpy.core.overrides import set_module
+from numpy.core import overrides
+from numpy.core.function_base import add_newdoc
from numpy.lib.twodim_base import diag
from .utils import deprecate
from numpy.core.multiarray import (
- _insert, add_docstring, digitize, bincount, normalize_axis_index,
+ _insert, add_docstring, bincount, normalize_axis_index, _monotonicity,
interp as compiled_interp, interp_complex as compiled_interp_complex
)
from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc
from numpy.compat import long
-from numpy.compat.py3k import basestring
if sys.version_info[0] < 3:
# Force range to be a generator, for np.delete's usage.
@@ -44,6 +46,11 @@ if sys.version_info[0] < 3:
else:
import builtins
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
# needed in this module for compatibility
from numpy.lib.histograms import histogram, histogramdd
@@ -59,6 +66,11 @@ __all__ = [
]
+def _rot90_dispatcher(m, k=None, axes=None):
+ return (m,)
+
+
+@array_function_dispatch(_rot90_dispatcher)
def rot90(m, k=1, axes=(0,1)):
"""
Rotate an array by 90 degrees in the plane specified by axes.
@@ -145,6 +157,11 @@ def rot90(m, k=1, axes=(0,1)):
return flip(transpose(m, axes_list), axes[1])
+def _flip_dispatcher(m, axis=None):
+ return (m,)
+
+
+@array_function_dispatch(_flip_dispatcher)
def flip(m, axis=None):
"""
Reverse the order of elements in an array along the given axis.
@@ -238,6 +255,7 @@ def flip(m, axis=None):
return m[indexer]
+@set_module('numpy')
def iterable(y):
"""
Check whether or not an object can be iterated over.
@@ -269,6 +287,11 @@ def iterable(y):
return True
+def _average_dispatcher(a, axis=None, weights=None, returned=None):
+ return (a, weights)
+
+
+@array_function_dispatch(_average_dispatcher)
def average(a, axis=None, weights=None, returned=False):
"""
Compute the weighted average along the specified axis.
@@ -304,12 +327,17 @@ def average(a, axis=None, weights=None, returned=False):
Returns
-------
- average, [sum_of_weights] : array_type or double
- Return the average along the specified axis. When returned is `True`,
+ retval, [sum_of_weights] : array_type or double
+ Return the average along the specified axis. When `returned` is `True`,
return a tuple with the average as the first element and the sum
- of the weights as the second element. The return type is `Float`
- if `a` is of integer type, otherwise it is of the same type as `a`.
- `sum_of_weights` is of the same type as `average`.
+ of the weights as the second element. `sum_of_weights` is of the
+        same type as `retval`. The result dtype follows a general pattern.
+        If `weights` is None, the result dtype will be that of `a`, or ``float64``
+        if `a` is integral. Otherwise, if `weights` is not None and `a` is
+        non-integral, the result type will be the type of lowest precision
+        capable of representing values of both `a` and `weights`. If `a`
+        happens to be integral, the previous rules still apply but the result
+        dtype will at least be ``float64``.
Raises
------
@@ -326,6 +354,8 @@ def average(a, axis=None, weights=None, returned=False):
ma.average : average for masked arrays -- useful if your data contains
"missing" values
+ numpy.result_type : Returns the type that results from applying the
+ numpy type promotion rules to the arguments.
Examples
--------
@@ -345,10 +375,16 @@ def average(a, axis=None, weights=None, returned=False):
>>> np.average(data, axis=1, weights=[1./4, 3./4])
array([ 0.75, 2.75, 4.75])
>>> np.average(data, weights=[1./4, 3./4])
Traceback (most recent call last):
...
TypeError: Axis must be specified when shapes of a and weights differ.
-
+
+ >>> a = np.ones(5, dtype=np.float128)
+ >>> w = np.ones(5, dtype=np.complex64)
+ >>> avg = np.average(a, weights=w)
+ >>> print(avg.dtype)
+ complex256
"""
a = np.asanyarray(a)
@@ -395,6 +431,7 @@ def average(a, axis=None, weights=None, returned=False):
return avg
+@set_module('numpy')
def asarray_chkfinite(a, dtype=None, order=None):
"""Convert the input to an array, checking for NaNs or Infs.
@@ -462,6 +499,15 @@ def asarray_chkfinite(a, dtype=None, order=None):
return a
+def _piecewise_dispatcher(x, condlist, funclist, *args, **kw):
+ yield x
+ # support the undocumented behavior of allowing scalars
+ if np.iterable(condlist):
+ for c in condlist:
+ yield c
+
+
+@array_function_dispatch(_piecewise_dispatcher)
def piecewise(x, condlist, funclist, *args, **kw):
"""
Evaluate a piecewise-defined function.
@@ -583,6 +629,14 @@ def piecewise(x, condlist, funclist, *args, **kw):
return y
+def _select_dispatcher(condlist, choicelist, default=None):
+ for c in condlist:
+ yield c
+ for c in choicelist:
+ yield c
+
+
+@array_function_dispatch(_select_dispatcher)
def select(condlist, choicelist, default=0):
"""
Return an array drawn from elements in choicelist, depending on conditions.
@@ -686,6 +740,11 @@ def select(condlist, choicelist, default=0):
return result
+def _copy_dispatcher(a, order=None):
+ return (a,)
+
+
+@array_function_dispatch(_copy_dispatcher)
def copy(a, order='K'):
"""
Return an array copy of the given object.
@@ -735,6 +794,13 @@ def copy(a, order='K'):
# Basic operations
+def _gradient_dispatcher(f, *varargs, **kwargs):
+ yield f
+ for v in varargs:
+ yield v
+
+
+@array_function_dispatch(_gradient_dispatcher)
def gradient(f, *varargs, **kwargs):
"""
Return the gradient of an N-dimensional array.
@@ -1076,7 +1142,12 @@ def gradient(f, *varargs, **kwargs):
return outvals
-def diff(a, n=1, axis=-1):
+def _diff_dispatcher(a, n=None, axis=None, prepend=None, append=None):
+ return (a, prepend, append)
+
+
+@array_function_dispatch(_diff_dispatcher)
+def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue):
"""
Calculate the n-th discrete difference along the given axis.
@@ -1094,6 +1165,12 @@ def diff(a, n=1, axis=-1):
axis : int, optional
The axis along which the difference is taken, default is the
last axis.
+ prepend, append : array_like, optional
+        Values to prepend or append to `a` along axis prior to
+        performing the difference. Scalar values are expanded to
+        arrays with length 1 in the direction of axis and the shape
+        of the input array along all other axes. Otherwise the
+        dimension and shape must match `a` except along axis.
Returns
-------
@@ -1162,6 +1239,28 @@ def diff(a, n=1, axis=-1):
nd = a.ndim
axis = normalize_axis_index(axis, nd)
+ combined = []
+ if prepend is not np._NoValue:
+ prepend = np.asanyarray(prepend)
+ if prepend.ndim == 0:
+ shape = list(a.shape)
+ shape[axis] = 1
+ prepend = np.broadcast_to(prepend, tuple(shape))
+ combined.append(prepend)
+
+ combined.append(a)
+
+ if append is not np._NoValue:
+ append = np.asanyarray(append)
+ if append.ndim == 0:
+ shape = list(a.shape)
+ shape[axis] = 1
+ append = np.broadcast_to(append, tuple(shape))
+ combined.append(append)
+
+ if len(combined) > 1:
+ a = np.concatenate(combined, axis)
+
slice1 = [slice(None)] * nd
slice2 = [slice(None)] * nd
slice1[axis] = slice(1, None)
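Since `prepend`/`append` are simply concatenated onto `a` before differencing, patterns like inverting a cumulative sum collapse into a single call. For instance:

    import numpy as np

    x = np.array([1, 2, 4, 7])
    np.diff(x, prepend=0)              # array([1, 1, 2, 3]), same length as x
    np.cumsum(np.diff(x, prepend=0))   # array([1, 2, 4, 7]), round trip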
@@ -1176,6 +1275,11 @@ def diff(a, n=1, axis=-1):
return a
+def _interp_dispatcher(x, xp, fp, left=None, right=None, period=None):
+ return (x, xp, fp)
+
+
+@array_function_dispatch(_interp_dispatcher)
def interp(x, xp, fp, left=None, right=None, period=None):
"""
One-dimensional linear interpolation.
@@ -1308,7 +1412,12 @@ def interp(x, xp, fp, left=None, right=None, period=None):
return interp_func(x, xp, fp, left, right)
-def angle(z, deg=0):
+def _angle_dispatcher(z, deg=None):
+ return (z,)
+
+
+@array_function_dispatch(_angle_dispatcher)
+def angle(z, deg=False):
"""
Return the angle of the complex argument.
@@ -1324,6 +1433,9 @@ def angle(z, deg=0):
angle : ndarray or scalar
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
+
+        .. versionchanged:: 1.16.0
+            This function works on subclasses of ndarray like `ma.array`.
See Also
--------
@@ -1338,20 +1450,25 @@ def angle(z, deg=0):
45.0
"""
- if deg:
- fact = 180/pi
- else:
- fact = 1.0
- z = asarray(z)
- if (issubclass(z.dtype.type, _nx.complexfloating)):
+ z = asanyarray(z)
+ if issubclass(z.dtype.type, _nx.complexfloating):
zimag = z.imag
zreal = z.real
else:
zimag = 0
zreal = z
- return arctan2(zimag, zreal) * fact
+ a = arctan2(zimag, zreal)
+ if deg:
+ a *= 180/pi
+ return a
+
+
+def _unwrap_dispatcher(p, discont=None, axis=None):
+ return (p,)
+
+
+@array_function_dispatch(_unwrap_dispatcher)
def unwrap(p, discont=pi, axis=-1):
"""
Unwrap by changing deltas between values to 2*pi complement.
@@ -1408,6 +1525,11 @@ def unwrap(p, discont=pi, axis=-1):
return up
+def _sort_complex(a):
+ return (a,)
+
+
+@array_function_dispatch(_sort_complex)
def sort_complex(a):
"""
Sort a complex array using the real part first, then the imaginary part.
@@ -1444,6 +1566,11 @@ def sort_complex(a):
return b
+def _trim_zeros(filt, trim=None):
+ return (filt,)
+
+
+@array_function_dispatch(_trim_zeros)
def trim_zeros(filt, trim='fb'):
"""
Trim the leading and/or trailing zeros from a 1-D array or sequence.
@@ -1494,25 +1621,11 @@ def trim_zeros(filt, trim='fb'):
last = last - 1
return filt[first:last]
-
-@deprecate
-def unique(x):
- """
- This function is deprecated. Use numpy.lib.arraysetops.unique()
- instead.
- """
- try:
- tmp = x.flatten()
- if tmp.size == 0:
- return tmp
- tmp.sort()
- idx = concatenate(([True], tmp[1:] != tmp[:-1]))
- return tmp[idx]
- except AttributeError:
- items = sorted(set(x))
- return asarray(items)
+def _extract_dispatcher(condition, arr):
+    return (condition, arr)
+
+
+@array_function_dispatch(_extract_dispatcher)
def extract(condition, arr):
"""
Return the elements of an array that satisfy some condition.
@@ -1564,6 +1677,11 @@ def extract(condition, arr):
return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
+def _place_dispatcher(arr, mask, vals):
+ return (arr, mask, vals)
+
+
+@array_function_dispatch(_place_dispatcher)
def place(arr, mask, vals):
"""
Change elements of an array based on conditional and input values.
@@ -1649,7 +1767,7 @@ def disp(mesg, device=None, linefeed=True):
return
-# See http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html
+# See https://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html
_DIMENSION_NAME = r'\w+'
_CORE_DIMENSION_LIST = '(?:{0:}(?:,{0:})*)?'.format(_DIMENSION_NAME)
_ARGUMENT = r'\({}\)'.format(_CORE_DIMENSION_LIST)
@@ -1757,6 +1875,7 @@ def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes):
return arrays
+@set_module('numpy')
class vectorize(object):
"""
vectorize(pyfunc, otypes=None, doc=None, excluded=None, cache=False,
@@ -1765,8 +1884,8 @@ class vectorize(object):
Generalized function class.
Define a vectorized function which takes a nested sequence of objects or
- numpy arrays as inputs and returns an single or tuple of numpy array as
- output. The vectorized function evaluates `pyfunc` over successive tuples
+ numpy arrays as inputs and returns a single numpy array or a tuple of numpy
+ arrays. The vectorized function evaluates `pyfunc` over successive tuples
of the input arrays like the python map function, except it uses the
broadcasting rules of numpy.
@@ -1906,7 +2025,7 @@ class vectorize(object):
References
----------
.. [1] NumPy Reference, section `Generalized Universal Function API
- <http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html>`_.
+ <https://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html>`_.
"""
def __init__(self, pyfunc, otypes=None, doc=None, excluded=None,
@@ -2118,6 +2237,12 @@ class vectorize(object):
return outputs[0] if nout == 1 else outputs
+def _cov_dispatcher(m, y=None, rowvar=None, bias=None, ddof=None,
+ fweights=None, aweights=None):
+ return (m, y, fweights, aweights)
+
+
+@array_function_dispatch(_cov_dispatcher)
def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
aweights=None):
"""
@@ -2327,6 +2452,11 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
return c.squeeze()
+def _corrcoef_dispatcher(x, y=None, rowvar=None, bias=None, ddof=None):
+ return (x, y)
+
+
+@array_function_dispatch(_corrcoef_dispatcher)
def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue):
"""
Return Pearson product-moment correlation coefficients.
@@ -2410,6 +2540,7 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue):
return c
+@set_module('numpy')
def blackman(M):
"""
Return the Blackman window.
@@ -2508,6 +2639,7 @@ def blackman(M):
return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
+@set_module('numpy')
def bartlett(M):
"""
Return the Bartlett window.
@@ -2561,7 +2693,7 @@ def bartlett(M):
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
- http://en.wikipedia.org/wiki/Window_function
+ https://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
@@ -2614,6 +2746,7 @@ def bartlett(M):
return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1))
+@set_module('numpy')
def hanning(M):
"""
Return the Hanning window.
@@ -2661,7 +2794,7 @@ def hanning(M):
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
- http://en.wikipedia.org/wiki/Window_function
+ https://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
@@ -2714,6 +2847,7 @@ def hanning(M):
return 0.5 - 0.5*cos(2.0*pi*n/(M-1))
+@set_module('numpy')
def hamming(M):
"""
Return the Hamming window.
@@ -2759,7 +2893,7 @@ def hamming(M):
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
- http://en.wikipedia.org/wiki/Window_function
+ https://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
@@ -2895,6 +3029,11 @@ def _i0_2(x):
return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x)
+def _i0_dispatcher(x):
+ return (x,)
+
+
+@array_function_dispatch(_i0_dispatcher)
def i0(x):
"""
Modified Bessel function of the first kind, order 0.
@@ -2962,6 +3101,7 @@ def i0(x):
## End of cephes code for i0
+@set_module('numpy')
def kaiser(M, beta):
"""
Return the Kaiser window.
@@ -3036,7 +3176,7 @@ def kaiser(M, beta):
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
- http://en.wikipedia.org/wiki/Window_function
+ https://en.wikipedia.org/wiki/Window_function
Examples
--------
@@ -3089,6 +3229,11 @@ def kaiser(M, beta):
return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
+def _sinc_dispatcher(x):
+ return (x,)
+
+
+@array_function_dispatch(_sinc_dispatcher)
def sinc(x):
"""
Return the sinc function.
@@ -3124,7 +3269,7 @@ def sinc(x):
.. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
Resource. http://mathworld.wolfram.com/SincFunction.html
.. [2] Wikipedia, "Sinc function",
- http://en.wikipedia.org/wiki/Sinc_function
+ https://en.wikipedia.org/wiki/Sinc_function
Examples
--------
@@ -3168,6 +3313,11 @@ def sinc(x):
return sin(y)/y
+def _msort_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_msort_dispatcher)
def msort(a):
"""
Return a copy of an array sorted along the first axis.
@@ -3251,6 +3401,12 @@ def _ureduce(a, func, **kwargs):
return r, keepdim
+def _median_dispatcher(
+ a, axis=None, out=None, overwrite_input=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_median_dispatcher)
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
"""
Compute the median along the specified axis.
@@ -3395,12 +3551,18 @@ def _median(a, axis=None, out=None, overwrite_input=False):
return mean(part[indexer], axis=axis, out=out)
+def _percentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
+ interpolation=None, keepdims=None):
+ return (a, q, out)
+
+
+@array_function_dispatch(_percentile_dispatcher)
def percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
"""
- Compute the qth percentile of the data along the specified axis.
+ Compute the q-th percentile of the data along the specified axis.
- Returns the qth percentile(s) of the array elements.
+ Returns the q-th percentile(s) of the array elements.
Parameters
----------
@@ -3467,7 +3629,7 @@ def percentile(a, q, axis=None, out=None,
Notes
-----
- Given a vector ``V`` of length ``N``, the ``q``-th percentile of
+ Given a vector ``V`` of length ``N``, the q-th percentile of
``V`` is the value ``q/100`` of the way from the minimum to the
maximum in a sorted copy of ``V``. The values and distances of
the two nearest neighbors as well as the `interpolation` parameter
@@ -3540,10 +3702,16 @@ def percentile(a, q, axis=None, out=None,
a, q, axis, out, overwrite_input, interpolation, keepdims)
+def _quantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
+ interpolation=None, keepdims=None):
+ return (a, q, out)
+
+
+@array_function_dispatch(_quantile_dispatcher)
def quantile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
"""
- Compute the `q`th quantile of the data along the specified axis.
+ Compute the q-th quantile of the data along the specified axis.
    .. versionadded:: 1.15.0
Parameters
@@ -3569,6 +3737,7 @@ def quantile(a, q, axis=None, out=None,
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
``i < j``:
+
* linear: ``i + (j - i) * fraction``, where ``fraction``
is the fractional part of the index surrounded by ``i``
and ``j``.
@@ -3602,7 +3771,7 @@ def quantile(a, q, axis=None, out=None,
Notes
-----
- Given a vector ``V`` of length ``N``, the ``q``-th quantile of
+ Given a vector ``V`` of length ``N``, the q-th quantile of
``V`` is the value ``q`` of the way from the minimum to the
maximum in a sorted copy of ``V``. The values and distances of
the two nearest neighbors as well as the `interpolation` parameter
@@ -3720,7 +3889,7 @@ def _quantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False,
indices = concatenate((indices, [-1]))
ap.partition(indices, axis=axis)
- # ensure axis with qth is first
+ # ensure axis with q-th is first
ap = np.moveaxis(ap, axis, 0)
axis = 0
@@ -3753,7 +3922,7 @@ def _quantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False,
ap.partition(concatenate((indices_below, indices_above)), axis=axis)
- # ensure axis with qth is first
+ # ensure axis with q-th is first
ap = np.moveaxis(ap, axis, 0)
weights_below = np.moveaxis(weights_below, axis, 0)
weights_above = np.moveaxis(weights_above, axis, 0)
@@ -3767,7 +3936,7 @@ def _quantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False,
x1 = take(ap, indices_below, axis=axis) * weights_below
x2 = take(ap, indices_above, axis=axis) * weights_above
- # ensure axis with qth is first
+ # ensure axis with q-th is first
x1 = np.moveaxis(x1, axis, 0)
x2 = np.moveaxis(x2, axis, 0)
@@ -3801,6 +3970,11 @@ def _quantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False,
return r
+def _trapz_dispatcher(y, x=None, dx=None, axis=None):
+ return (y, x)
+
+
+@array_function_dispatch(_trapz_dispatcher)
def trapz(y, x=None, dx=1.0, axis=-1):
"""
Integrate along the given axis using the composite trapezoidal rule.
@@ -3840,10 +4014,10 @@ def trapz(y, x=None, dx=1.0, axis=-1):
References
----------
- .. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
+ .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Trapezoidal_rule
.. [2] Illustration image:
- http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
+ https://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
Examples
--------
@@ -3891,42 +4065,12 @@ def trapz(y, x=None, dx=1.0, axis=-1):
return ret
-#always succeed
-def add_newdoc(place, obj, doc):
- """
- Adds documentation to obj which is in module place.
-
- If doc is a string add it to obj as a docstring
-
- If doc is a tuple, then the first element is interpreted as
- an attribute of obj and the second as the docstring
- (method, docstring)
-
- If doc is a list, then each element of the list should be a
- sequence of length two --> [(method1, docstring1),
- (method2, docstring2), ...]
-
- This routine never raises an error.
-
- This routine cannot modify read-only docstrings, as appear
- in new-style classes or built-in functions. Because this
- routine never raises an error the caller must check manually
- that the docstrings were changed.
- """
- try:
- new = getattr(__import__(place, globals(), {}, [obj]), obj)
- if isinstance(doc, str):
- add_docstring(new, doc.strip())
- elif isinstance(doc, tuple):
- add_docstring(getattr(new, doc[0]), doc[1].strip())
- elif isinstance(doc, list):
- for val in doc:
- add_docstring(getattr(new, val[0]), val[1].strip())
- except Exception:
- pass
+def _meshgrid_dispatcher(*xi, **kwargs):
+    return xi
+
+
# Based on scitools meshgrid
+@array_function_dispatch(_meshgrid_dispatcher)
def meshgrid(*xi, **kwargs):
"""
Return coordinate matrices from coordinate vectors.
@@ -4022,11 +4166,13 @@ def meshgrid(*xi, **kwargs):
`meshgrid` is very useful to evaluate functions on a grid.
+ >>> import matplotlib.pyplot as plt
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = np.meshgrid(x, y, sparse=True)
>>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
>>> h = plt.contourf(x,y,z)
+ >>> plt.show()
"""
ndim = len(xi)
@@ -4062,6 +4208,11 @@ def meshgrid(*xi, **kwargs):
return output
+def _delete_dispatcher(arr, obj, axis=None):
+ return (arr, obj)
+
+
+@array_function_dispatch(_delete_dispatcher)
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted. For a one
@@ -4267,6 +4418,11 @@ def delete(arr, obj, axis=None):
return new
+def _insert_dispatcher(arr, obj, values, axis=None):
+ return (arr, obj, values)
+
+
+@array_function_dispatch(_insert_dispatcher)
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
@@ -4473,6 +4629,11 @@ def insert(arr, obj, values, axis=None):
return new
+def _append_dispatcher(arr, values, axis=None):
+ return (arr, values)
+
+
+@array_function_dispatch(_append_dispatcher)
def append(arr, values, axis=None):
"""
Append values to the end of an array.
@@ -4526,3 +4687,118 @@ def append(arr, values, axis=None):
values = ravel(values)
axis = arr.ndim-1
return concatenate((arr, values), axis=axis)
+
+
+def _digitize_dispatcher(x, bins, right=None):
+ return (x, bins)
+
+
+@array_function_dispatch(_digitize_dispatcher)
+def digitize(x, bins, right=False):
+ """
+ Return the indices of the bins to which each value in input array belongs.
+
+ ========= ============= ============================
+ `right` order of bins returned index `i` satisfies
+ ========= ============= ============================
+ ``False`` increasing ``bins[i-1] <= x < bins[i]``
+ ``True`` increasing ``bins[i-1] < x <= bins[i]``
+ ``False`` decreasing ``bins[i-1] > x >= bins[i]``
+ ``True`` decreasing ``bins[i-1] >= x > bins[i]``
+ ========= ============= ============================
+
+ If values in `x` are beyond the bounds of `bins`, 0 or ``len(bins)`` is
+ returned as appropriate.
+
+ Parameters
+ ----------
+ x : array_like
+ Input array to be binned. Prior to NumPy 1.10.0, this array had to
+ be 1-dimensional, but can now have any shape.
+ bins : array_like
+ Array of bins. It has to be 1-dimensional and monotonic.
+ right : bool, optional
+ Indicating whether the intervals include the right or the left bin
+ edge. Default behavior is (right==False) indicating that the interval
+ does not include the right edge. The left bin end is open in this
+ case, i.e., bins[i-1] <= x < bins[i] is the default behavior for
+ monotonically increasing bins.
+
+ Returns
+ -------
+ indices : ndarray of ints
+ Output array of indices, of same shape as `x`.
+
+ Raises
+ ------
+ ValueError
+ If `bins` is not monotonic.
+ TypeError
+ If the type of the input is complex.
+
+ See Also
+ --------
+ bincount, histogram, unique, searchsorted
+
+ Notes
+ -----
+ If values in `x` are such that they fall outside the bin range,
+ attempting to index `bins` with the indices that `digitize` returns
+ will result in an IndexError.
+
+ .. versionadded:: 1.10.0
+
+ `np.digitize` is implemented in terms of `np.searchsorted`. This means
+ that a binary search is used to bin the values, which scales much better
+ for a larger number of bins than the previous linear search. It also removes
+ the requirement for the input array to be 1-dimensional.
+
+ For monotonically *increasing* `bins`, the following are equivalent::
+
+ np.digitize(x, bins, right=True)
+ np.searchsorted(bins, x, side='left')
+
+ Note that as the order of the arguments is reversed, the side must be too.
+ The `searchsorted` call is marginally faster, as it does not do any
+ monotonicity checks. Perhaps more importantly, it supports all dtypes.
+
+ Examples
+ --------
+ >>> x = np.array([0.2, 6.4, 3.0, 1.6])
+ >>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
+ >>> inds = np.digitize(x, bins)
+ >>> inds
+ array([1, 4, 3, 2])
+ >>> for n in range(x.size):
+ ... print(bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]])
+ ...
+ 0.0 <= 0.2 < 1.0
+ 4.0 <= 6.4 < 10.0
+ 2.5 <= 3.0 < 4.0
+ 1.0 <= 1.6 < 2.5
+
+ >>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.])
+ >>> bins = np.array([0, 5, 10, 15, 20])
+ >>> np.digitize(x, bins, right=True)
+ array([1, 2, 3, 4, 4])
+ >>> np.digitize(x, bins, right=False)
+ array([1, 3, 3, 4, 5])
+ """
+ x = _nx.asarray(x)
+ bins = _nx.asarray(bins)
+
+ # here for compatibility, searchsorted below is happy to take this
+ if np.issubdtype(x.dtype, _nx.complexfloating):
+ raise TypeError("x may not be complex")
+
+ mono = _monotonicity(bins)
+ if mono == 0:
+ raise ValueError("bins must be monotonically increasing or decreasing")
+
+ # this is backwards because the arguments below are swapped
+ side = 'left' if right else 'right'
+ if mono == -1:
+ # reverse the bins, and invert the results
+ return len(bins) - _nx.searchsorted(bins[::-1], x, side=side)
+ else:
+ return _nx.searchsorted(bins, x, side=side)
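In the ``mono == -1`` branch above, reversing the bins keeps `searchsorted` applicable, and ``len(bins) - ...`` maps the positions back to the original decreasing order. A quick check of that identity, in the style of the examples in the docstring::

    >>> x = np.array([0.2, 6.4, 3.0])
    >>> bins = np.array([10.0, 4.0, 2.5, 1.0, 0.0])   # decreasing
    >>> np.digitize(x, bins)                          # bins[i-1] > x >= bins[i]
    array([4, 1, 2])
    >>> len(bins) - np.searchsorted(bins[::-1], x, side='right')
    array([4, 1, 2])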
diff --git a/numpy/lib/histograms.py b/numpy/lib/histograms.py
index 209b55867..482eabe14 100644
--- a/numpy/lib/histograms.py
+++ b/numpy/lib/histograms.py
@@ -3,20 +3,25 @@ Histogram-related functions
"""
from __future__ import division, absolute_import, print_function
+import functools
import operator
import warnings
import numpy as np
from numpy.compat.py3k import basestring
+from numpy.core import overrides
__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges']
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
# range is a keyword argument to many functions, so save the builtin so they can
# use it.
_range = range
-def _hist_bin_sqrt(x):
+def _hist_bin_sqrt(x, range):
"""
Square root histogram bin estimator.
@@ -33,10 +38,11 @@ def _hist_bin_sqrt(x):
-------
h : An estimate of the optimal bin width for the given data.
"""
+ del range # unused
return x.ptp() / np.sqrt(x.size)
-def _hist_bin_sturges(x):
+def _hist_bin_sturges(x, range):
"""
Sturges histogram bin estimator.
@@ -55,10 +61,11 @@ def _hist_bin_sturges(x):
-------
h : An estimate of the optimal bin width for the given data.
"""
+ del range # unused
return x.ptp() / (np.log2(x.size) + 1.0)
-def _hist_bin_rice(x):
+def _hist_bin_rice(x, range):
"""
Rice histogram bin estimator.
@@ -78,10 +85,11 @@ def _hist_bin_rice(x):
-------
h : An estimate of the optimal bin width for the given data.
"""
+ del range # unused
return x.ptp() / (2.0 * x.size ** (1.0 / 3))
-def _hist_bin_scott(x):
+def _hist_bin_scott(x, range):
"""
Scott histogram bin estimator.
@@ -99,10 +107,52 @@ def _hist_bin_scott(x):
-------
h : An estimate of the optimal bin width for the given data.
"""
+ del range # unused
return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x)
-def _hist_bin_doane(x):
+def _hist_bin_stone(x, range):
+ """
+ Histogram bin estimator based on minimizing the estimated integrated
+ squared error (ISE).
+
+ The number of bins is chosen by minimizing the estimated ISE against
+ the unknown true distribution. The ISE is estimated using
+ cross-validation and can be regarded as a generalization of Scott's
+ rule.
+ https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule
+
+ This paper by Stone appears to be the origin of this rule.
+ http://digitalassets.lib.berkeley.edu/sdtr/ucb/text/34.pdf
+
+ Parameters
+ ----------
+ x : array_like
+ Input data that is to be histogrammed, trimmed to range. May not
+ be empty.
+ range : (float, float)
+ The lower and upper range of the bins.
+
+ Returns
+ -------
+ h : An estimate of the optimal bin width for the given data.
+ """
+
+ n = x.size
+ ptp_x = np.ptp(x)
+ if n <= 1 or ptp_x == 0:
+ return 0
+
+ def jhat(nbins):
+ hh = ptp_x / nbins
+ p_k = np.histogram(x, bins=nbins, range=range)[0] / n
+ return (2 - (n + 1) * p_k.dot(p_k)) / hh
+
+ nbins_upper_bound = max(100, int(np.sqrt(n)))
+ nbins = min(_range(1, nbins_upper_bound + 1), key=jhat)
+ if nbins == nbins_upper_bound:
+ warnings.warn("The number of bins estimated may be suboptimal.", RuntimeWarning, stacklevel=2)
+ return ptp_x / nbins
+
+
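`jhat` above is the leave-one-out cross-validation objective for bin width ``h = ptp_x / nbins``; the constant factor ``1 / (n - 1)`` in Stone's J(h) is dropped since it does not affect the argmin. Once the estimator is registered in `_hist_bin_selectors` below, it is reachable through the string-based bins API; a usage sketch (edges omitted, as they depend on the data)::

    >>> data = np.random.RandomState(0).normal(size=1000)
    >>> edges = np.histogram_bin_edges(data, bins='stone')
    >>> hist, edges = np.histogram(data, bins='stone')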
+def _hist_bin_doane(x, range):
"""
Doane's histogram bin estimator.
@@ -120,6 +170,7 @@ def _hist_bin_doane(x):
-------
h : An estimate of the optimal bin width for the given data.
"""
+ del range # unused
if x.size > 2:
sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
sigma = np.std(x)
@@ -136,7 +187,7 @@ def _hist_bin_doane(x):
return 0.0
-def _hist_bin_fd(x):
+def _hist_bin_fd(x, range):
"""
The Freedman-Diaconis histogram bin estimator.
@@ -161,11 +212,12 @@ def _hist_bin_fd(x):
-------
h : An estimate of the optimal bin width for the given data.
"""
+ del range # unused
iqr = np.subtract(*np.percentile(x, [75, 25]))
return 2.0 * iqr * x.size ** (-1.0 / 3.0)
-def _hist_bin_auto(x):
+def _hist_bin_auto(x, range):
"""
Histogram bin estimator that uses the minimum width of the
Freedman-Diaconis and Sturges estimators if the FD bandwidth is non zero
@@ -199,8 +251,9 @@ def _hist_bin_auto(x):
--------
_hist_bin_fd, _hist_bin_sturges
"""
- fd_bw = _hist_bin_fd(x)
- sturges_bw = _hist_bin_sturges(x)
+ fd_bw = _hist_bin_fd(x, range)
+ sturges_bw = _hist_bin_sturges(x, range)
+ del range # unused
if fd_bw:
return min(fd_bw, sturges_bw)
else:
@@ -208,7 +261,8 @@ def _hist_bin_auto(x):
return sturges_bw
# Private dict initialized at module load time
-_hist_bin_selectors = {'auto': _hist_bin_auto,
+_hist_bin_selectors = {'stone': _hist_bin_stone,
+ 'auto': _hist_bin_auto,
'doane': _hist_bin_doane,
'fd': _hist_bin_fd,
'rice': _hist_bin_rice,
@@ -343,7 +397,7 @@ def _get_bin_edges(a, bins, range, weights):
n_equal_bins = 1
else:
# Do not call selectors on empty arrays
- width = _hist_bin_selectors[bin_name](a)
+ width = _hist_bin_selectors[bin_name](a, (first_edge, last_edge))
if width:
n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width))
else:
@@ -400,6 +454,11 @@ def _search_sorted_inclusive(a, v):
))
+def _histogram_bin_edges_dispatcher(a, bins=None, range=None, weights=None):
+ return (a, bins, weights)
+
+
+@array_function_dispatch(_histogram_bin_edges_dispatcher)
def histogram_bin_edges(a, bins=10, range=None, weights=None):
r"""
Function to calculate only the edges of the bins used by the `histogram` function.
@@ -440,6 +499,11 @@ def histogram_bin_edges(a, bins=10, range=None, weights=None):
Less robust estimator that takes into account data
variability and data size.
+ 'stone'
+ Estimator based on leave-one-out cross-validation estimate of
+ the integrated squared error. Can be regarded as a generalization
+ of Scott's rule.
+
'rice'
Estimator does not take variability into account, only data
size. Commonly overestimates number of bins required.
@@ -594,6 +658,12 @@ def histogram_bin_edges(a, bins=10, range=None, weights=None):
return bin_edges
+def _histogram_dispatcher(
+ a, bins=None, range=None, normed=None, weights=None, density=None):
+ return (a, bins, weights)
+
+
+@array_function_dispatch(_histogram_dispatcher)
def histogram(a, bins=10, range=None, normed=None, weights=None,
density=None):
r"""
@@ -606,8 +676,8 @@ def histogram(a, bins=10, range=None, normed=None, weights=None,
bins : int or sequence of scalars or str, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a
- sequence, it defines the bin edges, including the rightmost
- edge, allowing for non-uniform bin widths.
+ sequence, it defines a monotonically increasing array of bin edges,
+ including the rightmost edge, allowing for non-uniform bin widths.
.. versionadded:: 1.11.0
@@ -846,6 +916,12 @@ def histogram(a, bins=10, range=None, normed=None, weights=None,
return n, bin_edges
+def _histogramdd_dispatcher(sample, bins=None, range=None, normed=None,
+ weights=None, density=None):
+ return (sample, bins, weights)
+
+
+@array_function_dispatch(_histogramdd_dispatcher)
def histogramdd(sample, bins=10, range=None, normed=None, weights=None,
density=None):
"""
@@ -868,7 +944,8 @@ def histogramdd(sample, bins=10, range=None, normed=None, weights=None,
bins : sequence or int, optional
The bin specification:
- * A sequence of arrays describing the bin edges along each dimension.
+ * A sequence of arrays describing the monotonically increasing bin
+ edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index d2139338e..56abe293a 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -1,5 +1,6 @@
from __future__ import division, absolute_import, print_function
+import functools
import sys
import math
@@ -9,13 +10,18 @@ from numpy.core.numeric import (
)
from numpy.core.numerictypes import find_common_type, issubdtype
-from . import function_base
import numpy.matrixlib as matrixlib
from .function_base import diff
from numpy.core.multiarray import ravel_multi_index, unravel_index
+from numpy.core.overrides import set_module
+from numpy.core import overrides, linspace
from numpy.lib.stride_tricks import as_strided
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
__all__ = [
'ravel_multi_index', 'unravel_index', 'mgrid', 'ogrid', 'r_', 'c_',
's_', 'index_exp', 'ix_', 'ndenumerate', 'ndindex', 'fill_diagonal',
@@ -23,6 +29,11 @@ __all__ = [
]
+def _ix__dispatcher(*args):
+ return args
+
+
+@array_function_dispatch(_ix__dispatcher)
def ix_(*args):
"""
Construct an open mesh from multiple sequences.
@@ -121,39 +132,13 @@ class nd_grid(object):
Notes
-----
Two instances of `nd_grid` are made available in the NumPy namespace,
- `mgrid` and `ogrid`::
+ `mgrid` and `ogrid`, approximately defined as::
mgrid = nd_grid(sparse=False)
ogrid = nd_grid(sparse=True)
Users should use these pre-defined instances instead of using `nd_grid`
directly.
-
- Examples
- --------
- >>> mgrid = np.lib.index_tricks.nd_grid()
- >>> mgrid[0:5,0:5]
- array([[[0, 0, 0, 0, 0],
- [1, 1, 1, 1, 1],
- [2, 2, 2, 2, 2],
- [3, 3, 3, 3, 3],
- [4, 4, 4, 4, 4]],
- [[0, 1, 2, 3, 4],
- [0, 1, 2, 3, 4],
- [0, 1, 2, 3, 4],
- [0, 1, 2, 3, 4],
- [0, 1, 2, 3, 4]]])
- >>> mgrid[-1:1:5j]
- array([-1. , -0.5, 0. , 0.5, 1. ])
-
- >>> ogrid = np.lib.index_tricks.nd_grid(sparse=True)
- >>> ogrid[0:5,0:5]
- [array([[0],
- [1],
- [2],
- [3],
- [4]]), array([[0, 1, 2, 3, 4]])]
-
"""
def __init__(self, sparse=False):
@@ -220,13 +205,97 @@ class nd_grid(object):
else:
return _nx.arange(start, stop, step)
- def __len__(self):
- return 0
-mgrid = nd_grid(sparse=False)
-ogrid = nd_grid(sparse=True)
-mgrid.__doc__ = None # set in numpy.add_newdocs
-ogrid.__doc__ = None # set in numpy.add_newdocs
+class MGridClass(nd_grid):
+ """
+ `nd_grid` instance which returns a dense multi-dimensional "meshgrid".
+
+ An instance of `numpy.lib.index_tricks.nd_grid` which returns a dense
+ (or fleshed out) mesh-grid when indexed, so that each returned argument
+ has the same shape. The dimensions and number of the output arrays are
+ equal to the number of indexing dimensions. If the step length is not a
+ complex number, then the stop is not inclusive.
+
+ However, if the step length is a **complex number** (e.g. 5j), then
+ the integer part of its magnitude is interpreted as specifying the
+ number of points to create between the start and stop values, where
+ the stop value **is inclusive**.
+
+ Returns
+ -------
+ mesh-grid `ndarrays` all of the same dimensions
+
+ See Also
+ --------
+ numpy.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
+ ogrid : like mgrid but returns open (not fleshed out) mesh grids
+ r_ : array concatenator
+
+ Examples
+ --------
+ >>> np.mgrid[0:5,0:5]
+ array([[[0, 0, 0, 0, 0],
+ [1, 1, 1, 1, 1],
+ [2, 2, 2, 2, 2],
+ [3, 3, 3, 3, 3],
+ [4, 4, 4, 4, 4]],
+ [[0, 1, 2, 3, 4],
+ [0, 1, 2, 3, 4],
+ [0, 1, 2, 3, 4],
+ [0, 1, 2, 3, 4],
+ [0, 1, 2, 3, 4]]])
+ >>> np.mgrid[-1:1:5j]
+ array([-1. , -0.5, 0. , 0.5, 1. ])
+
+ """
+ def __init__(self):
+ super(MGridClass, self).__init__(sparse=False)
+
+mgrid = MGridClass()
+
+class OGridClass(nd_grid):
+ """
+ `nd_grid` instance which returns an open multi-dimensional "meshgrid".
+
+ An instance of `numpy.lib.index_tricks.nd_grid` which returns an open
+ (i.e. not fleshed out) mesh-grid when indexed, so that only one dimension
+ of each returned array is greater than 1. The dimension and number of the
+ output arrays are equal to the number of indexing dimensions. If the step
+ length is not a complex number, then the stop is not inclusive.
+
+ However, if the step length is a **complex number** (e.g. 5j), then
+ the integer part of its magnitude is interpreted as specifying the
+ number of points to create between the start and stop values, where
+ the stop value **is inclusive**.
+
+ Returns
+ -------
+ mesh-grid `ndarrays` with only one dimension :math:`\\neq 1`
+
+ See Also
+ --------
+ np.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
+ mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids
+ r_ : array concatenator
+
+ Examples
+ --------
+ >>> from numpy import ogrid
+ >>> ogrid[-1:1:5j]
+ array([-1. , -0.5, 0. , 0.5, 1. ])
+ >>> ogrid[0:5,0:5]
+ [array([[0],
+ [1],
+ [2],
+ [3],
+ [4]]), array([[0, 1, 2, 3, 4]])]
+
+ """
+ def __init__(self):
+ super(OGridClass, self).__init__(sparse=True)
+
+ogrid = OGridClass()
+
class AxisConcatenator(object):
"""
@@ -277,7 +346,7 @@ class AxisConcatenator(object):
step = 1
if isinstance(step, complex):
size = int(abs(step))
- newobj = function_base.linspace(start, stop, num=size)
+ newobj = linspace(start, stop, num=size)
else:
newobj = _nx.arange(start, stop, step)
if ndmin > 1:
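The import shuffle does not change the complex-step behavior: a step of ``5j`` still means "5 samples, stop inclusive", delegated to `linspace`. For instance::

    >>> np.r_[-1:1:5j]                # complex step: size = int(abs(5j)) = 5
    array([-1. , -0.5,  0. ,  0.5,  1. ])
    >>> np.linspace(-1, 1, num=5)     # the call the branch above reduces to
    array([-1. , -0.5,  0. ,  0.5,  1. ])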
@@ -476,8 +545,11 @@ class CClass(AxisConcatenator):
def __init__(self):
AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0)
+
c_ = CClass()
+
+@set_module('numpy')
class ndenumerate(object):
"""
Multidimensional index iterator.
@@ -528,6 +600,7 @@ class ndenumerate(object):
next = __next__
+@set_module('numpy')
class ndindex(object):
"""
An N-dimensional iterator object to index arrays.
@@ -668,6 +741,12 @@ s_ = IndexExpression(maketuple=False)
# The following functions complement those in twodim_base, but are
# applicable to N-dimensions.
+
+def _fill_diagonal_dispatcher(a, val, wrap=None):
+ return (a,)
+
+
+@array_function_dispatch(_fill_diagonal_dispatcher)
def fill_diagonal(a, val, wrap=False):
"""Fill the main diagonal of the given array of any dimensionality.
@@ -782,6 +861,7 @@ def fill_diagonal(a, val, wrap=False):
a.flat[:end:step] = val
+@set_module('numpy')
def diag_indices(n, ndim=2):
"""
Return the indices to access the main diagonal of an array.
@@ -850,6 +930,11 @@ def diag_indices(n, ndim=2):
return (idx,) * ndim
+def _diag_indices_from(arr):
+ return (arr,)
+
+
+@array_function_dispatch(_diag_indices_from)
def diag_indices_from(arr):
"""
Return the indices to access the main diagonal of an n-dimensional array.
diff --git a/numpy/lib/mixins.py b/numpy/lib/mixins.py
index 0379ecb1a..52ad45b68 100644
--- a/numpy/lib/mixins.py
+++ b/numpy/lib/mixins.py
@@ -69,9 +69,6 @@ class NDArrayOperatorsMixin(object):
deferring to the ``__array_ufunc__`` method, which subclasses must
implement.
- This class does not yet implement the special operators corresponding
- to ``matmul`` (``@``), because ``np.matmul`` is not yet a NumPy ufunc.
-
It is useful for writing classes that do not inherit from `numpy.ndarray`,
but that should support arithmetic and numpy universal functions like
arrays as described in `A Mechanism for Overriding Ufuncs
@@ -155,6 +152,8 @@ class NDArrayOperatorsMixin(object):
__add__, __radd__, __iadd__ = _numeric_methods(um.add, 'add')
__sub__, __rsub__, __isub__ = _numeric_methods(um.subtract, 'sub')
__mul__, __rmul__, __imul__ = _numeric_methods(um.multiply, 'mul')
+ __matmul__, __rmatmul__, __imatmul__ = _numeric_methods(
+ um.matmul, 'matmul')
if sys.version_info.major < 3:
# Python 3 uses only __truediv__ and __floordiv__
__div__, __rdiv__, __idiv__ = _numeric_methods(um.divide, 'div')
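With `matmul` available as the ufunc ``um.matmul``, the mixin routes ``@`` through the same `__array_ufunc__` hook as the other operators. A minimal sketch; the `Wrapped` class is illustrative, not part of NumPy::

    import numpy as np
    from numpy.lib.mixins import NDArrayOperatorsMixin

    class Wrapped(NDArrayOperatorsMixin):
        # Defer every ufunc, including matmul, to the wrapped ndarray
        # and re-wrap the result.
        def __init__(self, value):
            self.value = np.asarray(value)

        def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
            inputs = tuple(x.value if isinstance(x, Wrapped) else x
                           for x in inputs)
            return Wrapped(getattr(ufunc, method)(*inputs, **kwargs))

    a = Wrapped(np.eye(2))
    b = Wrapped(np.arange(4.0).reshape(2, 2))
    c = a @ b   # __matmul__ -> um.matmul -> a.__array_ufunc__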
diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py
index abd2da1a2..d73d84467 100644
--- a/numpy/lib/nanfunctions.py
+++ b/numpy/lib/nanfunctions.py
@@ -22,9 +22,15 @@ Functions
"""
from __future__ import division, absolute_import, print_function
+import functools
import warnings
import numpy as np
from numpy.lib import function_base
+from numpy.core import overrides
+
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
__all__ = [
@@ -188,6 +194,11 @@ def _divide_by_count(a, b, out=None):
return np.divide(a, b, out=out, casting='unsafe')
+def _nanmin_dispatcher(a, axis=None, out=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nanmin_dispatcher)
def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
"""
Return minimum of an array or minimum along an axis, ignoring any NaNs.
@@ -296,6 +307,11 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
return res
+def _nanmax_dispatcher(a, axis=None, out=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nanmax_dispatcher)
def nanmax(a, axis=None, out=None, keepdims=np._NoValue):
"""
Return the maximum of an array or maximum along an axis, ignoring any
@@ -404,6 +420,11 @@ def nanmax(a, axis=None, out=None, keepdims=np._NoValue):
return res
+def _nanargmin_dispatcher(a, axis=None):
+ return (a,)
+
+
+@array_function_dispatch(_nanargmin_dispatcher)
def nanargmin(a, axis=None):
"""
Return the indices of the minimum values in the specified axis ignoring
@@ -448,6 +469,11 @@ def nanargmin(a, axis=None):
return res
+def _nanargmax_dispatcher(a, axis=None):
+ return (a,)
+
+
+@array_function_dispatch(_nanargmax_dispatcher)
def nanargmax(a, axis=None):
"""
Return the indices of the maximum values in the specified axis ignoring
@@ -493,6 +519,11 @@ def nanargmax(a, axis=None):
return res
+def _nansum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nansum_dispatcher)
def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Return the sum of array elements over a given axis treating Not a
@@ -583,6 +614,11 @@ def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
return np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
+def _nanprod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nanprod_dispatcher)
def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Return the product of array elements over a given axis treating Not a
@@ -648,6 +684,11 @@ def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
return np.prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
+def _nancumsum_dispatcher(a, axis=None, dtype=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nancumsum_dispatcher)
def nancumsum(a, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of array elements over a given axis treating Not a
@@ -713,6 +754,11 @@ def nancumsum(a, axis=None, dtype=None, out=None):
return np.cumsum(a, axis=axis, dtype=dtype, out=out)
+def _nancumprod_dispatcher(a, axis=None, dtype=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nancumprod_dispatcher)
def nancumprod(a, axis=None, dtype=None, out=None):
"""
Return the cumulative product of array elements over a given axis treating Not a
@@ -775,6 +821,11 @@ def nancumprod(a, axis=None, dtype=None, out=None):
return np.cumprod(a, axis=axis, dtype=dtype, out=out)
+def _nanmean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nanmean_dispatcher)
def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Compute the arithmetic mean along the specified axis, ignoring NaNs.
@@ -928,6 +979,12 @@ def _nanmedian_small(a, axis=None, out=None, overwrite_input=False):
return m.filled(np.nan)
+def _nanmedian_dispatcher(
+ a, axis=None, out=None, overwrite_input=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nanmedian_dispatcher)
def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValue):
"""
Compute the median along the specified axis, while ignoring NaNs.
@@ -1026,6 +1083,12 @@ def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValu
return r
+def _nanpercentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
+ interpolation=None, keepdims=None):
+ return (a, q, out)
+
+
+@array_function_dispatch(_nanpercentile_dispatcher)
def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
interpolation='linear', keepdims=np._NoValue):
"""
@@ -1146,6 +1209,12 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
a, q, axis, out, overwrite_input, interpolation, keepdims)
+def _nanquantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
+ interpolation=None, keepdims=None):
+ return (a, q, out)
+
+
+@array_function_dispatch(_nanquantile_dispatcher)
def nanquantile(a, q, axis=None, out=None, overwrite_input=False,
interpolation='linear', keepdims=np._NoValue):
"""
@@ -1178,13 +1247,15 @@ def nanquantile(a, q, axis=None, out=None, overwrite_input=False,
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
``i < j``:
- * linear: ``i + (j - i) * fraction``, where ``fraction``
- is the fractional part of the index surrounded by ``i``
- and ``j``.
- * lower: ``i``.
- * higher: ``j``.
- * nearest: ``i`` or ``j``, whichever is nearest.
- * midpoint: ``(i + j) / 2``.
+
+ * linear: ``i + (j - i) * fraction``, where ``fraction``
+ is the fractional part of the index surrounded by ``i``
+ and ``j``.
+ * lower: ``i``.
+ * higher: ``j``.
+ * nearest: ``i`` or ``j``, whichever is nearest.
+ * midpoint: ``(i + j) / 2``.
+
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in
the result as dimensions with size one. With this option, the
@@ -1306,6 +1377,12 @@ def _nanquantile_1d(arr1d, q, overwrite_input=False, interpolation='linear'):
arr1d, q, overwrite_input=overwrite_input, interpolation=interpolation)
+def _nanvar_dispatcher(
+ a, axis=None, dtype=None, out=None, ddof=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nanvar_dispatcher)
def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
"""
Compute the variance along the specified axis, while ignoring NaNs.
@@ -1447,6 +1524,12 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
return var
+def _nanstd_dispatcher(
+ a, axis=None, dtype=None, out=None, ddof=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nanstd_dispatcher)
def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
"""
Compute the standard deviation along the specified axis, while
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index 390927601..db6a8e5eb 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -1,9 +1,9 @@
from __future__ import division, absolute_import, print_function
-import io
import sys
import os
import re
+import functools
import itertools
import warnings
import weakref
@@ -12,7 +12,10 @@ from operator import itemgetter, index as opindex
import numpy as np
from . import format
from ._datasource import DataSource
+from numpy.core import overrides
from numpy.core.multiarray import packbits, unpackbits
+from numpy.core.overrides import set_module
+from numpy.core._internal import recursive
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like,
@@ -21,18 +24,18 @@ from ._iotools import (
from numpy.compat import (
asbytes, asstr, asunicode, asbytes_nested, bytes, basestring, unicode,
- is_pathlib_path
+ os_fspath, os_PathLike
)
+from numpy.core.numeric import pickle
if sys.version_info[0] >= 3:
- import pickle
from collections.abc import Mapping
else:
- import cPickle as pickle
from future_builtins import map
from collections import Mapping
+@set_module('numpy')
def loads(*args, **kwargs):
# NumPy 1.15.0, 2017-12-10
warnings.warn(
@@ -48,6 +51,10 @@ __all__ = [
]
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
class BagObj(object):
"""
BagObj(obj)
@@ -105,8 +112,8 @@ def zipfile_factory(file, *args, **kwargs):
pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile
constructor.
"""
- if is_pathlib_path(file):
- file = str(file)
+ if not hasattr(file, 'read'):
+ file = os_fspath(file)
import zipfile
kwargs['allowZip64'] = True
return zipfile.ZipFile(file, *args, **kwargs)
@@ -277,6 +284,7 @@ class NpzFile(Mapping):
return self.keys()
+@set_module('numpy')
def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
encoding='ASCII'):
"""
@@ -379,16 +387,6 @@ def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
memmap([4, 5, 6])
"""
- own_fid = False
- if isinstance(file, basestring):
- fid = open(file, "rb")
- own_fid = True
- elif is_pathlib_path(file):
- fid = file.open("rb")
- own_fid = True
- else:
- fid = file
-
if encoding not in ('ASCII', 'latin1', 'bytes'):
# The 'encoding' value for pickle also affects what encoding
# the serialized binary data of NumPy arrays is loaded
@@ -409,21 +407,30 @@ def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
# Nothing to do on Python 2
pickle_kwargs = {}
+ # TODO: Use contextlib.ExitStack once we drop Python 2
+ if hasattr(file, 'read'):
+ fid = file
+ own_fid = False
+ else:
+ fid = open(os_fspath(file), "rb")
+ own_fid = True
+
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = b'PK\x03\x04'
+ _ZIP_SUFFIX = b'PK\x05\x06' # empty zip files start with this
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
# If the file size is less than N, we need to make sure not
# to seek past the beginning of the file
fid.seek(-min(N, len(magic)), 1) # back-up
- if magic.startswith(_ZIP_PREFIX):
+ if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX):
# zip-file (assume .npz)
# Transfer file ownership to NpzFile
- tmp = own_fid
+ ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle,
+ pickle_kwargs=pickle_kwargs)
own_fid = False
- return NpzFile(fid, own_fid=tmp, allow_pickle=allow_pickle,
- pickle_kwargs=pickle_kwargs)
+ return ret
elif magic == format.MAGIC_PREFIX:
# .npy file
if mmap_mode:
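``PK\x05\x06`` is the end-of-central-directory signature, the only record in a zip archive with no members, so an empty ``.npz`` previously missed the ``PK\x03\x04`` check and fell through to the pickle branch. The new prefix is easy to confirm::

    >>> import io, zipfile
    >>> buf = io.BytesIO()
    >>> zipfile.ZipFile(buf, 'w').close()   # archive with zero members
    >>> buf.getvalue()[:4] == b'PK\x05\x06'
    True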
@@ -434,8 +441,8 @@ def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
else:
# Try a pickle
if not allow_pickle:
- raise ValueError("allow_pickle=False, but file does not contain "
- "non-pickled data")
+ raise ValueError("Cannot load file containing pickled data "
+ "when allow_pickle=False")
try:
return pickle.load(fid, **pickle_kwargs)
except Exception:
@@ -446,6 +453,11 @@ def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
fid.close()
+def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None):
+ return (arr,)
+
+
+@array_function_dispatch(_save_dispatcher)
def save(file, arr, allow_pickle=True, fix_imports=True):
"""
Save an array to a binary file in NumPy ``.npy`` format.
@@ -496,18 +508,14 @@ def save(file, arr, allow_pickle=True, fix_imports=True):
"""
own_fid = False
- if isinstance(file, basestring):
+ if hasattr(file, 'read'):
+ fid = file
+ else:
+ file = os_fspath(file)
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
- elif is_pathlib_path(file):
- if not file.name.endswith('.npy'):
- file = file.parent / (file.name + '.npy')
- fid = file.open("wb")
- own_fid = True
- else:
- fid = file
if sys.version_info[0] >= 3:
pickle_kwargs = dict(fix_imports=fix_imports)
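Collapsing the string and pathlib special cases into a single ``hasattr(file, 'read')`` / `os_fspath` split means `save` now accepts anything implementing the `os.PathLike` protocol (`numpy.compat` provides backports for older Pythons). A usage sketch::

    >>> from pathlib import Path
    >>> np.save(Path('vec'), np.arange(3))   # '.npy' is appended as before
    >>> np.load('vec.npy')
    array([0, 1, 2])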
@@ -524,6 +532,14 @@ def save(file, arr, allow_pickle=True, fix_imports=True):
fid.close()
+def _savez_dispatcher(file, *args, **kwds):
+ for a in args:
+ yield a
+ for v in kwds.values():
+ yield v
+
+
+@array_function_dispatch(_savez_dispatcher)
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
@@ -603,6 +619,14 @@ def savez(file, *args, **kwds):
_savez(file, args, kwds, False)
+def _savez_compressed_dispatcher(file, *args, **kwds):
+ for a in args:
+ yield a
+ for v in kwds.values():
+ yield v
+
+
+@array_function_dispatch(_savez_compressed_dispatcher)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
@@ -672,12 +696,10 @@ def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
# component of the so-called standard library.
import zipfile
- if isinstance(file, basestring):
+ if not hasattr(file, 'read'):
+ file = os_fspath(file)
if not file.endswith('.npz'):
file = file + '.npz'
- elif is_pathlib_path(file):
- if not file.name.endswith('.npz'):
- file = file.parent / (file.name + '.npz')
namedict = kwds
for i, val in enumerate(args):
@@ -770,9 +792,11 @@ def _getconv(dtype):
# amount of lines loadtxt reads in one chunk, can be overridden for testing
_loadtxt_chunksize = 50000
+
+@set_module('numpy')
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
- ndmin=0, encoding='bytes'):
+ ndmin=0, encoding='bytes', max_rows=None):
"""
Load data from a text file.
@@ -834,6 +858,11 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
the system default is used. The default value is 'bytes'.
.. versionadded:: 1.14.0
+ max_rows : int, optional
+ Read `max_rows` lines of content after `skiprows` lines. The default
+ is to read all the lines.
+
+ .. versionadded:: 1.16.0
Returns
-------
@@ -920,8 +949,8 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
fown = False
try:
- if is_pathlib_path(fname):
- fname = str(fname)
+ if isinstance(fname, os_PathLike):
+ fname = os_fspath(fname)
if _is_string_like(fname):
fh = np.lib._datasource.open(fname, 'rt', encoding=encoding)
fencoding = getattr(fh, 'encoding', 'latin1')
@@ -943,7 +972,8 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
fencoding = locale.getpreferredencoding()
# not to be confused with the flatten_dtype we import...
- def flatten_dtype_internal(dt):
+ @recursive
+ def flatten_dtype_internal(self, dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
@@ -963,7 +993,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
- flat_dt, flat_packing = flatten_dtype_internal(tp)
+ flat_dt, flat_packing = self(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if tp.ndim > 0:
@@ -972,7 +1002,8 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
packing.append((len(flat_dt), flat_packing))
return (types, packing)
- def pack_items(items, packing):
+ @recursive
+ def pack_items(self, items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing is None:
return items[0]
@@ -984,7 +1015,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
start = 0
ret = []
for length, subpacking in packing:
- ret.append(pack_items(items[start:start+length], subpacking))
+ ret.append(self(items[start:start+length], subpacking))
start += length
return tuple(ret)
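`flatten_dtype_internal` and `pack_items` previously recursed by closing over their own names, creating reference cycles that had to be broken by hand (see the deleted cleanup further down). The `recursive` helper avoids the cycle by handing the closure an explicit handle to call. A sketch of one way such a decorator can be written; the real helper lives in `numpy.core._internal` and may differ in detail::

    class recursive(object):
        # The decorated function receives a callable handle as its first
        # argument instead of capturing its own name, so no
        # self-referential closure cell is created.
        def __init__(self, func):
            self.func = func

        def __call__(self, *args, **kwargs):
            return self.func(self, *args, **kwargs)

    @recursive
    def fact(self, n):
        return 1 if n <= 1 else n * self(n - 1)

    assert fact(5) == 120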
@@ -1013,7 +1044,9 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
"""
X = []
- for i, line in enumerate(itertools.chain([first_line], fh)):
+ line_iter = itertools.chain([first_line], fh)
+ line_iter = itertools.islice(line_iter, max_rows)
+ for i, line in enumerate(line_iter):
vals = split_line(line)
if len(vals) == 0:
continue
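`itertools.islice(line_iter, max_rows)` stops consuming the file after `max_rows` lines (``None``, the default, preserves the read-everything behavior). A usage sketch::

    >>> from io import StringIO
    >>> s = StringIO(u"0 1\n2 3\n4 5\n6 7\n")
    >>> np.loadtxt(s, max_rows=2)
    array([[0., 1.],
           [2., 3.]])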
@@ -1110,11 +1143,6 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
finally:
if fown:
fh.close()
- # recursive closures have a cyclic reference to themselves, which
- # requires gc to collect (gh-10620). To avoid this problem, for
- # performance and PyPy friendliness, we break the cycle:
- flatten_dtype_internal = None
- pack_items = None
if X is None:
X = np.array([], dtype)
@@ -1149,6 +1177,13 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
return X
+def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None,
+ header=None, footer=None, comments=None,
+ encoding=None):
+ return (X,)
+
+
+@array_function_dispatch(_savetxt_dispatcher)
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# ', encoding=None):
"""
@@ -1259,8 +1294,8 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
References
----------
.. [1] `Format Specification Mini-Language
- <http://docs.python.org/library/string.html#
- format-specification-mini-language>`_, Python Documentation.
+ <https://docs.python.org/library/string.html#format-specification-mini-language>`_,
+ Python Documentation.
Examples
--------
@@ -1310,8 +1345,8 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
self.write = self.write_bytes
own_fh = False
- if is_pathlib_path(fname):
- fname = str(fname)
+ if isinstance(fname, os_PathLike):
+ fname = os_fspath(fname)
if _is_string_like(fname):
# datasource doesn't support creating a new file ...
open(fname, 'wt').close()
@@ -1399,6 +1434,7 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
fh.close()
+@set_module('numpy')
def fromregex(file, regexp, dtype, encoding=None):
"""
Construct an array from a text file, using regular expression parsing.
@@ -1497,6 +1533,7 @@ def fromregex(file, regexp, dtype, encoding=None):
#####--------------------------------------------------------------------------
+@set_module('numpy')
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skip_header=0, skip_footer=0, converters=None,
missing_values=None, filling_values=None, usecols=None,
@@ -1624,7 +1661,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
References
----------
.. [1] NumPy User Guide, section `I/O with NumPy
- <http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
+ <https://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
---------
@@ -1694,8 +1731,8 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
- if is_pathlib_path(fname):
- fname = str(fname)
+ if isinstance(fname, os_PathLike):
+ fname = os_fspath(fname)
if isinstance(fname, basestring):
fhd = iter(np.lib._datasource.open(fname, 'rt', encoding=encoding))
own_fhd = True
@@ -2089,10 +2126,10 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
if names is None:
# If the dtype is uniform (before sizing strings)
- base = set([
+ base = {
c_type
for c, c_type in zip(converters, column_types)
- if c._checked])
+ if c._checked}
if len(base) == 1:
uniform_type, = base
(ddtype, mdtype) = (uniform_type, bool)
diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py
index 078608bbb..e3defdca2 100644
--- a/numpy/lib/polynomial.py
+++ b/numpy/lib/polynomial.py
@@ -8,17 +8,26 @@ __all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
+import functools
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
ones)
+from numpy.core import overrides
+from numpy.core.overrides import set_module
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros
from numpy.lib.type_check import iscomplex, real, imag, mintypecode
from numpy.linalg import eigvals, lstsq, inv
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+@set_module('numpy')
class RankWarning(UserWarning):
"""
Issued by `polyfit` when the Vandermonde matrix is rank deficient.
@@ -29,6 +38,12 @@ class RankWarning(UserWarning):
"""
pass
+
+def _poly_dispatcher(seq_of_zeros):
+ return seq_of_zeros
+
+
+@array_function_dispatch(_poly_dispatcher)
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
@@ -145,6 +160,12 @@ def poly(seq_of_zeros):
return a
+
+def _roots_dispatcher(p):
+ return p
+
+
+@array_function_dispatch(_roots_dispatcher)
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
@@ -229,6 +250,12 @@ def roots(p):
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
+
+def _polyint_dispatcher(p, m=None, k=None):
+ return (p,)
+
+
+@array_function_dispatch(_polyint_dispatcher)
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
@@ -245,7 +272,7 @@ def polyint(p, m=1, k=None):
Parameters
----------
p : array_like or poly1d
- Polynomial to differentiate.
+ Polynomial to integrate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of the antiderivative. (Default: 1)
@@ -322,6 +349,12 @@ def polyint(p, m=1, k=None):
return poly1d(val)
return val
+
+def _polyder_dispatcher(p, m=None):
+ return (p,)
+
+
+@array_function_dispatch(_polyder_dispatcher)
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
@@ -390,13 +423,23 @@ def polyder(p, m=1):
val = poly1d(val)
return val
+
+def _polyfit_dispatcher(x, y, deg, rcond=None, full=None, w=None, cov=None):
+ return (x, y, w)
+
+
+@array_function_dispatch(_polyfit_dispatcher)
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
"""
Least squares polynomial fit.
Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
to points `(x, y)`. Returns a vector of coefficients `p` that minimises
- the squared error.
+ the squared error in the order `deg`, `deg-1`, ... `0`.
+
+ The `Polynomial.fit <numpy.polynomial.polynomial.Polynomial.fit>` class
+ method is recommended for new code as it is more stable numerically. See
+ the documentation of the method for more information.
Parameters
----------
@@ -420,9 +463,14 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
w : array_like, shape (M,), optional
Weights to apply to the y-coordinates of the sample points. For
gaussian uncertainties, use 1/sigma (not 1/sigma**2).
- cov : bool, optional
- Return the estimate and the covariance matrix of the estimate
- If full is True, then cov is not returned.
+ cov : bool or str, optional
+ If given and not `False`, return not just the estimate but also its
+ covariance matrix. By default, the covariance is scaled by
+ chi2/dof, i.e., the weights are presumed to be unreliable
+ except in a relative sense and everything is scaled such that the
+ reduced chi2 is unity. This scaling is omitted if ``cov='unscaled'``,
+ as is relevant for the case that the weights are 1/sigma**2, with
+ sigma known to be a reliable estimate of the uncertainty.
Returns
-------
@@ -494,9 +542,9 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
References
----------
.. [1] Wikipedia, "Curve fitting",
- http://en.wikipedia.org/wiki/Curve_fitting
+ https://en.wikipedia.org/wiki/Curve_fitting
.. [2] Wikipedia, "Polynomial interpolation",
- http://en.wikipedia.org/wiki/Polynomial_interpolation
+ https://en.wikipedia.org/wiki/Polynomial_interpolation
Examples
--------
@@ -590,14 +638,17 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
elif cov:
Vbase = inv(dot(lhs.T, lhs))
Vbase /= NX.outer(scale, scale)
- # Some literature ignores the extra -2.0 factor in the denominator, but
- # it is included here because the covariance of Multivariate Student-T
- # (which is implied by a Bayesian uncertainty analysis) includes it.
- # Plus, it gives a slightly more conservative estimate of uncertainty.
- if len(x) <= order + 2:
- raise ValueError("the number of data points must exceed order + 2 "
- "for Bayesian estimate the covariance matrix")
- fac = resids / (len(x) - order - 2.0)
+ if cov == "unscaled":
+ fac = 1
+ else:
+ if len(x) <= order:
+ raise ValueError("the number of data points must exceed order "
+ "to scale the covariance matrix")
+ # note, this used to be: fac = resids / (len(x) - order - 2.0)
+ # it was decided that the "- 2" (originally justified by "Bayesian
+ # uncertainty analysis") is not what the user expects
+ # (see gh-11196 and gh-11197)
+ fac = resids / (len(x) - order)
if y.ndim == 1:
return c, Vbase * fac
else:
@@ -606,6 +657,11 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
return c
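The two covariance conventions now supported: ``cov=True`` rescales `Vbase` by ``resids / (len(x) - order)``, i.e. chi2/dof, while ``cov='unscaled'`` trusts the supplied weights as absolute uncertainties. A sketch (outputs omitted, as they depend on the noise draw)::

    >>> x = np.linspace(0, 1, 20)
    >>> rng = np.random.RandomState(0)
    >>> y = 3 * x + 1 + rng.normal(scale=0.1, size=x.size)
    >>> sigma = np.full_like(x, 0.1)
    >>> p, V = np.polyfit(x, y, 1, w=1/sigma, cov='unscaled')  # trust weights
    >>> p2, V2 = np.polyfit(x, y, 1, cov=True)                 # scale by chi2/dof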
+def _polyval_dispatcher(p, x):
+ return (p, x)
+
+
+@array_function_dispatch(_polyval_dispatcher)
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
@@ -675,6 +731,12 @@ def polyval(p, x):
y = y * x + p[i]
return y
+
+def _binary_op_dispatcher(a1, a2):
+ return (a1, a2)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
@@ -735,6 +797,8 @@ def polyadd(a1, a2):
val = poly1d(val)
return val
+
+@array_function_dispatch(_binary_op_dispatcher)
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
@@ -782,6 +846,7 @@ def polysub(a1, a2):
return val
+@array_function_dispatch(_binary_op_dispatcher)
def polymul(a1, a2):
"""
Find the product of two polynomials.
@@ -838,6 +903,12 @@ def polymul(a1, a2):
val = poly1d(val)
return val
+
+def _polydiv_dispatcher(u, v):
+ return (u, v)
+
+
+@array_function_dispatch(_polydiv_dispatcher)
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
@@ -931,6 +1002,7 @@ def _raise_power(astr, wrap=70):
return output + astr[n:]
+@set_module('numpy')
class poly1d(object):
"""
A one-dimensional polynomial class.
diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py
index b6453d5a2..fcc0d9a7a 100644
--- a/numpy/lib/recfunctions.py
+++ b/numpy/lib/recfunctions.py
@@ -14,8 +14,10 @@ import numpy.ma as ma
from numpy import ndarray, recarray
from numpy.ma import MaskedArray
from numpy.ma.mrecords import MaskedRecords
+from numpy.core.overrides import array_function_dispatch
from numpy.lib._iotools import _is_string_like
from numpy.compat import basestring
+from numpy.testing import suppress_warnings
if sys.version_info[0] < 3:
from future_builtins import zip
@@ -31,6 +33,11 @@ __all__ = [
]
+def _recursive_fill_fields_dispatcher(input, output):
+ return (input, output)
+
+
+@array_function_dispatch(_recursive_fill_fields_dispatcher)
def recursive_fill_fields(input, output):
"""
Fills fields from output with fields from input,
@@ -96,7 +103,7 @@ def get_fieldspec(dtype):
fields = ((name, dtype.fields[name]) for name in dtype.names)
# keep any titles, if present
return [
- (name if len(f) == 2 else (f[2], name), f[0])
+ (name if len(f) == 2 else (f[2], name), f[0])
for name, f in fields
]
@@ -189,6 +196,11 @@ def flatten_descr(ndtype):
return tuple(descr)
+def _zip_dtype_dispatcher(seqarrays, flatten=None):
+ return seqarrays
+
+
+@array_function_dispatch(_zip_dtype_dispatcher)
def zip_dtype(seqarrays, flatten=False):
newdtype = []
if flatten:
@@ -205,6 +217,7 @@ def zip_dtype(seqarrays, flatten=False):
return np.dtype(newdtype)
+@array_function_dispatch(_zip_dtype_dispatcher)
def zip_descr(seqarrays, flatten=False):
"""
Combine the dtype description of a series of arrays.
@@ -297,6 +310,11 @@ def _izip_fields(iterable):
yield element
+def _izip_records_dispatcher(seqarrays, fill_value=None, flatten=None):
+ return seqarrays
+
+
+@array_function_dispatch(_izip_records_dispatcher)
def izip_records(seqarrays, fill_value=None, flatten=True):
"""
Returns an iterator of concatenated items from a sequence of arrays.
@@ -357,6 +375,12 @@ def _fix_defaults(output, defaults=None):
return output
+def _merge_arrays_dispatcher(seqarrays, fill_value=None, flatten=None,
+ usemask=None, asrecarray=None):
+ return seqarrays
+
+
+@array_function_dispatch(_merge_arrays_dispatcher)
def merge_arrays(seqarrays, fill_value=-1, flatten=False,
usemask=False, asrecarray=False):
"""
@@ -494,6 +518,11 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False,
return output
+def _drop_fields_dispatcher(base, drop_names, usemask=None, asrecarray=None):
+ return (base,)
+
+
+@array_function_dispatch(_drop_fields_dispatcher)
def drop_fields(base, drop_names, usemask=True, asrecarray=False):
"""
Return a new array with fields in `drop_names` dropped.
@@ -583,6 +612,11 @@ def _keep_fields(base, keep_names, usemask=True, asrecarray=False):
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
+def _rec_drop_fields_dispatcher(base, drop_names):
+ return (base,)
+
+
+@array_function_dispatch(_rec_drop_fields_dispatcher)
def rec_drop_fields(base, drop_names):
"""
Returns a new numpy.recarray with fields in `drop_names` dropped.
@@ -590,6 +624,11 @@ def rec_drop_fields(base, drop_names):
return drop_fields(base, drop_names, usemask=False, asrecarray=True)
+def _rename_fields_dispatcher(base, namemapper):
+ return (base,)
+
+
+@array_function_dispatch(_rename_fields_dispatcher)
def rename_fields(base, namemapper):
"""
Rename the fields from a flexible-datatype ndarray or recarray.
@@ -629,6 +668,14 @@ def rename_fields(base, namemapper):
return base.view(newdtype)
+def _append_fields_dispatcher(base, names, data, dtypes=None,
+ fill_value=None, usemask=None, asrecarray=None):
+ yield base
+ for d in data:
+ yield d
+
+
+@array_function_dispatch(_append_fields_dispatcher)
def append_fields(base, names, data, dtypes=None,
fill_value=-1, usemask=True, asrecarray=False):
"""
@@ -699,6 +746,13 @@ def append_fields(base, names, data, dtypes=None,
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
+def _rec_append_fields_dispatcher(base, names, data, dtypes=None):
+ yield base
+ for d in data:
+ yield d
+
+
+@array_function_dispatch(_rec_append_fields_dispatcher)
def rec_append_fields(base, names, data, dtypes=None):
"""
Add new fields to an existing array.
@@ -732,6 +786,12 @@ def rec_append_fields(base, names, data, dtypes=None):
return append_fields(base, names, data=data, dtypes=dtypes,
asrecarray=True, usemask=False)
+
+def _repack_fields_dispatcher(a, align=None, recurse=None):
+ return (a,)
+
+
+@array_function_dispatch(_repack_fields_dispatcher)
def repack_fields(a, align=False, recurse=False):
"""
Re-pack the fields of a structured array or dtype in memory.
@@ -811,6 +871,351 @@ def repack_fields(a, align=False, recurse=False):
dt = np.dtype(fieldinfo, align=align)
return np.dtype((a.type, dt))
+def _get_fields_and_offsets(dt, offset=0):
+ """
+ Returns a flat list of (dtype, count, offset) tuples of all the
+ scalar fields in the dtype "dt", including nested fields, in left
+ to right order.
+ """
+ fields = []
+ for name in dt.names:
+ field = dt.fields[name]
+ if field[0].names is None:
+ count = 1
+ for size in field[0].shape:
+ count *= size
+ fields.append((field[0], count, field[1] + offset))
+ else:
+ fields.extend(_get_fields_and_offsets(field[0], field[1] + offset))
+ return fields
+
+
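The helper flattens arbitrarily nested structured dtypes into absolute ``(dtype, count, offset)`` triples, with `count` collapsing subarray shapes. A quick illustration, calling the private helper directly on a dtype like the one in the examples below::

    >>> dt = np.dtype([('a', 'i4'),
    ...                ('b', [('f0', 'f4'), ('f1', 'u2')]),
    ...                ('c', 'f4', (2,))])
    >>> for field_dt, count, offset in _get_fields_and_offsets(dt):
    ...     print(field_dt.base, count, offset)
    int32 1 0
    float32 1 4
    uint16 1 8
    float32 2 10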
+def _structured_to_unstructured_dispatcher(arr, dtype=None, copy=None,
+ casting=None):
+ return (arr,)
+
+@array_function_dispatch(_structured_to_unstructured_dispatcher)
+def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'):
+ """
+ Converts an n-D structured array into an (n+1)-D unstructured array.
+
+ The new array will have a new last dimension equal in size to the
+ number of field-elements of the input array. If not supplied, the output
+ datatype is determined from the numpy type promotion rules applied to all
+ the field datatypes.
+
+ Nested fields, as well as each element of any subarray fields, all count
+ as single field-elements.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Structured array or dtype to convert. Cannot contain object datatype.
+ dtype : dtype, optional
+ The dtype of the output unstructured array.
+ copy : bool, optional
+ See copy argument to `ndarray.astype`. If true, always return a copy.
+ If false, and `dtype` requirements are satisfied, a view is returned.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ See casting argument of `ndarray.astype`. Controls what kind of data
+ casting may occur.
+
+ Returns
+ -------
+ unstructured : ndarray
+ Unstructured array with one more dimension.
+
+ Examples
+ --------
+
+ >>> a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
+ >>> a
+ array([(0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.]),
+ (0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.])],
+ dtype=[('a', '<i4'), ('b', [('f0', '<f4'), ('f1', '<u2')]), ('c', '<f4', (2,))])
+ >>> structured_to_unstructured(a)
+ array([[0., 0., 0., 0., 0.],
+ [0., 0., 0., 0., 0.],
+ [0., 0., 0., 0., 0.],
+ [0., 0., 0., 0., 0.]])
+
+ >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
+ ... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
+ >>> np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1)
+ array([ 3. , 5.5, 9. , 11. ])
+
+ """
+ if arr.dtype.names is None:
+ raise ValueError('arr must be a structured array')
+
+ fields = _get_fields_and_offsets(arr.dtype)
+ n_fields = len(fields)
+ dts, counts, offsets = zip(*fields)
+ names = ['f{}'.format(n) for n in range(n_fields)]
+
+ if dtype is None:
+ out_dtype = np.result_type(*[dt.base for dt in dts])
+ else:
+ out_dtype = dtype
+
+ # Use a series of views and casts to convert to an unstructured array:
+
+ # first view using flattened fields (doesn't work for object arrays)
+ # Note: dts may include a shape for subarrays
+ flattened_fields = np.dtype({'names': names,
+ 'formats': dts,
+ 'offsets': offsets,
+ 'itemsize': arr.dtype.itemsize})
+ with suppress_warnings() as sup: # until 1.16 (gh-12447)
+ sup.filter(FutureWarning, "Numpy has detected")
+ arr = arr.view(flattened_fields)
+
+ # next cast to a packed format with all fields converted to new dtype
+ packed_fields = np.dtype({'names': names,
+ 'formats': [(out_dtype, c) for c in counts]})
+ arr = arr.astype(packed_fields, copy=copy, casting=casting)
+
+ # finally is it safe to view the packed fields as the unstructured type
+ return arr.view((out_dtype, sum(counts)))
+
+def _unstructured_to_structured_dispatcher(arr, dtype=None, names=None,
+ align=None, copy=None, casting=None):
+ return (arr,)
+
+@array_function_dispatch(_unstructured_to_structured_dispatcher)
+def unstructured_to_structured(arr, dtype=None, names=None, align=False,
+ copy=False, casting='unsafe'):
+ """
+ Converts an n-D unstructured array into an (n-1)-D structured array.
+
+ The last dimension of the input array is converted into a structure, with
+ number of field-elements equal to the size of the last dimension of the
+ input array. By default all output fields have the input array's dtype, but
+ an output structured dtype with an equal number of fields-elements can be
+ supplied instead.
+
+ Nested fields, as well as each element of any subarray fields, all count
+ towards the number of field-elements.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Unstructured array or dtype to convert.
+ dtype : dtype, optional
+ The structured dtype of the output array
+ names : list of strings, optional
+ If dtype is not supplied, this specifies the field names for the output
+ dtype, in order. The field dtypes will be the same as the input array.
+ align : boolean, optional
+ Whether to create an aligned memory layout.
+ copy : bool, optional
+ See copy argument to `ndarray.astype`. If true, always return a copy.
+ If false, and `dtype` requirements are satisfied, a view is returned.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ See casting argument of `ndarray.astype`. Controls what kind of data
+ casting may occur.
+
+ Returns
+ -------
+ structured : ndarray
+ Structured array with fewer dimensions.
+
+ Examples
+ --------
+
+ >>> dt = np.dtype([('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
+ >>> a = np.arange(20).reshape((4,5))
+ >>> a
+ array([[ 0, 1, 2, 3, 4],
+ [ 5, 6, 7, 8, 9],
+ [10, 11, 12, 13, 14],
+ [15, 16, 17, 18, 19]])
+ >>> unstructured_to_structured(a, dt)
+ array([( 0, ( 1., 2), [ 3., 4.]), ( 5, ( 6., 7), [ 8., 9.]),
+ (10, (11., 12), [13., 14.]), (15, (16., 17), [18., 19.])],
+ dtype=[('a', '<i4'), ('b', [('f0', '<f4'), ('f1', '<u2')]), ('c', '<f4', (2,))])
+
+ """
+ if arr.shape == ():
+ raise ValueError('arr must have at least one dimension')
+ n_elem = arr.shape[-1]
+
+ if dtype is None:
+ if names is None:
+ names = ['f{}'.format(n) for n in range(n_elem)]
+ out_dtype = np.dtype([(n, arr.dtype) for n in names], align=align)
+ fields = _get_fields_and_offsets(out_dtype)
+ dts, counts, offsets = zip(*fields)
+ else:
+ if names is not None:
+ raise ValueError("don't supply both dtype and names")
+ # sanity check of the input dtype
+ fields = _get_fields_and_offsets(dtype)
+ dts, counts, offsets = zip(*fields)
+ if n_elem != sum(counts):
+ raise ValueError('The length of the last dimension of arr must '
+ 'be equal to the number of fields in dtype')
+ out_dtype = dtype
+ if align and not out_dtype.isalignedstruct:
+ raise ValueError("align was True but dtype is not aligned")
+
+ names = ['f{}'.format(n) for n in range(len(fields))]
+
+ # Use a series of views and casts to convert to a structured array:
+
+ # first view as a packed structured array of one dtype
+ packed_fields = np.dtype({'names': names,
+ 'formats': [(arr.dtype, c) for c in counts]})
+ arr = np.ascontiguousarray(arr).view(packed_fields)
+
+ # next cast to an unpacked but flattened format with varied dtypes
+ flattened_fields = np.dtype({'names': names,
+ 'formats': dts,
+ 'offsets': offsets,
+ 'itemsize': out_dtype.itemsize})
+ arr = arr.astype(flattened_fields, copy=copy, casting=casting)
+
+ # finally view as the final nested dtype and remove the last axis
+ return arr.view(out_dtype)[..., 0]
+
+def _apply_along_fields_dispatcher(func, arr):
+ return (arr,)
+
+@array_function_dispatch(_apply_along_fields_dispatcher)
+def apply_along_fields(func, arr):
+ """
+ Apply function 'func' as a reduction across fields of a structured array.
+
+ This is similar to `apply_along_axis`, but treats the fields of a
+ structured array as an extra axis. The fields are all first cast to a
+ common type following the type-promotion rules from `numpy.result_type`
+ applied to the field's dtypes.
+
+ Parameters
+ ----------
+ func : function
+ Function to apply on the "field" dimension. This function must
+ support an `axis` argument, like np.mean, np.sum, etc.
+ arr : ndarray
+ Structured array for which to apply func.
+
+ Returns
+ -------
+ out : ndarray
+ Result of the reduction operation
+
+ Examples
+ --------
+
+ >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
+ ... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
+ >>> apply_along_fields(np.mean, b)
+ array([ 2.66666667, 5.33333333, 8.66666667, 11. ])
+ >>> apply_along_fields(np.mean, b[['x', 'z']])
+ array([ 3. , 5.5, 9. , 11. ])
+
+ """
+ if arr.dtype.names is None:
+ raise ValueError('arr must be a structured array')
+
+ uarr = structured_to_unstructured(arr)
+ return func(uarr, axis=-1)
+ # works and avoids axis requirement, but very, very slow:
+ #return np.apply_along_axis(func, -1, uarr)
+
+def _assign_fields_by_name_dispatcher(dst, src, zero_unassigned=None):
+ return dst, src
+
+@array_function_dispatch(_assign_fields_by_name_dispatcher)
+def assign_fields_by_name(dst, src, zero_unassigned=True):
+ """
+ Assigns values from one structured array to another by field name.
+
+ Normally in numpy >= 1.14, assignment of one structured array to another
+ copies fields "by position", meaning that the first field from the src is
+ copied to the first field of the dst, and so on, regardless of field name.
+
+ This function instead copies "by field name", such that fields in the dst
+ are assigned from the identically named field in the src. This applies
+ recursively for nested structures. This is how structure assignment worked
+ in numpy 1.6 through 1.13.
+
+ Parameters
+ ----------
+ dst : ndarray
+ src : ndarray
+ The source and destination arrays during assignment.
+ zero_unassigned : bool, optional
+ If True, fields in the dst for which there was no matching
+ field in the src are filled with the value 0 (zero). This
+ was the behavior of numpy <= 1.13. If False, those fields
+ are not modified.
+ """
+
+ if dst.dtype.names is None:
+ dst[...] = src
+ return
+
+ for name in dst.dtype.names:
+ if name not in src.dtype.names:
+ if zero_unassigned:
+ dst[name] = 0
+ else:
+ assign_fields_by_name(dst[name], src[name],
+ zero_unassigned)
+
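
    A quick illustration of the by-name semantics and of `zero_unassigned`
    (a sketch, assuming the module is imported as `rfn`):

        >>> dst = np.ones(2, dtype=[('a', 'i4'), ('b', 'f8')])
        >>> src = np.array([(2., 7), (3., 8)],
        ...                dtype=[('b', 'f8'), ('c', 'u1')])
        >>> rfn.assign_fields_by_name(dst, src)
        >>> dst['b']                 # matched by name, not by position
        array([2., 3.])
        >>> dst['a']                 # no match in src; zeroed by default
        array([0, 0], dtype=int32)
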
+def _require_fields_dispatcher(array, required_dtype):
+ return (array,)
+
+@array_function_dispatch(_require_fields_dispatcher)
+def require_fields(array, required_dtype):
+ """
+ Casts a structured array to a new dtype using assignment by field-name.
+
+ This function assigns from the old to the new array by name, so the
+ value of a field in the output array is the value of the field with the
+ same name in the source array. This has the effect of creating a new
+ ndarray containing only the fields "required" by the required_dtype.
+
+ If a field name in the required_dtype does not exist in the
+ input array, that field is created and set to 0 in the output array.
+
+ Parameters
+ ----------
+    array : ndarray
+ array to cast
+ required_dtype : dtype
+ datatype for output array
+
+ Returns
+ -------
+ out : ndarray
+ array with the new dtype, with field values copied from the fields in
+ the input array with the same name
+
+ Examples
+ --------
+
+ >>> a = np.ones(4, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
+ >>> require_fields(a, [('b', 'f4'), ('c', 'u1')])
+ array([(1., 1), (1., 1), (1., 1), (1., 1)],
+ dtype=[('b', '<f4'), ('c', 'u1')])
+ >>> require_fields(a, [('b', 'f4'), ('newf', 'u1')])
+ array([(1., 0), (1., 0), (1., 0), (1., 0)],
+ dtype=[('b', '<f4'), ('newf', 'u1')])
+
+ """
+ out = np.empty(array.shape, dtype=required_dtype)
+ assign_fields_by_name(out, array)
+ return out
+
+
+def _stack_arrays_dispatcher(arrays, defaults=None, usemask=None,
+ asrecarray=None, autoconvert=None):
+ return arrays
+
+
+@array_function_dispatch(_stack_arrays_dispatcher)
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
autoconvert=False):
"""
@@ -897,6 +1302,12 @@ def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
usemask=usemask, asrecarray=asrecarray)
+def _find_duplicates_dispatcher(
+ a, key=None, ignoremask=None, return_index=None):
+ return (a,)
+
+
+@array_function_dispatch(_find_duplicates_dispatcher)
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
"""
Find the duplicates in a structured array along a given key
@@ -951,8 +1362,15 @@ def find_duplicates(a, key=None, ignoremask=True, return_index=False):
return duplicates
+def _join_by_dispatcher(
+ key, r1, r2, jointype=None, r1postfix=None, r2postfix=None,
+ defaults=None, usemask=None, asrecarray=None):
+ return (r1, r2)
+
+
+@array_function_dispatch(_join_by_dispatcher)
def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
- defaults=None, usemask=True, asrecarray=False):
+ defaults=None, usemask=True, asrecarray=False):
"""
Join arrays `r1` and `r2` on key `key`.
@@ -1130,6 +1548,13 @@ def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
return _fix_output(_fix_defaults(output, defaults), **kwargs)
+def _rec_join_dispatcher(
+ key, r1, r2, jointype=None, r1postfix=None, r2postfix=None,
+ defaults=None):
+ return (r1, r2)
+
+
+@array_function_dispatch(_rec_join_dispatcher)
def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None):
"""
diff --git a/numpy/lib/scimath.py b/numpy/lib/scimath.py
index f1838fee6..9ca006841 100644
--- a/numpy/lib/scimath.py
+++ b/numpy/lib/scimath.py
@@ -20,6 +20,7 @@ from __future__ import division, absolute_import, print_function
import numpy.core.numeric as nx
import numpy.core.numerictypes as nt
from numpy.core.numeric import asarray, any
+from numpy.core.overrides import array_function_dispatch
from numpy.lib.type_check import isreal
@@ -94,6 +95,7 @@ def _tocomplex(arr):
else:
return arr.astype(nt.cdouble)
+
def _fix_real_lt_zero(x):
"""Convert `x` to complex if it has real, negative components.
@@ -121,6 +123,7 @@ def _fix_real_lt_zero(x):
x = _tocomplex(x)
return x
+
def _fix_int_lt_zero(x):
"""Convert `x` to double if it has real, negative components.
@@ -147,6 +150,7 @@ def _fix_int_lt_zero(x):
x = x * 1.0
return x
+
def _fix_real_abs_gt_1(x):
"""Convert `x` to complex if it has real components x_i with abs(x_i)>1.
@@ -173,6 +177,12 @@ def _fix_real_abs_gt_1(x):
x = _tocomplex(x)
return x
+
+def _unary_dispatcher(x):
+ return (x,)
+
+
+@array_function_dispatch(_unary_dispatcher)
def sqrt(x):
"""
Compute the square root of x.
@@ -215,6 +225,8 @@ def sqrt(x):
x = _fix_real_lt_zero(x)
return nx.sqrt(x)
+
+@array_function_dispatch(_unary_dispatcher)
def log(x):
"""
Compute the natural logarithm of `x`.
@@ -261,6 +273,8 @@ def log(x):
x = _fix_real_lt_zero(x)
return nx.log(x)
+
+@array_function_dispatch(_unary_dispatcher)
def log10(x):
"""
Compute the logarithm base 10 of `x`.
@@ -309,6 +323,12 @@ def log10(x):
x = _fix_real_lt_zero(x)
return nx.log10(x)
+
+def _logn_dispatcher(n, x):
+ return (n, x,)
+
+
+@array_function_dispatch(_logn_dispatcher)
def logn(n, x):
"""
Take log base n of x.
@@ -318,8 +338,8 @@ def logn(n, x):
Parameters
----------
- n : int
- The base in which the log is taken.
+ n : array_like
+ The integer base(s) in which the log is taken.
x : array_like
The value(s) whose log base `n` is (are) required.
@@ -343,6 +363,8 @@ def logn(n, x):
n = _fix_real_lt_zero(n)
return nx.log(x)/nx.log(n)
+
+@array_function_dispatch(_unary_dispatcher)
def log2(x):
"""
Compute the logarithm base 2 of `x`.
@@ -389,6 +411,12 @@ def log2(x):
x = _fix_real_lt_zero(x)
return nx.log2(x)
+
+def _power_dispatcher(x, p):
+ return (x, p)
+
+
+@array_function_dispatch(_power_dispatcher)
def power(x, p):
"""
Return x to the power p, (x**p).
@@ -432,6 +460,8 @@ def power(x, p):
p = _fix_int_lt_zero(p)
return nx.power(x, p)
+
+@array_function_dispatch(_unary_dispatcher)
def arccos(x):
"""
Compute the inverse cosine of x.
@@ -475,6 +505,8 @@ def arccos(x):
x = _fix_real_abs_gt_1(x)
return nx.arccos(x)
+
+@array_function_dispatch(_unary_dispatcher)
def arcsin(x):
"""
Compute the inverse sine of x.
@@ -519,6 +551,8 @@ def arcsin(x):
x = _fix_real_abs_gt_1(x)
return nx.arcsin(x)
+
+@array_function_dispatch(_unary_dispatcher)
def arctanh(x):
"""
Compute the inverse hyperbolic tangent of `x`.
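
    All of the `_*_dispatcher` / `array_function_dispatch` pairs added in this
    file (and throughout this diff) hook these functions into the NEP 18
    `__array_function__` protocol; each dispatcher only names which arguments
    may carry an override. A toy duck array as a sketch, assuming the protocol
    is enabled (in the 1.16 series it is opt-in via the
    NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=1 environment variable):

        import numpy as np

        class Duck(object):
            """Minimal array-like opting in to __array_function__."""
            def __init__(self, data):
                self.data = np.asarray(data)

            def __array_function__(self, func, types, args, kwargs):
                # func is the overridden NumPy function, e.g. scimath.sqrt;
                # unwrap the arguments, delegate, and rewrap the result.
                unwrapped = tuple(a.data if isinstance(a, Duck) else a
                                  for a in args)
                return Duck(func(*unwrapped, **kwargs))

    With that in place, `np.lib.scimath.sqrt(Duck([-1., 4.]))` returns a
    `Duck` wrapping the complex result instead of a plain ndarray.
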
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index 65104115a..f56c4f4db 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -1,5 +1,6 @@
from __future__ import division, absolute_import, print_function
+import functools
import warnings
import numpy.core.numeric as _nx
@@ -8,7 +9,10 @@ from numpy.core.numeric import (
)
from numpy.core.fromnumeric import product, reshape, transpose
from numpy.core.multiarray import normalize_axis_index
+from numpy.core import overrides
from numpy.core import vstack, atleast_3d
+from numpy.core.shape_base import (
+ _arrays_for_stack_dispatcher, _warn_for_nonsequence)
from numpy.lib.index_tricks import ndindex
from numpy.matrixlib.defmatrix import matrix # this raises all the right alarm bells
@@ -21,6 +25,10 @@ __all__ = [
]
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
def _make_along_axis_idx(arr_shape, indices, axis):
# compute dimensions to iterate over
if not _nx.issubdtype(indices.dtype, _nx.integer):
@@ -44,6 +52,11 @@ def _make_along_axis_idx(arr_shape, indices, axis):
return tuple(fancy_index)
+def _take_along_axis_dispatcher(arr, indices, axis):
+ return (arr, indices)
+
+
+@array_function_dispatch(_take_along_axis_dispatcher)
def take_along_axis(arr, indices, axis):
"""
Take values from the input array by matching 1d index and data slices.
@@ -160,6 +173,11 @@ def take_along_axis(arr, indices, axis):
return arr[_make_along_axis_idx(arr_shape, indices, axis)]
+def _put_along_axis_dispatcher(arr, indices, values, axis):
+ return (arr, indices, values)
+
+
+@array_function_dispatch(_put_along_axis_dispatcher)
def put_along_axis(arr, indices, values, axis):
"""
Put values into the destination array by matching 1d index and data slices.
@@ -245,6 +263,11 @@ def put_along_axis(arr, indices, values, axis):
arr[_make_along_axis_idx(arr_shape, indices, axis)] = values
+def _apply_along_axis_dispatcher(func1d, axis, arr, *args, **kwargs):
+ return (arr,)
+
+
+@array_function_dispatch(_apply_along_axis_dispatcher)
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
"""
Apply a function to 1-D slices along the given axis.
@@ -392,6 +415,11 @@ def apply_along_axis(func1d, axis, arr, *args, **kwargs):
return res.__array_wrap__(out_arr)
+def _apply_over_axes_dispatcher(func, a, axes):
+ return (a,)
+
+
+@array_function_dispatch(_apply_over_axes_dispatcher)
def apply_over_axes(func, a, axes):
"""
Apply a function repeatedly over multiple axes.
@@ -474,9 +502,15 @@ def apply_over_axes(func, a, axes):
val = res
else:
raise ValueError("function is not returning "
- "an array of the correct shape")
+ "an array of the correct shape")
return val
+
+def _expand_dims_dispatcher(a, axis):
+ return (a,)
+
+
+@array_function_dispatch(_expand_dims_dispatcher)
def expand_dims(a, axis):
"""
Expand the shape of an array.
@@ -536,7 +570,11 @@ def expand_dims(a, axis):
True
"""
- a = asarray(a)
+ if isinstance(a, matrix):
+ a = asarray(a)
+ else:
+ a = asanyarray(a)
+
shape = a.shape
if axis > a.ndim or axis < -a.ndim - 1:
# 2017-05-17, 1.13.0
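
    The asarray-to-asanyarray switch above makes `expand_dims`
    subclass-preserving (`np.matrix` stays special-cased because it cannot
    have more than two dimensions). A quick check with a masked array:

        >>> m = np.ma.array([1, 2, 3], mask=[0, 1, 0])
        >>> e = np.expand_dims(m, axis=0)
        >>> type(e).__name__, e.shape
        ('MaskedArray', (1, 3))
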
@@ -550,8 +588,15 @@ def expand_dims(a, axis):
# axis = normalize_axis_index(axis, a.ndim + 1)
return a.reshape(shape[:axis] + (1,) + shape[axis:])
+
row_stack = vstack
+
+def _column_stack_dispatcher(tup):
+ return _arrays_for_stack_dispatcher(tup)
+
+
+@array_function_dispatch(_column_stack_dispatcher)
def column_stack(tup):
"""
Stack 1-D arrays as columns into a 2-D array.
@@ -585,6 +630,7 @@ def column_stack(tup):
[3, 4]])
"""
+ _warn_for_nonsequence(tup)
arrays = []
for v in tup:
arr = array(v, copy=False, subok=True)
@@ -593,6 +639,12 @@ def column_stack(tup):
arrays.append(arr)
return _nx.concatenate(arrays, 1)
+
+def _dstack_dispatcher(tup):
+ return _arrays_for_stack_dispatcher(tup)
+
+
+@array_function_dispatch(_dstack_dispatcher)
def dstack(tup):
"""
Stack arrays in sequence depth wise (along third axis).
@@ -643,8 +695,10 @@ def dstack(tup):
[[3, 4]]])
"""
+ _warn_for_nonsequence(tup)
return _nx.concatenate([atleast_3d(_m) for _m in tup], 2)
+
def _replace_zero_by_x_arrays(sub_arys):
for i in range(len(sub_arys)):
if _nx.ndim(sub_arys[i]) == 0:
@@ -653,6 +707,12 @@ def _replace_zero_by_x_arrays(sub_arys):
sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)
return sub_arys
+
+def _array_split_dispatcher(ary, indices_or_sections, axis=None):
+ return (ary, indices_or_sections)
+
+
+@array_function_dispatch(_array_split_dispatcher)
def array_split(ary, indices_or_sections, axis=0):
"""
Split an array into multiple sub-arrays.
@@ -684,7 +744,7 @@ def array_split(ary, indices_or_sections, axis=0):
except AttributeError:
Ntotal = len(ary)
try:
- # handle scalar case.
+ # handle array case.
Nsections = len(indices_or_sections) + 1
div_points = [0] + list(indices_or_sections) + [Ntotal]
except TypeError:
@@ -696,7 +756,7 @@ def array_split(ary, indices_or_sections, axis=0):
section_sizes = ([0] +
extras * [Neach_section+1] +
(Nsections-extras) * [Neach_section])
- div_points = _nx.array(section_sizes).cumsum()
+ div_points = _nx.array(section_sizes, dtype=_nx.intp).cumsum()
sub_arys = []
sary = _nx.swapaxes(ary, axis, 0)
@@ -708,7 +768,12 @@ def array_split(ary, indices_or_sections, axis=0):
return sub_arys
-def split(ary,indices_or_sections,axis=0):
+def _split_dispatcher(ary, indices_or_sections, axis=None):
+ return (ary, indices_or_sections)
+
+
+@array_function_dispatch(_split_dispatcher)
+def split(ary, indices_or_sections, axis=0):
"""
Split an array into multiple sub-arrays.
@@ -785,6 +850,12 @@ def split(ary,indices_or_sections,axis=0):
res = array_split(ary, indices_or_sections, axis)
return res
+
+def _hvdsplit_dispatcher(ary, indices_or_sections):
+ return (ary, indices_or_sections)
+
+
+@array_function_dispatch(_hvdsplit_dispatcher)
def hsplit(ary, indices_or_sections):
"""
Split an array into multiple sub-arrays horizontally (column-wise).
@@ -847,6 +918,8 @@ def hsplit(ary, indices_or_sections):
else:
return split(ary, indices_or_sections, 0)
+
+@array_function_dispatch(_hvdsplit_dispatcher)
def vsplit(ary, indices_or_sections):
"""
Split an array into multiple sub-arrays vertically (row-wise).
@@ -898,6 +971,8 @@ def vsplit(ary, indices_or_sections):
raise ValueError('vsplit only works on arrays of 2 or more dimensions')
return split(ary, indices_or_sections, 0)
+
+@array_function_dispatch(_hvdsplit_dispatcher)
def dsplit(ary, indices_or_sections):
"""
Split array into multiple sub-arrays along the 3rd axis (depth).
@@ -967,6 +1042,12 @@ def get_array_wrap(*args):
return wrappers[-1][-1]
return None
+
+def _kron_dispatcher(a, b):
+ return (a, b)
+
+
+@array_function_dispatch(_kron_dispatcher)
def kron(a, b):
"""
Kronecker product of two arrays.
@@ -1066,6 +1147,11 @@ def kron(a, b):
return result
+def _tile_dispatcher(A, reps):
+ return (A, reps)
+
+
+@array_function_dispatch(_tile_dispatcher)
def tile(A, reps):
"""
Construct an array by repeating A the number of times given by reps.
diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py
index 2abe5cdd1..0dc36e41c 100644
--- a/numpy/lib/stride_tricks.py
+++ b/numpy/lib/stride_tricks.py
@@ -8,6 +8,7 @@ NumPy reference guide.
from __future__ import division, absolute_import, print_function
import numpy as np
+from numpy.core.overrides import array_function_dispatch
__all__ = ['broadcast_to', 'broadcast_arrays']
@@ -135,6 +136,11 @@ def _broadcast_to(array, shape, subok, readonly):
return result
+def _broadcast_to_dispatcher(array, shape, subok=None):
+ return (array,)
+
+
+@array_function_dispatch(_broadcast_to_dispatcher, module='numpy')
def broadcast_to(array, shape, subok=False):
"""Broadcast an array to a new shape.
@@ -195,6 +201,11 @@ def _broadcast_shape(*args):
return b.shape
+def _broadcast_arrays_dispatcher(*args, **kwargs):
+ return args
+
+
+@array_function_dispatch(_broadcast_arrays_dispatcher, module='numpy')
def broadcast_arrays(*args, **kwargs):
"""
Broadcast any number of arrays against each other.
@@ -219,23 +230,19 @@ def broadcast_arrays(*args, **kwargs):
Examples
--------
>>> x = np.array([[1,2,3]])
- >>> y = np.array([[1],[2],[3]])
+ >>> y = np.array([[4],[5]])
>>> np.broadcast_arrays(x, y)
[array([[1, 2, 3],
- [1, 2, 3],
- [1, 2, 3]]), array([[1, 1, 1],
- [2, 2, 2],
- [3, 3, 3]])]
+ [1, 2, 3]]), array([[4, 4, 4],
+ [5, 5, 5]])]
Here is a useful idiom for getting contiguous copies instead of
non-contiguous views.
>>> [np.array(a) for a in np.broadcast_arrays(x, y)]
[array([[1, 2, 3],
- [1, 2, 3],
- [1, 2, 3]]), array([[1, 1, 1],
- [2, 2, 2],
- [3, 3, 3]])]
+ [1, 2, 3]]), array([[4, 4, 4],
+ [5, 5, 5]])]
"""
# nditer is not used here to avoid the limit of 32 arrays.
@@ -246,7 +253,7 @@ def broadcast_arrays(*args, **kwargs):
subok = kwargs.pop('subok', False)
if kwargs:
raise TypeError('broadcast_arrays() got an unexpected keyword '
- 'argument {!r}'.format(kwargs.keys()[0]))
+ 'argument {!r}'.format(list(kwargs.keys())[0]))
args = [np.array(_m, copy=False, subok=subok) for _m in args]
shape = _broadcast_shape(*args)
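
    The `list(kwargs.keys())[0]` change above is a straightforward Python 3
    compatibility fix: dict views are not indexable, so on Python 3 the old
    `kwargs.keys()[0]` raised a TypeError of its own instead of producing the
    intended error message.

        >>> kwargs = {'subok': True}
        >>> list(kwargs.keys())[0]   # works on both Python 2 and Python 3
        'subok'
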
diff --git a/numpy/lib/tests/test__datasource.py b/numpy/lib/tests/test__datasource.py
index 85788941c..8eac16b58 100644
--- a/numpy/lib/tests/test__datasource.py
+++ b/numpy/lib/tests/test__datasource.py
@@ -8,7 +8,7 @@ from shutil import rmtree
import numpy.lib._datasource as datasource
from numpy.testing import (
- assert_, assert_equal, assert_raises, assert_warns, SkipTest
+ assert_, assert_equal, assert_raises, assert_warns
)
if sys.version_info[0] >= 3:
@@ -137,7 +137,7 @@ class TestDataSourceOpen(object):
import gzip
except ImportError:
# We don't have the gzip capabilities to test.
- raise SkipTest
+ pytest.skip()
# Test datasource's internal file_opener for Gzip files.
filepath = os.path.join(self.tmpdir, 'foobar.txt.gz')
fp = gzip.open(filepath, 'w')
@@ -153,7 +153,7 @@ class TestDataSourceOpen(object):
import bz2
except ImportError:
# We don't have the bz2 capabilities to test.
- raise SkipTest
+ pytest.skip()
# Test datasource's internal file_opener for BZip2 files.
filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2')
fp = bz2.BZ2File(filepath, 'w')
@@ -170,7 +170,7 @@ class TestDataSourceOpen(object):
import bz2
except ImportError:
# We don't have the bz2 capabilities to test.
- raise SkipTest
+ pytest.skip()
# Test datasource's internal file_opener for BZip2 files.
filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2')
fp = bz2.BZ2File(filepath, 'w')
@@ -361,3 +361,18 @@ class TestOpenFunc(object):
fp = datasource.open(local_file)
assert_(fp)
fp.close()
+
+def test_del_attr_handling():
+    # DataSource.__del__ can be called even when __init__ fails, if the
+    # exception is caught by the caller, as happens in refguide_check's
+    # is_deprecated() function.
+
+ ds = datasource.DataSource()
+ # simulate failed __init__ by removing key attribute
+ # produced within __init__ and expected by __del__
+ del ds._istmpdest
+ # should not raise an AttributeError if __del__
+ # gracefully handles failed __init__:
+ ds.__del__()
diff --git a/numpy/lib/tests/test__iotools.py b/numpy/lib/tests/test__iotools.py
index b4888f1bd..e04fdc808 100644
--- a/numpy/lib/tests/test__iotools.py
+++ b/numpy/lib/tests/test__iotools.py
@@ -1,6 +1,5 @@
from __future__ import division, absolute_import, print_function
-import sys
import time
from datetime import date
@@ -246,7 +245,7 @@ class TestStringConverter(object):
converter = StringConverter(int, default=0,
missing_values="N/A")
assert_equal(
- converter.missing_values, set(['', 'N/A']))
+ converter.missing_values, {'', 'N/A'})
def test_int64_dtype(self):
"Check that int64 integer types can be specified"
diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py
index 8ba0370b0..20f6e4a1b 100644
--- a/numpy/lib/tests/test_arraypad.py
+++ b/numpy/lib/tests/test_arraypad.py
@@ -3,9 +3,97 @@
"""
from __future__ import division, absolute_import, print_function
+import pytest
+
import numpy as np
-from numpy.testing import (assert_array_equal, assert_raises, assert_allclose,)
+from numpy.testing import (assert_array_equal, assert_raises, assert_allclose,
+ assert_equal)
from numpy.lib import pad
+from numpy.lib.arraypad import _as_pairs
+
+
+class TestAsPairs(object):
+
+ def test_single_value(self):
+ """Test casting for a single value."""
+ expected = np.array([[3, 3]] * 10)
+ for x in (3, [3], [[3]]):
+ result = _as_pairs(x, 10)
+ assert_equal(result, expected)
+ # Test with dtype=object
+ obj = object()
+ assert_equal(
+ _as_pairs(obj, 10),
+ np.array([[obj, obj]] * 10)
+ )
+
+ def test_two_values(self):
+ """Test proper casting for two different values."""
+ # Broadcasting in the first dimension with numbers
+ expected = np.array([[3, 4]] * 10)
+ for x in ([3, 4], [[3, 4]]):
+ result = _as_pairs(x, 10)
+ assert_equal(result, expected)
+ # and with dtype=object
+ obj = object()
+ assert_equal(
+ _as_pairs(["a", obj], 10),
+ np.array([["a", obj]] * 10)
+ )
+
+ # Broadcasting in the second / last dimension with numbers
+ assert_equal(
+ _as_pairs([[3], [4]], 2),
+ np.array([[3, 3], [4, 4]])
+ )
+ # and with dtype=object
+ assert_equal(
+ _as_pairs([["a"], [obj]], 2),
+ np.array([["a", "a"], [obj, obj]])
+ )
+
+ def test_with_none(self):
+ expected = ((None, None), (None, None), (None, None))
+ assert_equal(
+ _as_pairs(None, 3, as_index=False),
+ expected
+ )
+ assert_equal(
+ _as_pairs(None, 3, as_index=True),
+ expected
+ )
+
+ def test_pass_through(self):
+        """Test that inputs already matching the desired output are passed through."""
+ expected = np.arange(12).reshape((6, 2))
+ assert_equal(
+ _as_pairs(expected, 6),
+ expected
+ )
+
+ def test_as_index(self):
+ """Test results if `as_index=True`."""
+ assert_equal(
+ _as_pairs([2.6, 3.3], 10, as_index=True),
+ np.array([[3, 3]] * 10, dtype=np.intp)
+ )
+ assert_equal(
+ _as_pairs([2.6, 4.49], 10, as_index=True),
+ np.array([[3, 4]] * 10, dtype=np.intp)
+ )
+ for x in (-3, [-3], [[-3]], [-3, 4], [3, -4], [[-3, 4]], [[4, -3]],
+ [[1, 2]] * 9 + [[1, -2]]):
+ with pytest.raises(ValueError, match="negative values"):
+ _as_pairs(x, 10, as_index=True)
+
+ def test_exceptions(self):
+ """Ensure faulty usage is discovered."""
+ with pytest.raises(ValueError, match="more dimensions than allowed"):
+ _as_pairs([[[3]]], 10)
+ with pytest.raises(ValueError, match="could not be broadcast"):
+ _as_pairs([[1, 2], [3, 4]], 3)
+ with pytest.raises(ValueError, match="could not be broadcast"):
+ _as_pairs(np.ones((2, 3)), 3)
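
    Taken together, these tests pin down the `_as_pairs` contract: broadcast
    `x` to shape `(ndim, 2)`, pass `None` through as pairs, and round and
    validate when `as_index=True`. A rough behavioral sketch under those
    assumptions (the real helper in `numpy.lib.arraypad` performs the more
    careful validation exercised by `test_exceptions`):

        import numpy as np

        def as_pairs_sketch(x, ndim, as_index=False):
            # Behavioral sketch only; omits the ndim/broadcast error checks.
            if x is None:
                return ((None, None),) * ndim
            x = np.asarray(x)
            if as_index:
                x = np.round(x).astype(np.intp)
                if x.min() < 0:
                    raise ValueError("index can't contain negative values")
            return np.broadcast_to(x, (ndim, 2))
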
class TestConditionalShortcuts(object):
@@ -344,6 +432,20 @@ class TestStatistic(object):
)
assert_array_equal(a, b)
+ @pytest.mark.parametrize("mode", [
+ pytest.param("mean", marks=pytest.mark.xfail(reason="gh-11216")),
+ "median",
+ "minimum",
+ "maximum"
+ ])
+ def test_same_prepend_append(self, mode):
+        """Test that appended and prepended values are equal."""
+ # This test is constructed to trigger floating point rounding errors in
+ # a way that caused gh-11216 for mode=='mean'
+ a = np.array([-1, 2, -1]) + np.array([0, 1e-12, 0], dtype=np.float64)
+ a = np.pad(a, (1, 1), mode)
+ assert_equal(a[0], a[-1])
+
class TestConstant(object):
def test_check_constant(self):
@@ -502,6 +604,22 @@ class TestConstant(object):
expected = np.full(7, int64_max, dtype=np.int64)
assert_array_equal(test, expected)
+ def test_check_object_array(self):
+ arr = np.empty(1, dtype=object)
+ obj_a = object()
+ arr[0] = obj_a
+ obj_b = object()
+ obj_c = object()
+ arr = np.pad(arr, pad_width=1, mode='constant',
+ constant_values=(obj_b, obj_c))
+
+ expected = np.empty((3,), dtype=object)
+ expected[0] = obj_b
+ expected[1] = obj_a
+ expected[2] = obj_c
+
+ assert_array_equal(arr, expected)
+
class TestLinearRamp(object):
def test_check_simple(self):
@@ -542,6 +660,25 @@ class TestLinearRamp(object):
[0., 0., 0., 0., 0., 0., 0., 0., 0.]])
assert_allclose(test, expected)
+ @pytest.mark.xfail(exceptions=(AssertionError,))
+ def test_object_array(self):
+ from fractions import Fraction
+ arr = np.array([Fraction(1, 2), Fraction(-1, 2)])
+ actual = np.pad(arr, (2, 3), mode='linear_ramp', end_values=0)
+
+ # deliberately chosen to have a non-power-of-2 denominator such that
+ # rounding to floats causes a failure.
+ expected = np.array([
+ Fraction( 0, 12),
+ Fraction( 3, 12),
+ Fraction( 6, 12),
+ Fraction(-6, 12),
+ Fraction(-4, 12),
+ Fraction(-2, 12),
+ Fraction(-0, 12),
+ ])
+ assert_equal(actual, expected)
+
class TestReflect(object):
def test_check_simple(self):
@@ -887,6 +1024,11 @@ class TestWrap(object):
b = np.array([3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1])
assert_array_equal(a, b)
+ def test_pad_with_zero(self):
+ a = np.ones((3, 5))
+ b = np.pad(a, (0, 5), mode="wrap")
+ assert_array_equal(a, b[:-5, :-5])
+
class TestStatLen(object):
def test_check_simple(self):
@@ -1009,6 +1151,21 @@ class TestUnicodeInput(object):
assert_array_equal(a, b)
+class TestObjectInput(object):
+ def test_object_input(self):
+ # Regression test for issue gh-11395.
+ a = np.full((4, 3), None)
+ pad_amt = ((2, 3), (3, 2))
+ b = np.full((9, 8), None)
+ modes = ['edge',
+ 'symmetric',
+ 'reflect',
+ 'wrap',
+ ]
+ for mode in modes:
+ assert_array_equal(pad(a, pad_amt, mode=mode), b)
+
+
class TestValueError1(object):
def test_check_simple(self):
arr = np.arange(30)
diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py
index c76afb8e5..a17fc66e5 100644
--- a/numpy/lib/tests/test_arraysetops.py
+++ b/numpy/lib/tests/test_arraysetops.py
@@ -4,12 +4,14 @@
from __future__ import division, absolute_import, print_function
import numpy as np
-import sys
-from numpy.testing import assert_array_equal, assert_equal, assert_raises
+from numpy.testing import (assert_array_equal, assert_equal,
+ assert_raises, assert_raises_regex)
from numpy.lib.arraysetops import (
ediff1d, intersect1d, setxor1d, union1d, setdiff1d, unique, in1d, isin
)
+import pytest
+
class TestSetOps(object):
@@ -125,6 +127,68 @@ class TestSetOps(object):
assert_array_equal([7,1], ediff1d(two_elem, to_begin=7))
assert_array_equal([5,6,1], ediff1d(two_elem, to_begin=[5,6]))
+ @pytest.mark.parametrize("ary, prepend, append", [
+        # should fail because it tries to cast the
+        # np.nan floating-point value into an
+        # integer array:
+ (np.array([1, 2, 3], dtype=np.int64),
+ None,
+ np.nan),
+ # should fail because attempting
+ # to downcast to smaller int type:
+ (np.array([1, 2, 3], dtype=np.int32),
+ np.array([5, 7, 2], dtype=np.int64),
+ None),
+ # should fail because attempting to cast
+ # two special floating point values
+ # to integers (on both sides of ary):
+ (np.array([1., 3., 9.], dtype=np.int8),
+ np.nan,
+ np.nan),
+ ])
+ def test_ediff1d_forbidden_type_casts(self, ary, prepend, append):
+ # verify resolution of gh-11490
+
+ # specifically, raise an appropriate
+ # Exception when attempting to append or
+ # prepend with an incompatible type
+ msg = 'must be compatible'
+ with assert_raises_regex(TypeError, msg):
+ ediff1d(ary=ary,
+ to_end=append,
+ to_begin=prepend)
+
+ @pytest.mark.parametrize("ary,"
+ "prepend,"
+ "append,"
+ "expected", [
+ (np.array([1, 2, 3], dtype=np.int16),
+ 0,
+ None,
+ np.array([0, 1, 1], dtype=np.int16)),
+ (np.array([1, 2, 3], dtype=np.int32),
+ 0,
+ 0,
+ np.array([0, 1, 1, 0], dtype=np.int32)),
+ (np.array([1, 2, 3], dtype=np.int64),
+ 3,
+ -9,
+ np.array([3, 1, 1, -9], dtype=np.int64)),
+ ])
+ def test_ediff1d_scalar_handling(self,
+ ary,
+ prepend,
+ append,
+ expected):
+ # maintain backwards-compatibility
+ # of scalar prepend / append behavior
+ # in ediff1d following fix for gh-11490
+ actual = np.ediff1d(ary=ary,
+ to_end=append,
+ to_begin=prepend)
+ assert_equal(actual, expected)
+
def test_isin(self):
# the tests for in1d cover most of isin's behavior
# if in1d is removed, would need to change those tests to test
@@ -323,6 +387,13 @@ class TestSetOps(object):
a = np.array((), np.uint32)
assert_equal(setdiff1d(a, []).dtype, np.uint32)
+ def test_setdiff1d_unique(self):
+ a = np.array([3, 2, 1])
+ b = np.array([7, 5, 2])
+ expected = np.array([3, 1])
+ actual = setdiff1d(a, b, assume_unique=True)
+ assert_equal(actual, expected)
+
def test_setdiff1d_char_array(self):
a = np.array(['a', 'b', 'c'])
b = np.array(['a', 'b', 's'])
diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py
index fd227595a..077507082 100644
--- a/numpy/lib/tests/test_format.py
+++ b/numpy/lib/tests/test_format.py
@@ -286,7 +286,7 @@ from io import BytesIO
import numpy as np
from numpy.testing import (
- assert_, assert_array_equal, assert_raises, raises, SkipTest
+ assert_, assert_array_equal, assert_raises, assert_raises_regex,
)
from numpy.lib import format
@@ -523,6 +523,30 @@ def test_compressed_roundtrip():
assert_array_equal(arr, arr1)
+# aligned
+dt1 = np.dtype('i1, i4, i1', align=True)
+# non-aligned, explicit offsets
+dt2 = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'i4'],
+ 'offsets': [1, 6]})
+# nested struct-in-struct
+dt3 = np.dtype({'names': ['c', 'd'], 'formats': ['i4', dt2]})
+# field with '' name
+dt4 = np.dtype({'names': ['a', '', 'b'], 'formats': ['i4']*3})
+# titles
+dt5 = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'i4'],
+ 'offsets': [1, 6], 'titles': ['aa', 'bb']})
+
+@pytest.mark.parametrize("dt", [dt1, dt2, dt3, dt4, dt5])
+def test_load_padded_dtype(dt):
+ arr = np.zeros(3, dt)
+ for i in range(3):
+ arr[i] = i + 5
+ npz_file = os.path.join(tempdir, 'aligned.npz')
+ np.savez(npz_file, arr=arr)
+ arr1 = np.load(npz_file)['arr']
+ assert_array_equal(arr, arr1)
+
+
def test_python2_python3_interoperability():
if sys.version_info[0] >= 3:
fname = 'win64python2.npy'
@@ -532,7 +556,6 @@ def test_python2_python3_interoperability():
data = np.load(path)
assert_array_equal(data, np.ones(2))
-
def test_pickle_python2_python3():
# Test that loading object arrays saved on Python 2 works both on
# Python 2 and Python 3 and vice versa
@@ -678,12 +701,9 @@ def test_write_version():
(255, 255),
]
for version in bad_versions:
- try:
+ with assert_raises_regex(ValueError,
+ 'we only support format version.*'):
format.write_array(f, arr, version=version)
- except ValueError:
- pass
- else:
- raise AssertionError("we should have raised a ValueError for the bad version %r" % (version,))
bad_version_magic = [
@@ -809,7 +829,7 @@ def test_bad_header():
def test_large_file_support():
if (sys.platform == 'win32' or sys.platform == 'cygwin'):
- raise SkipTest("Unknown if Windows has sparse filesystems")
+ pytest.skip("Unknown if Windows has sparse filesystems")
# try creating a large sparse file
tf_name = os.path.join(tempdir, 'sparse_file')
try:
@@ -819,7 +839,7 @@ def test_large_file_support():
import subprocess as sp
sp.check_call(["truncate", "-s", "5368709120", tf_name])
except Exception:
- raise SkipTest("Could not create 5GB large file")
+ pytest.skip("Could not create 5GB large file")
# write a small array to the end
with open(tf_name, "wb") as f:
f.seek(5368709120)
@@ -841,7 +861,7 @@ def test_large_archive():
try:
a = np.empty((2**30, 2), dtype=np.uint8)
except MemoryError:
- raise SkipTest("Could not create large file")
+ pytest.skip("Could not create large file")
fname = os.path.join(tempdir, "large_archive")
@@ -852,3 +872,10 @@ def test_large_archive():
new_a = np.load(f)["arr"]
assert_(a.shape == new_a.shape)
+
+
+def test_empty_npz():
+ # Test for gh-9989
+ fname = os.path.join(tempdir, "nothing.npz")
+ np.savez(fname)
+ np.load(fname)
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 4103a9eb3..3d4b0e3b2 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -11,17 +11,15 @@ from numpy import ma
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_almost_equal,
assert_array_almost_equal, assert_raises, assert_allclose,
- assert_array_max_ulp, assert_warns, assert_raises_regex, suppress_warnings,
- HAS_REFCOUNT,
+ assert_warns, assert_raises_regex, suppress_warnings, HAS_REFCOUNT,
)
import numpy.lib.function_base as nfb
from numpy.random import rand
from numpy.lib import (
add_newdoc_ufunc, angle, average, bartlett, blackman, corrcoef, cov,
delete, diff, digitize, extract, flipud, gradient, hamming, hanning,
- histogram, histogramdd, i0, insert, interp, kaiser, meshgrid, msort,
- piecewise, place, rot90, select, setxor1d, sinc, split, trapz, trim_zeros,
- unwrap, unique, vectorize
+ i0, insert, interp, kaiser, meshgrid, msort, piecewise, place, rot90,
+ select, setxor1d, sinc, trapz, trim_zeros, unwrap, unique, vectorize
)
from numpy.compat import long
@@ -734,6 +732,58 @@ class TestDiff(object):
assert_array_equal(out3.mask, [[], [], [], [], []])
assert_(type(out3) is type(x))
+ def test_prepend(self):
+ x = np.arange(5) + 1
+ assert_array_equal(diff(x, prepend=0), np.ones(5))
+ assert_array_equal(diff(x, prepend=[0]), np.ones(5))
+ assert_array_equal(np.cumsum(np.diff(x, prepend=0)), x)
+ assert_array_equal(diff(x, prepend=[-1, 0]), np.ones(6))
+
+ x = np.arange(4).reshape(2, 2)
+ result = np.diff(x, axis=1, prepend=0)
+ expected = [[0, 1], [2, 1]]
+ assert_array_equal(result, expected)
+ result = np.diff(x, axis=1, prepend=[[0], [0]])
+ assert_array_equal(result, expected)
+
+ result = np.diff(x, axis=0, prepend=0)
+ expected = [[0, 1], [2, 2]]
+ assert_array_equal(result, expected)
+ result = np.diff(x, axis=0, prepend=[[0, 0]])
+ assert_array_equal(result, expected)
+
+ assert_raises(ValueError, np.diff, x, prepend=np.zeros((3,3)))
+
+ assert_raises(np.AxisError, diff, x, prepend=0, axis=3)
+
+ def test_append(self):
+ x = np.arange(5)
+ result = diff(x, append=0)
+ expected = [1, 1, 1, 1, -4]
+ assert_array_equal(result, expected)
+ result = diff(x, append=[0])
+ assert_array_equal(result, expected)
+ result = diff(x, append=[0, 2])
+ expected = expected + [2]
+ assert_array_equal(result, expected)
+
+ x = np.arange(4).reshape(2, 2)
+ result = np.diff(x, axis=1, append=0)
+ expected = [[1, -1], [1, -3]]
+ assert_array_equal(result, expected)
+ result = np.diff(x, axis=1, append=[[0], [0]])
+ assert_array_equal(result, expected)
+
+ result = np.diff(x, axis=0, append=0)
+ expected = [[2, 2], [-2, -3]]
+ assert_array_equal(result, expected)
+ result = np.diff(x, axis=0, append=[[0, 0]])
+ assert_array_equal(result, expected)
+
+ assert_raises(ValueError, np.diff, x, append=np.zeros((3,3)))
+
+ assert_raises(np.AxisError, diff, x, append=0, axis=3)
+
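
    As the `test_prepend` case above already exercises, the new `prepend` /
    `append` keywords give `np.diff` output the same length as its input,
    which makes `cumsum` an exact inverse:

        >>> x = np.array([3, 5, 9])
        >>> d = np.diff(x, prepend=0)
        >>> d
        array([3, 2, 4])
        >>> np.cumsum(d)
        array([3, 5, 9])
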
class TestDelete(object):
@@ -1043,6 +1093,16 @@ class TestAngle(object):
assert_array_almost_equal(y, yo, 11)
assert_array_almost_equal(z, zo, 11)
+ def test_subclass(self):
+ x = np.ma.array([1 + 3j, 1, np.sqrt(2)/2 * (1 + 1j)])
+ x[1] = np.ma.masked
+ expected = np.ma.array([np.arctan(3.0 / 1.0), 0, np.arctan(1.0)])
+ expected[1] = np.ma.masked
+ actual = angle(x)
+ assert_equal(type(actual), type(expected))
+ assert_equal(actual.mask, expected.mask)
+ assert_equal(actual, expected)
+
class TestTrimZeros(object):
@@ -1510,6 +1570,18 @@ class TestDigitize(object):
assert_(not isinstance(digitize(b, a, False), A))
assert_(not isinstance(digitize(b, a, True), A))
+ def test_large_integers_increasing(self):
+ # gh-11022
+ x = 2**54 # loses precision in a float
+ assert_equal(np.digitize(x, [x - 1, x + 1]), 1)
+
+ @pytest.mark.xfail(
+        reason="gh-11022: np.core.multiarray._monotonicity loses precision")
+ def test_large_integers_decreasing(self):
+ # gh-11022
+ x = 2**54 # loses precision in a float
+ assert_equal(np.digitize(x, [x + 1, x - 1]), 1)
+
class TestUnwrap(object):
@@ -2237,6 +2309,14 @@ class TestInterp(object):
x0 = np.nan
assert_almost_equal(np.interp(x0, x, y), x0)
+ def test_non_finite_behavior(self):
+ x = [1, 2, 2.5, 3, 4]
+ xp = [1, 2, 3, 4]
+ fp = [1, 2, np.inf, 4]
+ assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.inf, np.inf, 4])
+ fp = [1, 2, np.nan, 4]
+ assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.nan, np.nan, 4])
+
def test_complex_interp(self):
# test complex interpolation
x = np.linspace(0, 1, 5)
@@ -2251,6 +2331,12 @@ class TestInterp(object):
x0 = 2.0
right = 2 + 3.0j
assert_almost_equal(np.interp(x0, x, y, right=right), right)
+ # test complex non finite
+ x = [1, 2, 2.5, 3, 4]
+ xp = [1, 2, 3, 4]
+ fp = [1, 2+1j, np.inf, 4]
+ y = [1, 2+1j, np.inf+0.5j, np.inf, 4]
+ assert_almost_equal(np.interp(x, xp, fp), y)
# test complex periodic
x = [-180, -170, -185, 185, -10, -5, 0, 365]
xp = [190, -190, 350, -350]
@@ -3026,3 +3112,29 @@ class TestAdd_newdoc(object):
assert_equal(np.core.flatiter.index.__doc__[:len(tgt)], tgt)
assert_(len(np.core.ufunc.identity.__doc__) > 300)
assert_(len(np.lib.index_tricks.mgrid.__doc__) > 300)
+
+class TestSortComplex(object):
+
+ @pytest.mark.parametrize("type_in, type_out", [
+ ('l', 'D'),
+ ('h', 'F'),
+ ('H', 'F'),
+ ('b', 'F'),
+ ('B', 'F'),
+ ('g', 'G'),
+ ])
+ def test_sort_real(self, type_in, type_out):
+ # sort_complex() type casting for real input types
+ a = np.array([5, 3, 6, 2, 1], dtype=type_in)
+ actual = np.sort_complex(a)
+ expected = np.sort(a).astype(type_out)
+ assert_equal(actual, expected)
+ assert_equal(actual.dtype, expected.dtype)
+
+ def test_sort_complex(self):
+ # sort_complex() handling of complex input
+ a = np.array([2 + 3j, 1 - 2j, 1 - 3j, 2 + 1j], dtype='D')
+ expected = np.array([1 - 3j, 1 - 2j, 2 + 1j, 2 + 3j], dtype='D')
+ actual = np.sort_complex(a)
+ assert_equal(actual, expected)
+ assert_equal(actual.dtype, expected.dtype)
diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py
index a71060a46..c96b01d42 100644
--- a/numpy/lib/tests/test_histograms.py
+++ b/numpy/lib/tests/test_histograms.py
@@ -6,7 +6,7 @@ from numpy.lib.histograms import histogram, histogramdd, histogram_bin_edges
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_almost_equal,
assert_array_almost_equal, assert_raises, assert_allclose,
- assert_array_max_ulp, assert_warns, assert_raises_regex, suppress_warnings,
+ assert_array_max_ulp, assert_raises_regex, suppress_warnings,
)
@@ -119,6 +119,13 @@ class TestHistogram(object):
h, b = histogram(a, bins=8, range=[1, 9], weights=w)
assert_equal(h, w[1:-1])
+ def test_arr_weights_mismatch(self):
+ a = np.arange(10) + .5
+ w = np.arange(11) + .5
+ with assert_raises_regex(ValueError, "same shape as"):
+ h, b = histogram(a, range=[1, 9], weights=w, density=True)
+
def test_type(self):
# Check the type of the returned histogram
a = np.arange(10) + .5
@@ -242,6 +249,12 @@ class TestHistogram(object):
assert_raises(ValueError, histogram, vals, range=[np.nan,0.75])
assert_raises(ValueError, histogram, vals, range=[0.25,np.inf])
+ def test_invalid_range(self):
+ # start of range must be < end of range
+ vals = np.linspace(0.0, 1.0, num=100)
+ with assert_raises_regex(ValueError, "max must be larger than"):
+ np.histogram(vals, range=[0.1, 0.01])
+
def test_bin_edge_cases(self):
# Ensure that floating-point computations correctly place edge cases.
arr = np.array([337, 404, 739, 806, 1007, 1811, 2012])
@@ -258,6 +271,13 @@ class TestHistogram(object):
hist, edges = np.histogram(arr, bins=30, range=(-0.5, 5))
assert_equal(hist[-1], 1)
+ def test_bin_array_dims(self):
+        # gracefully handle a bins object with more than 1 dimension
+ vals = np.linspace(0.0, 1.0, num=100)
+ bins = np.array([[0, 0.5], [0.6, 1.0]])
+ with assert_raises_regex(ValueError, "must be 1d"):
+ np.histogram(vals, bins=bins)
+
def test_unsigned_monotonicity_check(self):
# Ensures ValueError is raised if bins not increasing monotonically
# when bins contain unsigned values (see #9222)
@@ -269,13 +289,13 @@ class TestHistogram(object):
def test_object_array_of_0d(self):
# gh-7864
assert_raises(ValueError,
- histogram, [np.array([0.4]) for i in range(10)] + [-np.inf])
+ histogram, [np.array(0.4) for i in range(10)] + [-np.inf])
assert_raises(ValueError,
- histogram, [np.array([0.4]) for i in range(10)] + [np.inf])
+ histogram, [np.array(0.4) for i in range(10)] + [np.inf])
# these should not crash
- np.histogram([np.array([0.5]) for i in range(10)] + [.500000000000001])
- np.histogram([np.array([0.5]) for i in range(10)] + [.5])
+ np.histogram([np.array(0.5) for i in range(10)] + [.500000000000001])
+ np.histogram([np.array(0.5) for i in range(10)] + [.5])
def test_some_nan_values(self):
# gh-7503
@@ -411,7 +431,7 @@ class TestHistogramOptimBinNums(object):
def test_empty(self):
estimator_list = ['fd', 'scott', 'rice', 'sturges',
- 'doane', 'sqrt', 'auto']
+ 'doane', 'sqrt', 'auto', 'stone']
# check it can deal with empty data
for estimator in estimator_list:
a, b = histogram([], bins=estimator)
@@ -427,11 +447,11 @@ class TestHistogramOptimBinNums(object):
# Some basic sanity checking, with some fixed data.
# Checking for the correct number of bins
basic_test = {50: {'fd': 4, 'scott': 4, 'rice': 8, 'sturges': 7,
- 'doane': 8, 'sqrt': 8, 'auto': 7},
+ 'doane': 8, 'sqrt': 8, 'auto': 7, 'stone': 2},
500: {'fd': 8, 'scott': 8, 'rice': 16, 'sturges': 10,
- 'doane': 12, 'sqrt': 23, 'auto': 10},
+ 'doane': 12, 'sqrt': 23, 'auto': 10, 'stone': 9},
5000: {'fd': 17, 'scott': 17, 'rice': 35, 'sturges': 14,
- 'doane': 17, 'sqrt': 71, 'auto': 17}}
+ 'doane': 17, 'sqrt': 71, 'auto': 17, 'stone': 20}}
for testlen, expectedResults in basic_test.items():
# Create some sort of non uniform data to test with
@@ -451,11 +471,11 @@ class TestHistogramOptimBinNums(object):
precalculated.
"""
small_dat = {1: {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1,
- 'doane': 1, 'sqrt': 1},
+ 'doane': 1, 'sqrt': 1, 'stone': 1},
2: {'fd': 2, 'scott': 1, 'rice': 3, 'sturges': 2,
- 'doane': 1, 'sqrt': 2},
+ 'doane': 1, 'sqrt': 2, 'stone': 1},
3: {'fd': 2, 'scott': 2, 'rice': 3, 'sturges': 3,
- 'doane': 3, 'sqrt': 2}}
+ 'doane': 3, 'sqrt': 2, 'stone': 1}}
for testlen, expectedResults in small_dat.items():
testdat = np.arange(testlen)
@@ -479,7 +499,7 @@ class TestHistogramOptimBinNums(object):
"""
novar_dataset = np.ones(100)
novar_resultdict = {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1,
- 'doane': 1, 'sqrt': 1, 'auto': 1}
+ 'doane': 1, 'sqrt': 1, 'auto': 1, 'stone': 1}
for estimator, numbins in novar_resultdict.items():
a, b = np.histogram(novar_dataset, estimator)
@@ -518,12 +538,32 @@ class TestHistogramOptimBinNums(object):
xcenter = np.linspace(-10, 10, 50)
outlier_dataset = np.hstack((np.linspace(-110, -100, 5), xcenter))
- outlier_resultdict = {'fd': 21, 'scott': 5, 'doane': 11}
+ outlier_resultdict = {'fd': 21, 'scott': 5, 'doane': 11, 'stone': 6}
for estimator, numbins in outlier_resultdict.items():
a, b = np.histogram(outlier_dataset, estimator)
assert_equal(len(a), numbins)
+ def test_scott_vs_stone(self):
+        """Verify that Scott's rule and Stone's rule converge for normally distributed data."""
+
+ def nbins_ratio(seed, size):
+ rng = np.random.RandomState(seed)
+ x = rng.normal(loc=0, scale=2, size=size)
+ a, b = len(np.histogram(x, 'stone')[0]), len(np.histogram(x, 'scott')[0])
+ return a / (a + b)
+
+ ll = [[nbins_ratio(seed, size) for size in np.geomspace(start=10, stop=100, num=4).round().astype(int)]
+ for seed in range(256)]
+
+ # the average difference between the two methods decreases as the dataset size increases.
+ assert_almost_equal(abs(np.mean(ll, axis=0) - 0.5),
+ [0.1065248,
+ 0.0968844,
+ 0.0331818,
+ 0.0178057],
+ decimal=3)
+
def test_simple_range(self):
"""
Straightforward testing with a mixture of linspace data (for
@@ -535,11 +575,11 @@ class TestHistogramOptimBinNums(object):
# Checking for the correct number of bins
basic_test = {
50: {'fd': 8, 'scott': 8, 'rice': 15,
- 'sturges': 14, 'auto': 14},
+ 'sturges': 14, 'auto': 14, 'stone': 8},
500: {'fd': 15, 'scott': 16, 'rice': 32,
- 'sturges': 20, 'auto': 20},
+ 'sturges': 20, 'auto': 20, 'stone': 80},
5000: {'fd': 33, 'scott': 33, 'rice': 69,
- 'sturges': 27, 'auto': 33}
+ 'sturges': 27, 'auto': 33, 'stone': 80}
}
for testlen, expectedResults in basic_test.items():
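
    The new entry in these tables is the 'stone' bin estimator added in this
    diff; it picks the bin count that minimizes a leave-one-out
    cross-validation estimate of the integrated squared error. Minimal usage
    (a sketch; the exact bin count is data-dependent):

        import numpy as np

        rng = np.random.RandomState(42)
        x = rng.normal(loc=0, scale=2, size=500)
        # Bin count chosen by Stone's cross-validation rule:
        hist, edges = np.histogram(x, bins='stone')
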
@@ -774,3 +814,20 @@ class TestHistogramdd(object):
hist_dd, edges_dd = histogramdd((v,), (bins,), density=True)
assert_equal(hist, hist_dd)
assert_equal(edges, edges_dd[0])
+
+ def test_density_via_normed(self):
+ # normed should simply alias to density argument
+ v = np.arange(10)
+ bins = np.array([0, 1, 3, 6, 10])
+ hist, edges = histogram(v, bins, density=True)
+ hist_dd, edges_dd = histogramdd((v,), (bins,), normed=True)
+ assert_equal(hist, hist_dd)
+ assert_equal(edges, edges_dd[0])
+
+ def test_density_normed_redundancy(self):
+ v = np.arange(10)
+ bins = np.array([0, 1, 3, 6, 10])
+ with assert_raises_regex(TypeError, "Cannot specify both"):
+ hist_dd, edges_dd = histogramdd((v,), (bins,),
+ density=True,
+ normed=True)
diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py
index 315251daa..3246f68ff 100644
--- a/numpy/lib/tests/test_index_tricks.py
+++ b/numpy/lib/tests/test_index_tricks.py
@@ -1,9 +1,12 @@
from __future__ import division, absolute_import, print_function
+import pytest
+
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_almost_equal,
- assert_array_almost_equal, assert_raises, assert_raises_regex
+ assert_array_almost_equal, assert_raises, assert_raises_regex,
+ assert_warns
)
from numpy.lib.index_tricks import (
mgrid, ogrid, ndenumerate, fill_diagonal, diag_indices, diag_indices_from,
@@ -14,6 +17,33 @@ from numpy.lib.index_tricks import (
class TestRavelUnravelIndex(object):
def test_basic(self):
assert_equal(np.unravel_index(2, (2, 2)), (1, 0))
+
+ # test backwards compatibility with older dims
+ # keyword argument; see Issue #10586
+ with assert_warns(DeprecationWarning):
+            # the correct result should be achieved and the
+            # appropriate warning raised when using the older
+            # "dims" keyword argument
+ assert_equal(np.unravel_index(indices=2,
+ dims=(2, 2)),
+ (1, 0))
+
+ # test that new shape argument works properly
+ assert_equal(np.unravel_index(indices=2,
+ shape=(2, 2)),
+ (1, 0))
+
+ # test that an invalid second keyword argument
+ # is properly handled
+ with assert_raises(TypeError):
+ np.unravel_index(indices=2, hape=(2, 2))
+
+ with assert_raises(TypeError):
+ np.unravel_index(2, hape=(2, 2))
+
+ with assert_raises(TypeError):
+ np.unravel_index(254, ims=(17, 94))
+
assert_equal(np.ravel_multi_index((1, 0), (2, 2)), 2)
assert_equal(np.unravel_index(254, (17, 94)), (2, 66))
assert_equal(np.ravel_multi_index((2, 66), (17, 94)), 254)
@@ -113,7 +143,6 @@ class TestRavelUnravelIndex(object):
assert_(x.flags.writeable)
assert_(y.flags.writeable)
-
def test_0d(self):
# gh-580
x = np.unravel_index(0, ())
@@ -165,6 +194,22 @@ class TestGrid(object):
for f, b in zip(grid_full, grid_broadcast):
assert_equal(f, b)
+ @pytest.mark.parametrize("start, stop, step, expected", [
+ (None, 10, 10j, (200, 10)),
+ (-10, 20, None, (1800, 30)),
+ ])
+ def test_mgrid_size_none_handling(self, start, stop, step, expected):
+        # regression test for None handling of the start and step
+        # values used by mgrid; internally, this covers previously
+        # unexplored code paths in nd_grid()
+ grid = mgrid[start:stop:step, start:stop:step]
+ # need a smaller grid to explore one of the
+ # untested code paths
+ grid_small = mgrid[start:stop:step]
+ assert_equal(grid.size, expected[0])
+ assert_equal(grid_small.size, expected[1])
+
class TestConcatenator(object):
def test_1d(self):
@@ -181,6 +226,11 @@ class TestConcatenator(object):
g = r_[-10.1, np.array([1]), np.array([2, 3, 4]), 10.0]
assert_(g.dtype == 'f8')
+ def test_complex_step(self):
+ # Regression test for #12262
+ g = r_[0:36:100j]
+ assert_(g.shape == (100,))
+
def test_2d(self):
b = np.random.rand(5, 5)
c = np.random.rand(5, 5)
@@ -319,6 +369,19 @@ class TestFillDiagonal(object):
i = np.array([0, 1, 2])
assert_equal(np.where(a != 0), (i, i, i, i))
+ def test_low_dim_handling(self):
+ # raise error with low dimensionality
+ a = np.zeros(3, int)
+ with assert_raises_regex(ValueError, "at least 2-d"):
+ fill_diagonal(a, 5)
+
+ def test_hetero_shape_handling(self):
+ # raise error with high dimensionality and
+ # shape mismatch
+ a = np.zeros((3,3,7,3), int)
+ with assert_raises_regex(ValueError, "equal length"):
+ fill_diagonal(a, 2)
+
def test_diag_indices():
di = diag_indices(4)
@@ -348,11 +411,23 @@ def test_diag_indices():
)
-def test_diag_indices_from():
- x = np.random.random((4, 4))
- r, c = diag_indices_from(x)
- assert_array_equal(r, np.arange(4))
- assert_array_equal(c, np.arange(4))
+class TestDiagIndicesFrom(object):
+
+ def test_diag_indices_from(self):
+ x = np.random.random((4, 4))
+ r, c = diag_indices_from(x)
+ assert_array_equal(r, np.arange(4))
+ assert_array_equal(c, np.arange(4))
+
+ def test_error_small_input(self):
+ x = np.ones(7)
+ with assert_raises_regex(ValueError, "at least 2-d"):
+ diag_indices_from(x)
+
+ def test_error_shape_mismatch(self):
+ x = np.zeros((3, 3, 2, 3), int)
+ with assert_raises_regex(ValueError, "equal length"):
+ diag_indices_from(x)
def test_ndindex():
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index f58c9e33d..7ef25538b 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -6,7 +6,6 @@ import os
import threading
import time
import warnings
-import gc
import io
import re
import pytest
@@ -18,10 +17,10 @@ import locale
import numpy as np
import numpy.ma as ma
from numpy.lib._iotools import ConverterError, ConversionWarning
-from numpy.compat import asbytes, bytes, unicode, Path
+from numpy.compat import asbytes, bytes, Path
from numpy.ma.testutils import assert_equal
from numpy.testing import (
- assert_warns, assert_, SkipTest, assert_raises_regex, assert_raises,
+ assert_warns, assert_, assert_raises_regex, assert_raises,
assert_allclose, assert_array_equal, temppath, tempdir, IS_PYPY,
HAS_REFCOUNT, suppress_warnings, assert_no_gc_cycles,
)
@@ -348,7 +347,6 @@ class TestSaveTxt(object):
assert_raises(ValueError, np.savetxt, c, np.array(1))
assert_raises(ValueError, np.savetxt, c, np.array([[[1], [2]]]))
-
def test_record(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
c = BytesIO()
@@ -356,6 +354,16 @@ class TestSaveTxt(object):
c.seek(0)
assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
+ @pytest.mark.skipif(Path is None, reason="No pathlib.Path")
+ def test_multifield_view(self):
+ a = np.ones(1, dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'f4')])
+ v = a[['x', 'z']]
+ with temppath(suffix='.npy') as path:
+ path = Path(path)
+ np.save(path, v)
+ data = np.load(path)
+ assert_array_equal(data, v)
+
def test_delimiter(self):
a = np.array([[1., 2.], [3., 4.]])
c = BytesIO()
@@ -568,12 +576,12 @@ class LoadTxtBase(object):
@pytest.mark.skipif(not HAS_BZ2, reason="Needs bz2")
@pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3")
- def test_compressed_gzip(self):
+ def test_compressed_bz2(self):
self.check_compressed(bz2.open, ('.bz2',))
@pytest.mark.skipif(not HAS_LZMA, reason="Needs lzma")
@pytest.mark.skipif(MAJVER == 2, reason="Needs Python version >= 3")
- def test_compressed_gzip(self):
+ def test_compressed_lzma(self):
self.check_compressed(lzma.open, ('.xz', '.lzma'))
def test_encoding(self):
@@ -1069,6 +1077,55 @@ class TestLoadTxt(LoadTxtBase):
x = [b'5,6,7,\xc3\x95scarscar', b'15,2,3,hello', b'20,2,3,\xc3\x95scar']
assert_array_equal(x, np.array(x, dtype="S"))
+ def test_max_rows(self):
+ c = TextIO()
+ c.write('1,2,3,5\n4,5,7,8\n2,1,4,5')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ max_rows=1)
+ a = np.array([1, 2, 3, 5], int)
+ assert_array_equal(x, a)
+
+ def test_max_rows_with_skiprows(self):
+ c = TextIO()
+ c.write('comments\n1,2,3,5\n4,5,7,8\n2,1,4,5')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ skiprows=1, max_rows=1)
+ a = np.array([1, 2, 3, 5], int)
+ assert_array_equal(x, a)
+
+ c = TextIO()
+ c.write('comment\n1,2,3,5\n4,5,7,8\n2,1,4,5')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ skiprows=1, max_rows=2)
+ a = np.array([[1, 2, 3, 5], [4, 5, 7, 8]], int)
+ assert_array_equal(x, a)
+
+ def test_max_rows_with_read_continuation(self):
+ c = TextIO()
+ c.write('1,2,3,5\n4,5,7,8\n2,1,4,5')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ max_rows=2)
+ a = np.array([[1, 2, 3, 5], [4, 5, 7, 8]], int)
+ assert_array_equal(x, a)
+ # test continuation
+ x = np.loadtxt(c, dtype=int, delimiter=',')
+ a = np.array([2,1,4,5], int)
+ assert_array_equal(x, a)
+
+ def test_max_rows_larger(self):
+        # test max_rows greater than the number of rows
+ c = TextIO()
+ c.write('comment\n1,2,3,5\n4,5,7,8\n2,1,4,5')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ skiprows=1, max_rows=6)
+ a = np.array([[1, 2, 3, 5], [4, 5, 7, 8], [2, 1, 4, 5]], int)
+ assert_array_equal(x, a)
+
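
    The keyword under test here, in its minimal form; as
    `test_max_rows_with_read_continuation` shows, a later read on the same
    stream picks up where the first one stopped:

        >>> from io import StringIO
        >>> c = StringIO(u'1,2\n3,4\n5,6')
        >>> np.loadtxt(c, dtype=int, delimiter=',', max_rows=2)
        array([[1, 2],
               [3, 4]])
        >>> np.loadtxt(c, dtype=int, delimiter=',')
        array([5, 6])
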
class Testfromregex(object):
def test_record(self):
c = TextIO()
@@ -1455,14 +1512,10 @@ M 33 21.99
assert_equal(test, control)
ndtype = [('nest', [('idx', int), ('code', object)])]
- try:
+ with assert_raises_regex(NotImplementedError,
+ 'Nested fields.* not supported.*'):
test = np.genfromtxt(TextIO(data), delimiter=";",
dtype=ndtype, converters=converters)
- except NotImplementedError:
- pass
- else:
- errmsg = "Nested dtype involving objects should be supported."
- raise AssertionError(errmsg)
def test_userconverters_with_explicit_dtype(self):
# Test user_converters w/ explicit (standard) dtype
@@ -2005,7 +2058,6 @@ M 33 21.99
def test_utf8_file(self):
utf8 = b"\xcf\x96"
- latin1 = b"\xf6\xfc\xf6"
with temppath() as path:
with open(path, "wb") as f:
f.write((b"test1,testNonethe" + utf8 + b",test3\n") * 2)
@@ -2025,7 +2077,6 @@ M 33 21.99
assert_equal(test['f0'], 0)
assert_equal(test['f1'], "testNonethe" + utf8.decode("UTF-8"))
-
def test_utf8_file_nodtype_unicode(self):
# bytes encoding with non-latin1 -> unicode upcast
utf8 = u'\u03d6'
@@ -2039,8 +2090,8 @@ M 33 21.99
encoding = locale.getpreferredencoding()
utf8.encode(encoding)
except (UnicodeError, ImportError):
- raise SkipTest('Skipping test_utf8_file_nodtype_unicode, '
- 'unable to encode utf8 in preferred encoding')
+ pytest.skip('Skipping test_utf8_file_nodtype_unicode, '
+ 'unable to encode utf8 in preferred encoding')
with temppath() as path:
with io.open(path, "wt") as f:
@@ -2252,11 +2303,35 @@ class TestPathUsage(object):
assert_array_equal(x, a)
def test_save_load(self):
- # Test that pathlib.Path instances can be used with savez.
+ # Test that pathlib.Path instances can be used with save.
+ with temppath(suffix='.npy') as path:
+ path = Path(path)
+ a = np.array([[1, 2], [3, 4]], int)
+ np.save(path, a)
+ data = np.load(path)
+ assert_array_equal(data, a)
+
+ def test_save_load_memmap(self):
+ # Test that pathlib.Path instances can be loaded mem-mapped.
+ with temppath(suffix='.npy') as path:
+ path = Path(path)
+ a = np.array([[1, 2], [3, 4]], int)
+ np.save(path, a)
+ data = np.load(path, mmap_mode='r')
+ assert_array_equal(data, a)
+ # close the mem-mapped file
+ del data
+
+ def test_save_load_memmap_readwrite(self):
+ # Test that pathlib.Path instances can be written mem-mapped.
with temppath(suffix='.npy') as path:
path = Path(path)
a = np.array([[1, 2], [3, 4]], int)
np.save(path, a)
+ b = np.load(path, mmap_mode='r+')
+ a[0][0] = 5
+ b[0][0] = 5
+ del b # closes the file
data = np.load(path)
assert_array_equal(data, a)
@@ -2418,3 +2493,9 @@ def test_load_refcount():
with assert_no_gc_cycles():
np.load(f)
+
+ f.seek(0)
+ dt = [("a", 'u1', 2), ("b", 'u1', 2)]
+ with assert_no_gc_cycles():
+ x = np.loadtxt(TextIO("0 1 2 3"), dtype=dt)
+ assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt))
diff --git a/numpy/lib/tests/test_mixins.py b/numpy/lib/tests/test_mixins.py
index f2d915502..3dd5346b6 100644
--- a/numpy/lib/tests/test_mixins.py
+++ b/numpy/lib/tests/test_mixins.py
@@ -199,6 +199,17 @@ class TestNDArrayOperatorsMixin(object):
err_msg = 'failed for operator {}'.format(op)
_assert_equal_type_and_value(expected, actual, err_msg=err_msg)
+ def test_matmul(self):
+ array = np.array([1, 2], dtype=np.float64)
+ array_like = ArrayLike(array)
+ expected = ArrayLike(np.float64(5))
+ _assert_equal_type_and_value(expected, np.matmul(array_like, array))
+ if not PY2:
+ _assert_equal_type_and_value(
+ expected, operator.matmul(array_like, array))
+ _assert_equal_type_and_value(
+ expected, operator.matmul(array, array_like))
+
def test_ufunc_at(self):
array = ArrayLike(np.array([1, 2, 3, 4]))
assert_(np.negative.at(array, np.array([0, 1])) is None)
diff --git a/numpy/lib/tests/test_polynomial.py b/numpy/lib/tests/test_polynomial.py
index 7f6fca4a4..77414ba7c 100644
--- a/numpy/lib/tests/test_polynomial.py
+++ b/numpy/lib/tests/test_polynomial.py
@@ -1,93 +1,79 @@
-'''
->>> p = np.poly1d([1.,2,3])
->>> p
-poly1d([1., 2., 3.])
->>> print(p)
- 2
-1 x + 2 x + 3
->>> q = np.poly1d([3.,2,1])
->>> q
-poly1d([3., 2., 1.])
->>> print(q)
- 2
-3 x + 2 x + 1
->>> print(np.poly1d([1.89999+2j, -3j, -5.12345678, 2+1j]))
- 3 2
-(1.9 + 2j) x - 3j x - 5.123 x + (2 + 1j)
->>> print(np.poly1d([-3, -2, -1]))
- 2
--3 x - 2 x - 1
-
->>> p(0)
-3.0
->>> p(5)
-38.0
->>> q(0)
-1.0
->>> q(5)
-86.0
-
->>> p * q
-poly1d([ 3., 8., 14., 8., 3.])
->>> p / q
-(poly1d([0.33333333]), poly1d([1.33333333, 2.66666667]))
->>> p + q
-poly1d([4., 4., 4.])
->>> p - q
-poly1d([-2., 0., 2.])
->>> p ** 4
-poly1d([ 1., 8., 36., 104., 214., 312., 324., 216., 81.])
-
->>> p(q)
-poly1d([ 9., 12., 16., 8., 6.])
->>> q(p)
-poly1d([ 3., 12., 32., 40., 34.])
-
->>> np.asarray(p)
-array([1., 2., 3.])
->>> len(p)
-2
-
->>> p[0], p[1], p[2], p[3]
-(3.0, 2.0, 1.0, 0)
-
->>> p.integ()
-poly1d([0.33333333, 1. , 3. , 0. ])
->>> p.integ(1)
-poly1d([0.33333333, 1. , 3. , 0. ])
->>> p.integ(5)
-poly1d([0.00039683, 0.00277778, 0.025 , 0. , 0. ,
- 0. , 0. , 0. ])
->>> p.deriv()
-poly1d([2., 2.])
->>> p.deriv(2)
-poly1d([2.])
-
->>> q = np.poly1d([1.,2,3], variable='y')
->>> print(q)
- 2
-1 y + 2 y + 3
->>> q = np.poly1d([1.,2,3], variable='lambda')
->>> print(q)
- 2
-1 lambda + 2 lambda + 3
-
->>> np.polydiv(np.poly1d([1,0,-1]), np.poly1d([1,1]))
-(poly1d([ 1., -1.]), poly1d([0.]))
-
-'''
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_almost_equal,
- assert_array_almost_equal, assert_raises, rundocs
+ assert_array_almost_equal, assert_raises, assert_allclose
)
-class TestDocs(object):
- def test_doctests(self):
- return rundocs()
+class TestPolynomial(object):
+ def test_poly1d_str_and_repr(self):
+ p = np.poly1d([1., 2, 3])
+ assert_equal(repr(p), 'poly1d([1., 2., 3.])')
+ assert_equal(str(p),
+ ' 2\n'
+ '1 x + 2 x + 3')
+
+ q = np.poly1d([3., 2, 1])
+ assert_equal(repr(q), 'poly1d([3., 2., 1.])')
+ assert_equal(str(q),
+ ' 2\n'
+ '3 x + 2 x + 1')
+
+ r = np.poly1d([1.89999 + 2j, -3j, -5.12345678, 2 + 1j])
+ assert_equal(str(r),
+ ' 3 2\n'
+ '(1.9 + 2j) x - 3j x - 5.123 x + (2 + 1j)')
+
+ assert_equal(str(np.poly1d([-3, -2, -1])),
+ ' 2\n'
+ '-3 x - 2 x - 1')
+
+ def test_poly1d_resolution(self):
+ p = np.poly1d([1., 2, 3])
+ q = np.poly1d([3., 2, 1])
+ assert_equal(p(0), 3.0)
+ assert_equal(p(5), 38.0)
+ assert_equal(q(0), 1.0)
+ assert_equal(q(5), 86.0)
+
+ def test_poly1d_math(self):
+ # use simple coefficients here so the expected values are easy to verify
+ p = np.poly1d([1., 2, 4])
+ q = np.poly1d([4., 2, 1])
+ assert_equal(p/q, (np.poly1d([0.25]), np.poly1d([1.5, 3.75])))
+ assert_equal(p.integ(), np.poly1d([1/3, 1., 4., 0.]))
+ assert_equal(p.integ(1), np.poly1d([1/3, 1., 4., 0.]))
+
+ p = np.poly1d([1., 2, 3])
+ q = np.poly1d([3., 2, 1])
+ assert_equal(p * q, np.poly1d([3., 8., 14., 8., 3.]))
+ assert_equal(p + q, np.poly1d([4., 4., 4.]))
+ assert_equal(p - q, np.poly1d([-2., 0., 2.]))
+ assert_equal(p ** 4, np.poly1d([1., 8., 36., 104., 214., 312., 324., 216., 81.]))
+ assert_equal(p(q), np.poly1d([9., 12., 16., 8., 6.]))
+ assert_equal(q(p), np.poly1d([3., 12., 32., 40., 34.]))
+ assert_equal(p.deriv(), np.poly1d([2., 2.]))
+ assert_equal(p.deriv(2), np.poly1d([2.]))
+ assert_equal(np.polydiv(np.poly1d([1, 0, -1]), np.poly1d([1, 1])),
+ (np.poly1d([1., -1.]), np.poly1d([0.])))
+
+ def test_poly1d_misc(self):
+ p = np.poly1d([1., 2, 3])
+ assert_equal(np.asarray(p), np.array([1., 2., 3.]))
+ assert_equal(len(p), 2)
+ assert_equal((p[0], p[1], p[2], p[3]), (3.0, 2.0, 1.0, 0))
+
+ def test_poly1d_variable_arg(self):
+ q = np.poly1d([1., 2, 3], variable='y')
+ assert_equal(str(q),
+ ' 2\n'
+ '1 y + 2 y + 3')
+ q = np.poly1d([1., 2, 3], variable='lambda')
+ assert_equal(str(q),
+ ' 2\n'
+ '1 lambda + 2 lambda + 3')
def test_poly(self):
assert_array_almost_equal(np.poly([3, -np.sqrt(2), np.sqrt(2)]),
@@ -136,27 +122,34 @@ class TestDocs(object):
weights = np.arange(8, 1, -1)**2/7.0
# Check exception when too few points for variance estimate. Note that
- # the Bayesian estimate requires the number of data points to exceed
- # degree + 3.
+ # the estimate requires the number of data points to exceed
+ # degree + 1.
assert_raises(ValueError, np.polyfit,
- [0, 1, 3], [0, 1, 3], deg=0, cov=True)
+ [1], [1], deg=0, cov=True)
# check 1D case
m, cov = np.polyfit(x, y+err, 2, cov=True)
est = [3.8571, 0.2857, 1.619]
assert_almost_equal(est, m, decimal=4)
- val0 = [[2.9388, -5.8776, 1.6327],
- [-5.8776, 12.7347, -4.2449],
- [1.6327, -4.2449, 2.3220]]
+ val0 = [[ 1.4694, -2.9388, 0.8163],
+ [-2.9388, 6.3673, -2.1224],
+ [ 0.8163, -2.1224, 1.161 ]]
assert_almost_equal(val0, cov, decimal=4)
m2, cov2 = np.polyfit(x, y+err, 2, w=weights, cov=True)
assert_almost_equal([4.8927, -1.0177, 1.7768], m2, decimal=4)
- val = [[8.7929, -10.0103, 0.9756],
- [-10.0103, 13.6134, -1.8178],
- [0.9756, -1.8178, 0.6674]]
+ val = [[ 4.3964, -5.0052, 0.4878],
+ [-5.0052, 6.8067, -0.9089],
+ [ 0.4878, -0.9089, 0.3337]]
assert_almost_equal(val, cov2, decimal=4)
+ m3, cov3 = np.polyfit(x, y+err, 2, w=weights, cov="unscaled")
+ assert_almost_equal([4.8927, -1.0177, 1.7768], m3, decimal=4)
+ val = [[ 0.1473, -0.1677, 0.0163],
+ [-0.1677, 0.228 , -0.0304],
+ [ 0.0163, -0.0304, 0.0112]]
+ assert_almost_equal(val, cov3, decimal=4)
+
# check 2D (n,1) case
y = y[:, np.newaxis]
c = c[:, np.newaxis]
@@ -172,6 +165,29 @@ class TestDocs(object):
assert_almost_equal(val0, cov[:, :, 0], decimal=4)
assert_almost_equal(val0, cov[:, :, 1], decimal=4)
+ # check order 1 (deg=0) case, where the analytic results are simple
+ np.random.seed(123)
+ y = np.random.normal(size=(4, 10000))
+ mean, cov = np.polyfit(np.zeros(y.shape[0]), y, deg=0, cov=True)
+ # Should get sigma_mean = sigma/sqrt(N) = 1./sqrt(4) = 0.5.
+ assert_allclose(mean.std(), 0.5, atol=0.01)
+ assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01)
+ # Without scaling, since reduced chi2 is 1, the result should be the same.
+ mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=np.ones(y.shape[0]),
+ deg=0, cov="unscaled")
+ assert_allclose(mean.std(), 0.5, atol=0.01)
+ assert_almost_equal(np.sqrt(cov.mean()), 0.5)
+ # If we estimate our errors wrong, no change with scaling:
+ w = np.full(y.shape[0], 1./0.5)
+ mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov=True)
+ assert_allclose(mean.std(), 0.5, atol=0.01)
+ assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01)
+ # But if we do not scale, our estimate for the error in the mean will
+ # differ.
+ mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov="unscaled")
+ assert_allclose(mean.std(), 0.5, atol=0.01)
+ assert_almost_equal(np.sqrt(cov.mean()), 0.25)
+
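A hedged standalone sketch of the new cov="unscaled" option these assertions cover: a deg=0 fit of N=4 unit-variance samples estimates the mean, so sigma_mean = 1/sqrt(4) = 0.5, and with unit weights (reduced chi-square near 1) the scaled and unscaled covariances agree. Assumes NumPy >= 1.16:

import numpy as np

np.random.seed(0)
y = np.random.normal(size=(4, 1000))   # 1000 fits of 4 points each
x = np.zeros(4)
_, cov_scaled = np.polyfit(x, y, deg=0, cov=True)
_, cov_raw = np.polyfit(x, y, deg=0, w=np.ones(4), cov="unscaled")
print(np.sqrt(cov_scaled.mean()))  # ~0.5 = 1/sqrt(4)
print(np.sqrt(cov_raw.mean()))     # ~0.5 as well, since chi2/dof ~ 1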
def test_objects(self):
from decimal import Decimal
p = np.poly1d([Decimal('4.0'), Decimal('3.0'), Decimal('2.0')])
diff --git a/numpy/lib/tests/test_recfunctions.py b/numpy/lib/tests/test_recfunctions.py
index d4828bc1f..069693613 100644
--- a/numpy/lib/tests/test_recfunctions.py
+++ b/numpy/lib/tests/test_recfunctions.py
@@ -10,7 +10,8 @@ from numpy.testing import assert_, assert_raises
from numpy.lib.recfunctions import (
drop_fields, rename_fields, get_fieldstructure, recursive_fill_fields,
find_duplicates, merge_arrays, append_fields, stack_arrays, join_by,
- repack_fields)
+ repack_fields, unstructured_to_structured, structured_to_unstructured,
+ apply_along_fields, require_fields, assign_fields_by_name)
get_names = np.lib.recfunctions.get_names
get_names_flat = np.lib.recfunctions.get_names_flat
zip_descr = np.lib.recfunctions.zip_descr
@@ -204,6 +205,77 @@ class TestRecFunctions(object):
dt = np.dtype((np.record, dt))
assert_(repack_fields(dt).type is np.record)
+ def test_structured_to_unstructured(self):
+ a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
+ out = structured_to_unstructured(a)
+ assert_equal(out, np.zeros((4,5), dtype='f8'))
+
+ b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
+ dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
+ out = np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1)
+ assert_equal(out, np.array([ 3. , 5.5, 9. , 11. ]))
+
+ c = np.arange(20).reshape((4,5))
+ out = unstructured_to_structured(c, a.dtype)
+ want = np.array([( 0, ( 1., 2), [ 3., 4.]),
+ ( 5, ( 6., 7), [ 8., 9.]),
+ (10, (11., 12), [13., 14.]),
+ (15, (16., 17), [18., 19.])],
+ dtype=[('a', 'i4'),
+ ('b', [('f0', 'f4'), ('f1', 'u2')]),
+ ('c', 'f4', (2,))])
+ assert_equal(out, want)
+
+ d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
+ dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
+ assert_equal(apply_along_fields(np.mean, d),
+ np.array([ 8.0/3, 16.0/3, 26.0/3, 11. ]))
+ assert_equal(apply_along_fields(np.mean, d[['x', 'z']]),
+ np.array([ 3. , 5.5, 9. , 11. ]))
+
+ # check that for uniform field dtypes we get a view, not a copy:
+ d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
+ dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'i4')])
+ dd = structured_to_unstructured(d)
+ ddd = unstructured_to_structured(dd, d.dtype)
+ assert_(dd.base is d)
+ assert_(ddd.base is d)
+
+ # test that nested fields with identical names don't break anything
+ point = np.dtype([('x', int), ('y', int)])
+ triangle = np.dtype([('a', point), ('b', point), ('c', point)])
+ arr = np.zeros(10, triangle)
+ res = structured_to_unstructured(arr, dtype=int)
+ assert_equal(res, np.zeros((10, 6), dtype=int))
+
+
+ def test_field_assignment_by_name(self):
+ a = np.ones(2, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
+ newdt = [('b', 'f4'), ('c', 'u1')]
+
+ assert_equal(require_fields(a, newdt), np.ones(2, newdt))
+
+ b = np.array([(1,2), (3,4)], dtype=newdt)
+ assign_fields_by_name(a, b, zero_unassigned=False)
+ assert_equal(a, np.array([(1,1,2),(1,3,4)], dtype=a.dtype))
+ assign_fields_by_name(a, b)
+ assert_equal(a, np.array([(0,1,2),(0,3,4)], dtype=a.dtype))
+
+ # test nested fields
+ a = np.ones(2, dtype=[('a', [('b', 'f8'), ('c', 'u1')])])
+ newdt = [('a', [('c', 'u1')])]
+ assert_equal(require_fields(a, newdt), np.ones(2, newdt))
+ b = np.array([((2,),), ((3,),)], dtype=newdt)
+ assign_fields_by_name(a, b, zero_unassigned=False)
+ assert_equal(a, np.array([((1,2),), ((1,3),)], dtype=a.dtype))
+ assign_fields_by_name(a, b)
+ assert_equal(a, np.array([((0,2),), ((0,3),)], dtype=a.dtype))
+
+ # test unstructured code path for 0d arrays
+ a, b = np.array(3), np.array(0)
+ assign_fields_by_name(b, a)
+ assert_equal(b[()], 3)
+
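For orientation, a standalone sketch of the new recfunctions helpers imported and tested above (available from numpy.lib.recfunctions as of 1.16):

import numpy as np
from numpy.lib.recfunctions import (
    structured_to_unstructured, unstructured_to_structured)

b = np.array([(1, 2., 5.), (4, 5., 7.)],
             dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
flat = structured_to_unstructured(b)              # (2, 3) float64 array
back = unstructured_to_structured(flat, b.dtype)  # round trip
print(flat.mean(axis=-1))                         # [2.6667 5.3333]
print(bool((back == b).all()))                    # True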
class TestRecursiveFillFields(object):
# Test recursive_fill_fields.
@@ -541,12 +613,8 @@ class TestStackArrays(object):
test = stack_arrays((a, b), autoconvert=True)
assert_equal(test, control)
assert_equal(test.mask, control.mask)
- try:
- test = stack_arrays((a, b), autoconvert=False)
- except TypeError:
- pass
- else:
- raise AssertionError
+ with assert_raises(TypeError):
+ stack_arrays((a, b), autoconvert=False)
def test_checktitles(self):
# Test using titles in the field names
diff --git a/numpy/lib/tests/test_shape_base.py b/numpy/lib/tests/test_shape_base.py
index c95894f94..01ea028bb 100644
--- a/numpy/lib/tests/test_shape_base.py
+++ b/numpy/lib/tests/test_shape_base.py
@@ -3,6 +3,8 @@ from __future__ import division, absolute_import, print_function
import numpy as np
import warnings
import functools
+import sys
+import pytest
from numpy.lib.shape_base import (
apply_along_axis, apply_over_axes, array_split, split, hsplit, dsplit,
@@ -14,6 +16,9 @@ from numpy.testing import (
)
+IS_64BIT = sys.maxsize > 2**32
+
+
def _add_keepdims(func):
""" hack in keepdims behavior into a function taking an axis """
@functools.wraps(func)
@@ -255,8 +260,8 @@ class TestApplyAlongAxis(object):
def test_with_iterable_object(self):
# from issue 5248
d = np.array([
- [set([1, 11]), set([2, 22]), set([3, 33])],
- [set([4, 44]), set([5, 55]), set([6, 66])]
+ [{1, 11}, {2, 22}, {3, 33}],
+ [{4, 44}, {5, 55}, {6, 66}]
])
actual = np.apply_along_axis(lambda a: set.union(*a), 0, d)
expected = np.array([{1, 11, 4, 44}, {2, 22, 5, 55}, {3, 33, 6, 66}])
@@ -293,6 +298,15 @@ class TestExpandDims(object):
assert_warns(DeprecationWarning, expand_dims, a, -6)
assert_warns(DeprecationWarning, expand_dims, a, 5)
+ def test_subclasses(self):
+ a = np.arange(10).reshape((2, 5))
+ a = np.ma.array(a, mask=a%3 == 0)
+
+ expanded = np.expand_dims(a, axis=1)
+ assert_(isinstance(expanded, np.ma.MaskedArray))
+ assert_equal(expanded.shape, (2, 1, 5))
+ assert_equal(expanded.mask.shape, (2, 1, 5))
+
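The subclass-preservation behaviour the new test checks, as a quick sketch: expand_dims now reshapes through the input's own machinery, so masked arrays keep their type and mask (assuming NumPy >= 1.16):

import numpy as np

data = np.arange(10).reshape(2, 5)
a = np.ma.array(data, mask=data % 3 == 0)
e = np.expand_dims(a, axis=1)
print(type(e).__name__)       # MaskedArray
print(e.shape, e.mask.shape)  # (2, 1, 5) (2, 1, 5)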
class TestArraySplit(object):
def test_integer_0_split(self):
@@ -394,6 +408,15 @@ class TestArraySplit(object):
assert_(a.dtype.type is res[-1].dtype.type)
# perhaps should check higher dimensions
+ @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform")
+ def test_integer_split_2D_rows_greater_max_int32(self):
+ a = np.broadcast_to([0], (1 << 32, 2))
+ res = array_split(a, 4)
+ chunk = np.broadcast_to([0], (1 << 30, 2))
+ tgt = [chunk] * 4
+ for i in range(len(tgt)):
+ assert_equal(res[i].shape, tgt[i].shape)
+
def test_index_split_simple(self):
a = np.arange(10)
indices = [1, 5, 7]
@@ -434,10 +457,35 @@ class TestSplit(object):
a = np.arange(10)
assert_raises(ValueError, split, a, 3)
+
class TestColumnStack(object):
def test_non_iterable(self):
assert_raises(TypeError, column_stack, 1)
+ def test_1D_arrays(self):
+ # example from docstring
+ a = np.array((1, 2, 3))
+ b = np.array((2, 3, 4))
+ expected = np.array([[1, 2],
+ [2, 3],
+ [3, 4]])
+ actual = np.column_stack((a, b))
+ assert_equal(actual, expected)
+
+ def test_2D_arrays(self):
+ # same as hstack 2D docstring example
+ a = np.array([[1], [2], [3]])
+ b = np.array([[2], [3], [4]])
+ expected = np.array([[1, 2],
+ [2, 3],
+ [3, 4]])
+ actual = np.column_stack((a, b))
+ assert_equal(actual, expected)
+
+ def test_generator(self):
+ with assert_warns(FutureWarning):
+ column_stack((np.arange(3) for _ in range(2)))
+
class TestDstack(object):
def test_non_iterable(self):
@@ -471,6 +519,10 @@ class TestDstack(object):
desired = np.array([[[1, 1], [2, 2]]])
assert_array_equal(res, desired)
+ def test_generator(self):
+ with assert_warns(FutureWarning):
+ dstack((np.arange(3) for _ in range(2)))
+
# array_split has more comprehensive tests of splitting.
# only do simple test on hsplit, vsplit, and dsplit
diff --git a/numpy/lib/tests/test_stride_tricks.py b/numpy/lib/tests/test_stride_tricks.py
index 3c2ca8b87..b2bd7da3e 100644
--- a/numpy/lib/tests/test_stride_tricks.py
+++ b/numpy/lib/tests/test_stride_tricks.py
@@ -3,7 +3,8 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.core._rational_tests import rational
from numpy.testing import (
- assert_equal, assert_array_equal, assert_raises, assert_
+ assert_equal, assert_array_equal, assert_raises, assert_,
+ assert_raises_regex
)
from numpy.lib.stride_tricks import (
as_strided, broadcast_arrays, _broadcast_shape, broadcast_to
@@ -57,6 +58,17 @@ def test_same():
assert_array_equal(x, bx)
assert_array_equal(y, by)
+def test_broadcast_kwargs():
+ # ensure that a TypeError is appropriately raised when
+ # np.broadcast_arrays() is called with any keyword
+ # argument other than 'subok'
+ x = np.arange(10)
+ y = np.arange(10)
+
+ with assert_raises_regex(TypeError,
+ r'broadcast_arrays\(\) got an unexpected keyword*'):
+ broadcast_arrays(x, y, dtype='float64')
+
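The keyword guard tested above, shown standalone (assuming NumPy >= 1.16, where 'subok' is the only accepted keyword):

import numpy as np

x, y = np.arange(10), np.arange(10)
try:
    np.broadcast_arrays(x, y, dtype='float64')
except TypeError as exc:
    print(exc)  # broadcast_arrays() got an unexpected keyword argument ...
print(len(np.broadcast_arrays(x, y, subok=True)))  # 2: 'subok' is allowed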
def test_one_off():
x = np.array([[1, 2, 3]])
diff --git a/numpy/lib/tests/test_ufunclike.py b/numpy/lib/tests/test_ufunclike.py
index 5604b3744..0f06876a1 100644
--- a/numpy/lib/tests/test_ufunclike.py
+++ b/numpy/lib/tests/test_ufunclike.py
@@ -4,8 +4,8 @@ import numpy as np
import numpy.core as nx
import numpy.lib.ufunclike as ufl
from numpy.testing import (
- assert_, assert_equal, assert_array_equal, assert_warns
- )
+ assert_, assert_equal, assert_array_equal, assert_warns, assert_raises
+)
class TestUfunclike(object):
@@ -21,6 +21,10 @@ class TestUfunclike(object):
assert_equal(res, tgt)
assert_equal(out, tgt)
+ a = a.astype(np.complex)
+ with assert_raises(TypeError):
+ ufl.isposinf(a)
+
def test_isneginf(self):
a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0])
out = nx.zeros(a.shape, bool)
@@ -32,6 +36,10 @@ class TestUfunclike(object):
assert_equal(res, tgt)
assert_equal(out, tgt)
+ a = a.astype(np.complex)
+ with assert_raises(TypeError):
+ ufl.isneginf(a)
+
def test_fix(self):
a = nx.array([[1.0, 1.1, 1.5, 1.8], [-1.0, -1.1, -1.5, -1.8]])
out = nx.zeros(a.shape, float)
@@ -52,7 +60,8 @@ class TestUfunclike(object):
return res
def __array_wrap__(self, obj, context=None):
- obj.metadata = self.metadata
+ if isinstance(obj, MyArray):
+ obj.metadata = self.metadata
return obj
def __array_finalize__(self, obj):
diff --git a/numpy/lib/tests/test_utils.py b/numpy/lib/tests/test_utils.py
index c27c3cbf5..2723f3440 100644
--- a/numpy/lib/tests/test_utils.py
+++ b/numpy/lib/tests/test_utils.py
@@ -56,10 +56,34 @@ def test_safe_eval_nameconstant():
utils.safe_eval('None')
-def test_byte_bounds():
- a = arange(12).reshape(3, 4)
- low, high = utils.byte_bounds(a)
- assert_equal(high - low, a.size * a.itemsize)
+class TestByteBounds(object):
+
+ def test_byte_bounds(self):
+ # pointer difference matches size * itemsize
+ # due to contiguity
+ a = arange(12).reshape(3, 4)
+ low, high = utils.byte_bounds(a)
+ assert_equal(high - low, a.size * a.itemsize)
+
+ def test_unusual_order_positive_stride(self):
+ a = arange(12).reshape(3, 4)
+ b = a.T
+ low, high = utils.byte_bounds(b)
+ assert_equal(high - low, b.size * b.itemsize)
+
+ def test_unusual_order_negative_stride(self):
+ a = arange(12).reshape(3, 4)
+ b = a.T[::-1]
+ low, high = utils.byte_bounds(b)
+ assert_equal(high - low, b.size * b.itemsize)
+
+ def test_strided(self):
+ a = arange(12)
+ b = a[::2]
+ low, high = utils.byte_bounds(b)
+ # the stride of 2 skips every other element of the base array, so
+ # the byte span is one itemsize short of size * stride
+ assert_equal(high - low, b.size * 2 * b.itemsize - b.itemsize)
def test_assert_raises_regex_context_manager():
diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py
index 98efba191..27d848608 100644
--- a/numpy/lib/twodim_base.py
+++ b/numpy/lib/twodim_base.py
@@ -3,11 +3,15 @@
"""
from __future__ import division, absolute_import, print_function
+import functools
+
from numpy.core.numeric import (
absolute, asanyarray, arange, zeros, greater_equal, multiply, ones,
asarray, where, int8, int16, int32, int64, empty, promote_types, diagonal,
nonzero
)
+from numpy.core.overrides import set_module
+from numpy.core import overrides
from numpy.core import iinfo, transpose
@@ -17,6 +21,10 @@ __all__ = [
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
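For context, the NEP 18 pattern repeated throughout this file: each public function gains a small dispatcher returning its array-like arguments, which array_function_dispatch uses to locate __array_function__ overrides before the default implementation runs. A toy sketch mirroring the diff (numpy.core.overrides is private, so this is illustrative, not a supported API):

import functools
import numpy as np
from numpy.core import overrides

array_function_dispatch = functools.partial(
    overrides.array_function_dispatch, module='numpy')

def _flip_dispatcher(m):
    # only arguments that may carry __array_function__ are returned
    return (m,)

@array_function_dispatch(_flip_dispatcher)
def toy_fliplr(m):
    """Toy re-implementation mirroring numpy.fliplr."""
    m = np.asanyarray(m)
    return m[:, ::-1]

print(toy_fliplr(np.arange(6).reshape(2, 3)))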
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
@@ -33,6 +41,11 @@ def _min_int(low, high):
return int64
+def _flip_dispatcher(m):
+ return (m,)
+
+
+@array_function_dispatch(_flip_dispatcher)
def fliplr(m):
"""
Flip array in the left/right direction.
@@ -83,6 +96,7 @@ def fliplr(m):
return m[:, ::-1]
+@array_function_dispatch(_flip_dispatcher)
def flipud(m):
"""
Flip array in the up/down direction.
@@ -137,6 +151,7 @@ def flipud(m):
return m[::-1, ...]
+@set_module('numpy')
def eye(N, M=None, k=0, dtype=float, order='C'):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
@@ -194,6 +209,11 @@ def eye(N, M=None, k=0, dtype=float, order='C'):
return m
+def _diag_dispatcher(v, k=None):
+ return (v,)
+
+
+@array_function_dispatch(_diag_dispatcher)
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
@@ -265,6 +285,7 @@ def diag(v, k=0):
raise ValueError("Input must be 1- or 2-d.")
+@array_function_dispatch(_diag_dispatcher)
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
@@ -324,6 +345,7 @@ def diagflat(v, k=0):
return wrap(res)
+@set_module('numpy')
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
@@ -373,6 +395,11 @@ def tri(N, M=None, k=0, dtype=float):
return m
+def _trilu_dispatcher(m, k=None):
+ return (m,)
+
+
+@array_function_dispatch(_trilu_dispatcher)
def tril(m, k=0):
"""
Lower triangle of an array.
@@ -411,6 +438,7 @@ def tril(m, k=0):
return where(mask, m, zeros(1, m.dtype))
+@array_function_dispatch(_trilu_dispatcher)
def triu(m, k=0):
"""
Upper triangle of an array.
@@ -439,7 +467,12 @@ def triu(m, k=0):
return where(mask, zeros(1, m.dtype), m)
+def _vander_dispatcher(x, N=None, increasing=None):
+ return (x,)
+
+
# Originally borrowed from John Hunter and matplotlib
+@array_function_dispatch(_vander_dispatcher)
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
@@ -530,6 +563,12 @@ def vander(x, N=None, increasing=False):
return v
+def _histogram2d_dispatcher(x, y, bins=None, range=None, normed=None,
+ weights=None, density=None):
+ return (x, y, bins, weights)
+
+
+@array_function_dispatch(_histogram2d_dispatcher)
def histogram2d(x, y, bins=10, range=None, normed=None, weights=None,
density=None):
"""
@@ -662,6 +701,7 @@ def histogram2d(x, y, bins=10, range=None, normed=None, weights=None,
return hist, edges[0], edges[1]
+@set_module('numpy')
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
@@ -732,6 +772,7 @@ def mask_indices(n, mask_func, k=0):
return nonzero(a != 0)
+@set_module('numpy')
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
@@ -812,6 +853,11 @@ def tril_indices(n, k=0, m=None):
return nonzero(tri(n, m, k=k, dtype=bool))
+def _trilu_indices_form_dispatcher(arr, k=None):
+ return (arr,)
+
+
+@array_function_dispatch(_trilu_indices_form_dispatcher)
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
@@ -840,6 +886,7 @@ def tril_indices_from(arr, k=0):
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
+@set_module('numpy')
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
@@ -922,6 +969,7 @@ def triu_indices(n, k=0, m=None):
return nonzero(~tri(n, m, k=k-1, dtype=bool))
+@array_function_dispatch(_trilu_indices_form_dispatcher)
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
diff --git a/numpy/lib/type_check.py b/numpy/lib/type_check.py
index 1664e6ebb..90b1e9a6e 100644
--- a/numpy/lib/type_check.py
+++ b/numpy/lib/type_check.py
@@ -2,6 +2,8 @@
"""
from __future__ import division, absolute_import, print_function
+import functools
+import warnings
__all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex',
'isreal', 'nan_to_num', 'real', 'real_if_close',
@@ -9,12 +11,21 @@ __all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex',
'common_type']
import numpy.core.numeric as _nx
-from numpy.core.numeric import asarray, asanyarray, array, isnan, zeros
+from numpy.core.numeric import asarray, asanyarray, isnan, zeros
+from numpy.core.overrides import set_module
+from numpy.core import overrides
from .ufunclike import isneginf, isposinf
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
_typecodes_by_elsize = 'GDFgdfQqLlIiHhBb?'
-def mintypecode(typechars,typeset='GDFgdf',default='d'):
+
+@set_module('numpy')
+def mintypecode(typechars, typeset='GDFgdf', default='d'):
"""
Return the character for the minimum-size type to which given types can
be safely cast.
@@ -64,13 +75,16 @@ def mintypecode(typechars,typeset='GDFgdf',default='d'):
return default
if 'F' in intersection and 'd' in intersection:
return 'D'
- l = []
- for t in intersection:
- i = _typecodes_by_elsize.index(t)
- l.append((i, t))
+ l = [(_typecodes_by_elsize.index(t), t) for t in intersection]
l.sort()
return l[0][1]
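The cleaned-up mintypecode in use (behaviour unchanged by the refactor; smallest element size wins among safe casts):

import numpy as np

print(np.mintypecode(['d', 'f']))  # 'd' -- smallest safe common type
print(np.mintypecode('FD'))        # 'D'
print(np.mintypecode('fF'))        # 'F'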
+
+def _asfarray_dispatcher(a, dtype=None):
+ return (a,)
+
+
+@array_function_dispatch(_asfarray_dispatcher)
def asfarray(a, dtype=_nx.float_):
"""
Return an array converted to a float type.
@@ -103,6 +117,11 @@ def asfarray(a, dtype=_nx.float_):
return asarray(a, dtype=dtype)
+def _real_dispatcher(val):
+ return (val,)
+
+
+@array_function_dispatch(_real_dispatcher)
def real(val):
"""
Return the real part of the complex argument.
@@ -144,6 +163,11 @@ def real(val):
return asanyarray(val).real
+def _imag_dispatcher(val):
+ return (val,)
+
+
+@array_function_dispatch(_imag_dispatcher)
def imag(val):
"""
Return the imaginary part of the complex argument.
@@ -182,6 +206,11 @@ def imag(val):
return asanyarray(val).imag
+def _is_type_dispatcher(x):
+ return (x,)
+
+
+@array_function_dispatch(_is_type_dispatcher)
def iscomplex(x):
"""
Returns a bool array, where True if input element is complex.
@@ -215,8 +244,10 @@ def iscomplex(x):
if issubclass(ax.dtype.type, _nx.complexfloating):
return ax.imag != 0
res = zeros(ax.shape, bool)
- return +res # convert to array-scalar if needed
+ return res[()] # convert to scalar if needed
+
+@array_function_dispatch(_is_type_dispatcher)
def isreal(x):
"""
Returns a bool array, where True if input element is real.
@@ -247,6 +278,8 @@ def isreal(x):
"""
return imag(x) == 0
+
+@array_function_dispatch(_is_type_dispatcher)
def iscomplexobj(x):
"""
Check for a complex type or an array of complex numbers.
@@ -287,6 +320,7 @@ def iscomplexobj(x):
return issubclass(type_, _nx.complexfloating)
+@array_function_dispatch(_is_type_dispatcher)
def isrealobj(x):
"""
Return True if x is a not complex type or an array of complex numbers.
@@ -328,6 +362,12 @@ def _getmaxmin(t):
f = getlimits.finfo(t)
return f.max, f.min
+
+def _nan_to_num_dispatcher(x, copy=None):
+ return (x,)
+
+
+@array_function_dispatch(_nan_to_num_dispatcher)
def nan_to_num(x, copy=True):
"""
Replace NaN with zero and infinity with large finite numbers.
@@ -410,7 +450,12 @@ def nan_to_num(x, copy=True):
#-----------------------------------------------------------------------------
-def real_if_close(a,tol=100):
+def _real_if_close_dispatcher(a, tol=None):
+ return (a,)
+
+
+@array_function_dispatch(_real_if_close_dispatcher)
+def real_if_close(a, tol=100):
"""
If complex input returns a real array if complex parts are close to zero.
@@ -465,10 +510,19 @@ def real_if_close(a,tol=100):
return a
+def _asscalar_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_asscalar_dispatcher)
def asscalar(a):
"""
Convert an array of size 1 to its scalar equivalent.
+ .. deprecated:: 1.16
+
+ Deprecated, use `numpy.ndarray.item()` instead.
+
Parameters
----------
a : ndarray
@@ -486,6 +540,10 @@ def asscalar(a):
24
"""
+
+ # 2018-10-10, 1.16
+ warnings.warn('np.asscalar(a) is deprecated since NumPy v1.16, use '
+ 'a.item() instead', DeprecationWarning, stacklevel=1)
return a.item()
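The deprecation in action, as a sketch (applies to NumPy 1.16 up to the eventual removal; a.item() is the documented replacement):

import warnings
import numpy as np

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    value = np.asscalar(np.array([24]))

print(value)                        # 24
print(caught[0].category.__name__)  # DeprecationWarning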
#-----------------------------------------------------------------------------
@@ -514,6 +572,7 @@ _namefromtype = {'S1': 'character',
'O': 'object'
}
+@set_module('numpy')
def typename(char):
"""
Return a description for the given data type code.
@@ -577,6 +636,13 @@ array_precision = {_nx.half: 0,
_nx.csingle: 1,
_nx.cdouble: 2,
_nx.clongdouble: 3}
+
+
+def _common_type_dispatcher(*arrays):
+ return arrays
+
+
+@array_function_dispatch(_common_type_dispatcher)
def common_type(*arrays):
"""
Return a scalar type which is common to the input arrays.
diff --git a/numpy/lib/ufunclike.py b/numpy/lib/ufunclike.py
index e0bd95182..9a9e6f9dd 100644
--- a/numpy/lib/ufunclike.py
+++ b/numpy/lib/ufunclike.py
@@ -8,9 +8,11 @@ from __future__ import division, absolute_import, print_function
__all__ = ['fix', 'isneginf', 'isposinf']
import numpy.core.numeric as nx
+from numpy.core.overrides import array_function_dispatch, ENABLE_ARRAY_FUNCTION
import warnings
import functools
+
def _deprecate_out_named_y(f):
"""
Allow the out argument to be passed as the name `y` (deprecated)
@@ -36,7 +38,34 @@ def _deprecate_out_named_y(f):
return func
+def _fix_out_named_y(f):
+ """
+ Allow the out argument to be passed as the name `y` (deprecated)
+
+ This decorator should only be used if _deprecate_out_named_y is used on
+ a corresponding dispatcher function.
+ """
+ @functools.wraps(f)
+ def func(x, out=None, **kwargs):
+ if 'y' in kwargs:
+ # we already did error checking in _deprecate_out_named_y
+ out = kwargs.pop('y')
+ return f(x, out=out, **kwargs)
+
+ return func
+
+
+if not ENABLE_ARRAY_FUNCTION:
+ _fix_out_named_y = _deprecate_out_named_y
+
+
@_deprecate_out_named_y
+def _dispatcher(x, out=None):
+ return (x, out)
+
+
+@array_function_dispatch(_dispatcher, verify=False, module='numpy')
+@_fix_out_named_y
def fix(x, out=None):
"""
Round to nearest integer towards zero.
@@ -81,7 +110,9 @@ def fix(x, out=None):
res = res[()]
return res
-@_deprecate_out_named_y
+
+@array_function_dispatch(_dispatcher, verify=False, module='numpy')
+@_fix_out_named_y
def isposinf(x, out=None):
"""
Test element-wise for positive infinity, return result as bool array.
@@ -116,8 +147,9 @@ def isposinf(x, out=None):
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754).
- Errors result if the second argument is also supplied when `x` is a
- scalar input, or if first and second arguments have different shapes.
+ Errors result if the second argument is also supplied when x is a scalar
+ input, if first and second arguments have different shapes, or if the
+ first argument has complex values.
Examples
--------
@@ -138,10 +170,18 @@ def isposinf(x, out=None):
array([0, 0, 1])
"""
- return nx.logical_and(nx.isinf(x), ~nx.signbit(x), out)
-
-
-@_deprecate_out_named_y
+ is_inf = nx.isinf(x)
+ try:
+ signbit = ~nx.signbit(x)
+ except TypeError:
+ raise TypeError('This operation is not supported for complex values '
+ 'because it would be ambiguous.')
+ else:
+ return nx.logical_and(is_inf, signbit, out)
+
+
+@array_function_dispatch(_dispatcher, verify=False, module='numpy')
+@_fix_out_named_y
def isneginf(x, out=None):
"""
Test element-wise for negative infinity, return result as bool array.
@@ -178,7 +218,8 @@ def isneginf(x, out=None):
(IEEE 754).
Errors result if the second argument is also supplied when x is a scalar
- input, or if first and second arguments have different shapes.
+ input, if first and second arguments have different shapes, or if the
+ first argument has complex values.
Examples
--------
@@ -199,4 +240,11 @@ def isneginf(x, out=None):
array([1, 0, 0])
"""
- return nx.logical_and(nx.isinf(x), nx.signbit(x), out)
+ is_inf = nx.isinf(x)
+ try:
+ signbit = nx.signbit(x)
+ except TypeError:
+ raise TypeError('This operation is not supported for complex values '
+ 'because it would be ambiguous.')
+ else:
+ return nx.logical_and(is_inf, signbit, out)
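Behaviour sketch for the complex-input guard added above (assuming NumPy >= 1.16):

import numpy as np

print(np.isneginf([np.inf, -np.inf, np.nan, 0.0]))  # [False  True False False]
try:
    np.isposinf(np.array([1 + 0j]))
except TypeError as exc:
    print(exc)  # This operation is not supported for complex values ...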
diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py
index 1ecd334af..84edf4021 100644
--- a/numpy/lib/utils.py
+++ b/numpy/lib/utils.py
@@ -7,6 +7,7 @@ import re
import warnings
from numpy.core.numerictypes import issubclass_, issubsctype, issubdtype
+from numpy.core.overrides import set_module
from numpy.core import ndarray, ufunc, asarray
import numpy as np
@@ -80,7 +81,6 @@ class _Deprecate(object):
new_name = self.new_name
message = self.message
- import warnings
if old_name is None:
try:
old_name = func.__name__
@@ -165,13 +165,6 @@ def deprecate(*args, **kwargs):
fn = args[0]
args = args[1:]
- # backward compatibility -- can be removed
- # after next release
- if 'newname' in kwargs:
- kwargs['new_name'] = kwargs.pop('newname')
- if 'oldname' in kwargs:
- kwargs['old_name'] = kwargs.pop('oldname')
-
return _Deprecate(*args, **kwargs)(fn)
else:
return _Deprecate(*args, **kwargs)
@@ -440,6 +433,7 @@ def _info(obj, output=sys.stdout):
print("type: %s" % obj.dtype, file=output)
+@set_module('numpy')
def info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'):
"""
Get help information for a function, class, or module.
@@ -645,6 +639,7 @@ def info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'):
print(inspect.getdoc(object), file=output)
+@set_module('numpy')
def source(object, output=sys.stdout):
"""
Print or write to a file the source code for a NumPy object.
@@ -702,6 +697,8 @@ _lookfor_caches = {}
# signature
_function_signature_re = re.compile(r"[a-z0-9_]+\(.*[,=].*\)", re.I)
+
+@set_module('numpy')
def lookfor(what, module=None, import_modules=True, regenerate=False,
output=None):
"""
@@ -982,12 +979,12 @@ def _getmembers(item):
#-----------------------------------------------------------------------------
# The following SafeEval class and company are adapted from Michael Spencer's
-# ASPN Python Cookbook recipe:
-# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/364469
+# ASPN Python Cookbook recipe: https://code.activestate.com/recipes/364469/
+#
# Accordingly it is mostly Copyright 2006 by Michael Spencer.
# The recipe, like most of the other ASPN Python Cookbook recipes was made
# available under the Python license.
-# http://www.python.org/license
+# https://en.wikipedia.org/wiki/Python_License
# It has been modified to:
# * handle unary -/+
diff --git a/numpy/linalg/__init__.py b/numpy/linalg/__init__.py
index 37bd27574..4b696c883 100644
--- a/numpy/linalg/__init__.py
+++ b/numpy/linalg/__init__.py
@@ -50,6 +50,6 @@ from .info import __doc__
from .linalg import *
-from numpy.testing._private.pytesttester import PytestTester
+from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index 5aad6c006..8363d7377 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -16,6 +16,7 @@ __all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
+import functools
import operator
import warnings
@@ -28,9 +29,16 @@ from numpy.core import (
swapaxes, divide, count_nonzero, isnan
)
from numpy.core.multiarray import normalize_axis_index
+from numpy.core.overrides import set_module
+from numpy.core import overrides
from numpy.lib.twodim_base import triu, eye
from numpy.linalg import lapack_lite, _umath_linalg
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy.linalg')
+
+
# For Python2/3 compatibility
_N = b'N'
_V = b'V'
@@ -40,7 +48,8 @@ _L = b'L'
fortran_int = intc
-# Error object
+
+@set_module('numpy.linalg')
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
@@ -68,7 +77,6 @@ class LinAlgError(Exception):
numpy.linalg.LinAlgError: Singular matrix
"""
- pass
def _determine_error_states():
@@ -133,11 +141,6 @@ def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
-_complex_types_map = {single : csingle,
- double : cdouble,
- csingle : csingle,
- cdouble : cdouble}
-
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
@@ -203,11 +206,6 @@ def _assertRankAtLeast2(*arrays):
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % a.ndim)
-def _assertSquareness(*arrays):
- for a in arrays:
- if max(a.shape) != min(a.shape):
- raise LinAlgError('Array must be square')
-
def _assertNdSquareness(*arrays):
for a in arrays:
m, n = a.shape[-2:]
@@ -247,6 +245,11 @@ def transpose(a):
# Linear equations
+def _tensorsolve_dispatcher(a, b, axes=None):
+ return (a, b)
+
+
+@array_function_dispatch(_tensorsolve_dispatcher)
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
@@ -316,6 +319,12 @@ def tensorsolve(a, b, axes=None):
res.shape = oldshape
return res
+
+def _solve_dispatcher(a, b):
+ return (a, b)
+
+
+@array_function_dispatch(_solve_dispatcher)
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
@@ -396,6 +405,11 @@ def solve(a, b):
return wrap(r.astype(result_t, copy=False))
+def _tensorinv_dispatcher(a, ind=None):
+ return (a,)
+
+
+@array_function_dispatch(_tensorinv_dispatcher)
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
@@ -465,6 +479,11 @@ def tensorinv(a, ind=2):
# Matrix inversion
+def _unary_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_unary_dispatcher)
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
@@ -533,6 +552,11 @@ def inv(a):
return wrap(ainv.astype(result_t, copy=False))
+def _matrix_power_dispatcher(a, n):
+ return (a,)
+
+
+@array_function_dispatch(_matrix_power_dispatcher)
def matrix_power(a, n):
"""
Raise a square matrix to the (integer) power `n`.
@@ -650,6 +674,8 @@ def matrix_power(a, n):
# Cholesky decomposition
+
+@array_function_dispatch(_unary_dispatcher)
def cholesky(a):
"""
Cholesky decomposition.
@@ -733,8 +759,14 @@ def cholesky(a):
r = gufunc(a, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
+
# QR decomposition
+def _qr_dispatcher(a, mode=None):
+ return (a,)
+
+
+@array_function_dispatch(_qr_dispatcher)
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
@@ -792,7 +824,7 @@ def qr(a, mode='reduced'):
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
- http://en.wikipedia.org/wiki/QR_factorization
+ https://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
@@ -870,13 +902,13 @@ def qr(a, mode='reduced'):
a, wrap = _makearray(a)
_assertRank2(a)
- _assertNoEmpty2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
+
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
@@ -887,14 +919,14 @@ def qr(a, mode='reduced'):
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
- results = lapack_routine(m, n, a, m, tau, work, -1, 0)
+ results = lapack_routine(m, n, a, max(1, m), tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# do qr decomposition
- lwork = int(abs(work[0]))
+ lwork = max(1, n, int(abs(work[0])))
work = zeros((lwork,), t)
- results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
+ results = lapack_routine(m, n, a, max(1, m), tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
@@ -930,14 +962,14 @@ def qr(a, mode='reduced'):
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
- results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
+ results = lapack_routine(m, mc, mn, q, max(1, m), tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
- lwork = int(abs(work[0]))
+ lwork = max(1, n, int(abs(work[0])))
work = zeros((lwork,), t)
- results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
+ results = lapack_routine(m, mc, mn, q, max(1, m), tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
@@ -950,6 +982,7 @@ def qr(a, mode='reduced'):
# Eigenvalues
+@array_function_dispatch(_unary_dispatcher)
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
@@ -977,8 +1010,10 @@ def eigvals(a):
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
- eigvalsh : eigenvalues of symmetric or Hermitian arrays.
- eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
+ eigvalsh : eigenvalues of real symmetric or complex Hermitian
+ (conjugate symmetric) arrays.
+ eigh : eigenvalues and eigenvectors of real symmetric or complex
+ Hermitian (conjugate symmetric) arrays.
Notes
-----
@@ -1037,9 +1072,15 @@ def eigvals(a):
return w.astype(result_t, copy=False)
+
+def _eigvalsh_dispatcher(a, UPLO=None):
+ return (a,)
+
+
+@array_function_dispatch(_eigvalsh_dispatcher)
def eigvalsh(a, UPLO='L'):
"""
- Compute the eigenvalues of a Hermitian or real symmetric matrix.
+ Compute the eigenvalues of a complex Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
@@ -1069,7 +1110,8 @@ def eigvalsh(a, UPLO='L'):
See Also
--------
- eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
+ eigh : eigenvalues and eigenvectors of real symmetric or complex Hermitian
+ (conjugate symmetric) arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
@@ -1137,6 +1179,7 @@ def _convertarray(a):
# Eigenvectors
+@array_function_dispatch(_unary_dispatcher)
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
@@ -1171,11 +1214,11 @@ def eig(a):
--------
eigvals : eigenvalues of a non-symmetric array.
- eigh : eigenvalues and eigenvectors of a symmetric or Hermitian
- (conjugate symmetric) array.
+ eigh : eigenvalues and eigenvectors of a real symmetric or complex
+ Hermitian (conjugate symmetric) array.
- eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
- array.
+ eigvalsh : eigenvalues of a real symmetric or complex Hermitian
+ (conjugate symmetric) array.
Notes
-----
@@ -1278,9 +1321,11 @@ def eig(a):
return w.astype(result_t, copy=False), wrap(vt)
+@array_function_dispatch(_eigvalsh_dispatcher)
def eigh(a, UPLO='L'):
"""
- Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
+ Return the eigenvalues and eigenvectors of a complex Hermitian
+ (conjugate symmetric) or a real symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
@@ -1289,7 +1334,7 @@ def eigh(a, UPLO='L'):
Parameters
----------
a : (..., M, M) array
- Hermitian/Symmetric matrices whose eigenvalues and
+ Hermitian or real symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
@@ -1316,7 +1361,8 @@ def eigh(a, UPLO='L'):
See Also
--------
- eigvalsh : eigenvalues of symmetric or Hermitian arrays.
+ eigvalsh : eigenvalues of real symmetric or complex Hermitian
+ (conjugate symmetric) arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
@@ -1415,6 +1461,11 @@ def eigh(a, UPLO='L'):
# Singular value decomposition
+def _svd_dispatcher(a, full_matrices=None, compute_uv=None):
+ return (a,)
+
+
+@array_function_dispatch(_svd_dispatcher)
def svd(a, full_matrices=True, compute_uv=True):
"""
Singular Value Decomposition.
@@ -1539,7 +1590,6 @@ def svd(a, full_matrices=True, compute_uv=True):
"""
a, wrap = _makearray(a)
- _assertNoEmpty2d(a)
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
@@ -1576,6 +1626,11 @@ def svd(a, full_matrices=True, compute_uv=True):
return s
+def _cond_dispatcher(x, p=None):
+ return (x,)
+
+
+@array_function_dispatch(_cond_dispatcher)
def cond(x, p=None):
"""
Compute the condition number of a matrix.
@@ -1656,6 +1711,7 @@ def cond(x, p=None):
"""
x = asarray(x) # in case we have a matrix
+ _assertNoEmpty2d(x)
if p is None or p == 2 or p == -2:
s = svd(x, compute_uv=False)
with errstate(all='ignore'):
@@ -1692,6 +1748,11 @@ def cond(x, p=None):
return r
+def _matrix_rank_dispatcher(M, tol=None, hermitian=None):
+ return (M,)
+
+
+@array_function_dispatch(_matrix_rank_dispatcher)
def matrix_rank(M, tol=None, hermitian=False):
"""
Return matrix rank of array using SVD method
@@ -1762,7 +1823,7 @@ def matrix_rank(M, tol=None, hermitian=False):
References
----------
.. [1] MATLAB reference documentation, "Rank"
- http://www.mathworks.com/help/techdoc/ref/rank.html
+ https://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
@@ -1796,7 +1857,12 @@ def matrix_rank(M, tol=None, hermitian=False):
# Generalized inverse
-def pinv(a, rcond=1e-15 ):
+def _pinv_dispatcher(a, rcond=None):
+ return (a,)
+
+
+@array_function_dispatch(_pinv_dispatcher)
+def pinv(a, rcond=1e-15):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
@@ -1880,8 +1946,11 @@ def pinv(a, rcond=1e-15 ):
res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u)))
return wrap(res)
+
# Determinant
+
+@array_function_dispatch(_unary_dispatcher)
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
@@ -1967,6 +2036,8 @@ def slogdet(a):
logdet = logdet.astype(real_t, copy=False)
return sign, logdet
+
+@array_function_dispatch(_unary_dispatcher)
def det(a):
"""
Compute the determinant of an array.
@@ -2023,8 +2094,14 @@ def det(a):
r = r.astype(result_t, copy=False)
return r
+
# Linear Least Squares
+def _lstsq_dispatcher(a, b, rcond=None):
+ return (a, b)
+
+
+@array_function_dispatch(_lstsq_dispatcher)
def lstsq(a, b, rcond="warn"):
"""
Return the least-squares solution to a linear matrix equation.
@@ -2122,13 +2199,13 @@ def lstsq(a, b, rcond="warn"):
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
- _assertNoEmpty2d(a, b) # TODO: relax this constraint
m, n = a.shape[-2:]
m2, n_rhs = b.shape[-2:]
if m != m2:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
+ # FIXME: real_t is unused
real_t = _linalgRealType(t)
result_real_t = _realType(result_t)
@@ -2153,7 +2230,16 @@ def lstsq(a, b, rcond="warn"):
signature = 'DDd->Ddid' if isComplexType(t) else 'ddd->ddid'
extobj = get_linalg_error_extobj(_raise_linalgerror_lstsq)
+ if n_rhs == 0:
+ # LAPACK can't handle n_rhs = 0, so allocate the array one larger in that axis
+ b = zeros(b.shape[:-2] + (m, n_rhs + 1), dtype=b.dtype)
x, resids, rank, s = gufunc(a, b, rcond, signature=signature, extobj=extobj)
+ if m == 0:
+ x[...] = 0
+ if n_rhs == 0:
+ # remove the item we added
+ x = x[..., :n_rhs]
+ resids = resids[..., :n_rhs]
# remove the axis we added
if is_1d:
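A sketch of the empty-input behaviour these lstsq changes enable (zero-sized problems no longer raise; based directly on the new test_empty_a_b cases, NumPy >= 1.16):

import numpy as np

a = np.ones((4, 0))          # no unknowns
b = np.ones((4, 2))
x, residuals, rank, s = np.linalg.lstsq(a, b, rcond=None)
print(x.shape, rank, s.shape)  # (0, 2) 0 (0,)

a = np.ones((0, 3))          # no equations: solution is all zeros
b = np.ones((0, 2))
x, residuals, rank, s = np.linalg.lstsq(a, b, rcond=None)
print(x.shape, (x == 0).all())  # (3, 2) True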
@@ -2200,6 +2286,11 @@ def _multi_svd_norm(x, row_axis, col_axis, op):
return result
+def _norm_dispatcher(x, ord=None, axis=None, keepdims=None):
+ return (x,)
+
+
+@array_function_dispatch(_norm_dispatcher)
def norm(x, ord=None, axis=None, keepdims=False):
"""
Matrix or vector norm.
@@ -2442,6 +2533,11 @@ def norm(x, ord=None, axis=None, keepdims=False):
# multi_dot
+def _multidot_dispatcher(arrays):
+ return arrays
+
+
+@array_function_dispatch(_multidot_dispatcher)
def multi_dot(arrays):
"""
Compute the dot product of two or more arrays in a single function call,
@@ -2480,7 +2576,7 @@ def multi_dot(arrays):
----------
.. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
- .. [2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication
+ .. [2] https://en.wikipedia.org/wiki/Matrix_chain_multiplication
Examples
--------
diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py
index 07c7813c9..235488c6e 100644
--- a/numpy/linalg/tests/test_linalg.py
+++ b/numpy/linalg/tests/test_linalg.py
@@ -13,13 +13,14 @@ import pytest
import numpy as np
from numpy import array, single, double, csingle, cdouble, dot, identity, matmul
-from numpy import multiply, atleast_2d, inf, asarray, matrix
+from numpy import multiply, atleast_2d, inf, asarray
from numpy import linalg
from numpy.linalg import matrix_power, norm, matrix_rank, multi_dot, LinAlgError
from numpy.linalg.linalg import _multi_dot_matrix_chain_order
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal,
- assert_almost_equal, assert_allclose, SkipTest, suppress_warnings
+ assert_almost_equal, assert_allclose, suppress_warnings,
+ assert_raises_regex,
)
@@ -462,12 +463,10 @@ class SolveCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
class TestSolve(SolveCases):
- def test_types(self):
- def check(dtype):
- x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
- assert_equal(linalg.solve(x, x).dtype, dtype)
- for dtype in [single, double, csingle, cdouble]:
- check(dtype)
+ @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
+ def test_types(self, dtype):
+ x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
+ assert_equal(linalg.solve(x, x).dtype, dtype)
def test_0_size(self):
class ArraySubclass(np.ndarray):
@@ -531,12 +530,10 @@ class InvCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
class TestInv(InvCases):
- def test_types(self):
- def check(dtype):
- x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
- assert_equal(linalg.inv(x).dtype, dtype)
- for dtype in [single, double, csingle, cdouble]:
- check(dtype)
+ @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
+ def test_types(self, dtype):
+ x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
+ assert_equal(linalg.inv(x).dtype, dtype)
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
@@ -564,14 +561,12 @@ class EigvalsCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
class TestEigvals(EigvalsCases):
- def test_types(self):
- def check(dtype):
- x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
- assert_equal(linalg.eigvals(x).dtype, dtype)
- x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
- assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype))
- for dtype in [single, double, csingle, cdouble]:
- check(dtype)
+ @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
+ def test_types(self, dtype):
+ x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
+ assert_equal(linalg.eigvals(x).dtype, dtype)
+ x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
+ assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype))
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
@@ -603,20 +598,17 @@ class EigCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
class TestEig(EigCases):
- def test_types(self):
- def check(dtype):
- x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
- w, v = np.linalg.eig(x)
- assert_equal(w.dtype, dtype)
- assert_equal(v.dtype, dtype)
-
- x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
- w, v = np.linalg.eig(x)
- assert_equal(w.dtype, get_complex_dtype(dtype))
- assert_equal(v.dtype, get_complex_dtype(dtype))
-
- for dtype in [single, double, csingle, cdouble]:
- check(dtype)
+ @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
+ def test_types(self, dtype):
+ x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
+ w, v = np.linalg.eig(x)
+ assert_equal(w.dtype, dtype)
+ assert_equal(v.dtype, dtype)
+
+ x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
+ w, v = np.linalg.eig(x)
+ assert_equal(w.dtype, get_complex_dtype(dtype))
+ assert_equal(v.dtype, get_complex_dtype(dtype))
def test_0_size(self):
# Check that all kinds of 0-sized arrays work
@@ -644,10 +636,6 @@ class TestEig(EigCases):
class SVDCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
- if 'size-0' in tags:
- assert_raises(LinAlgError, linalg.svd, a, 0)
- return
-
u, s, vt = linalg.svd(a, 0)
assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :],
np.asarray(vt)),
@@ -657,28 +645,29 @@ class SVDCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
class TestSVD(SVDCases):
- def test_types(self):
- def check(dtype):
- x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
- u, s, vh = linalg.svd(x)
- assert_equal(u.dtype, dtype)
- assert_equal(s.dtype, get_real_dtype(dtype))
- assert_equal(vh.dtype, dtype)
- s = linalg.svd(x, compute_uv=False)
- assert_equal(s.dtype, get_real_dtype(dtype))
-
- for dtype in [single, double, csingle, cdouble]:
- check(dtype)
-
- def test_0_size(self):
- # These raise errors currently
- # (which does not mean that it may not make sense)
- a = np.zeros((0, 0), dtype=np.complex64)
- assert_raises(linalg.LinAlgError, linalg.svd, a)
- a = np.zeros((0, 1), dtype=np.complex64)
- assert_raises(linalg.LinAlgError, linalg.svd, a)
- a = np.zeros((1, 0), dtype=np.complex64)
- assert_raises(linalg.LinAlgError, linalg.svd, a)
+ @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
+ def test_types(self, dtype):
+ x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
+ u, s, vh = linalg.svd(x)
+ assert_equal(u.dtype, dtype)
+ assert_equal(s.dtype, get_real_dtype(dtype))
+ assert_equal(vh.dtype, dtype)
+ s = linalg.svd(x, compute_uv=False)
+ assert_equal(s.dtype, get_real_dtype(dtype))
+
+ def test_empty_identity(self):
+ """ Empty input should put an identity matrix in u or vh """
+ x = np.empty((4, 0))
+ u, s, vh = linalg.svd(x, compute_uv=True)
+ assert_equal(u.shape, (4, 4))
+ assert_equal(vh.shape, (0, 0))
+ assert_equal(u, np.eye(4))
+
+ x = np.empty((0, 4))
+ u, s, vh = linalg.svd(x, compute_uv=True)
+ assert_equal(u.shape, (0, 0))
+ assert_equal(vh.shape, (4, 4))
+ assert_equal(vh, np.eye(4))
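The empty-SVD convention verified above, as a standalone snippet (NumPy >= 1.16):

import numpy as np

u, s, vh = np.linalg.svd(np.empty((4, 0)))
print(u.shape, s.shape, vh.shape)  # (4, 4) (0,) (0, 0)
print((u == np.eye(4)).all())      # True: the non-empty factor is identity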
class CondCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
@@ -842,15 +831,13 @@ class TestDet(DetCases):
assert_equal(type(linalg.slogdet([[0.0j]])[0]), cdouble)
assert_equal(type(linalg.slogdet([[0.0j]])[1]), double)
- def test_types(self):
- def check(dtype):
- x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
- assert_equal(np.linalg.det(x).dtype, dtype)
- ph, s = np.linalg.slogdet(x)
- assert_equal(s.dtype, get_real_dtype(dtype))
- assert_equal(ph.dtype, dtype)
- for dtype in [single, double, csingle, cdouble]:
- check(dtype)
+ @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
+ def test_types(self, dtype):
+ x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
+ assert_equal(np.linalg.det(x).dtype, dtype)
+ ph, s = np.linalg.slogdet(x)
+ assert_equal(s.dtype, get_real_dtype(dtype))
+ assert_equal(ph.dtype, dtype)
def test_0_size(self):
a = np.zeros((0, 0), dtype=np.complex64)
@@ -875,14 +862,12 @@ class TestDet(DetCases):
class LstsqCases(LinalgSquareTestCase, LinalgNonsquareTestCase):
def do(self, a, b, tags):
- if 'size-0' in tags:
- assert_raises(LinAlgError, linalg.lstsq, a, b)
- return
-
arr = np.asarray(a)
m, n = arr.shape
u, s, vt = linalg.svd(a, 0)
x, residuals, rank, sv = linalg.lstsq(a, b, rcond=-1)
+ if m == 0:
+ assert_((x == 0).all())
if m <= n:
assert_almost_equal(b, dot(a, x))
assert_equal(rank, m)
@@ -923,6 +908,38 @@ class TestLstsq(LstsqCases):
# Warning should be raised exactly once (first command)
assert_(len(w) == 1)
+ @pytest.mark.parametrize(["m", "n", "n_rhs"], [
+ (4, 2, 2),
+ (0, 4, 1),
+ (0, 4, 2),
+ (4, 0, 1),
+ (4, 0, 2),
+ (4, 2, 0),
+ (0, 0, 0)
+ ])
+ def test_empty_a_b(self, m, n, n_rhs):
+ a = np.arange(m * n).reshape(m, n)
+ b = np.ones((m, n_rhs))
+ x, residuals, rank, s = linalg.lstsq(a, b, rcond=None)
+ if m == 0:
+ assert_((x == 0).all())
+ assert_equal(x.shape, (n, n_rhs))
+ assert_equal(residuals.shape, ((n_rhs,) if m > n else (0,)))
+ if m > n and n_rhs > 0:
+ # residuals are exactly the squared norms of b's columns
+ r = b - np.dot(a, x)
+ assert_almost_equal(residuals, (r * r).sum(axis=-2))
+ assert_equal(rank, min(m, n))
+ assert_equal(s.shape, (min(m, n),))
+
+ def test_incompatible_dims(self):
+ # use modified version of docstring example
+ x = np.array([0, 1, 2, 3])
+ y = np.array([-1, 0.2, 0.9, 2.1, 3.3])
+ A = np.vstack([x, np.ones(len(x))]).T
+ with assert_raises_regex(LinAlgError, "Incompatible dimensions"):
+ linalg.lstsq(A, y, rcond=None)
+
@pytest.mark.parametrize('dt', [np.dtype(c) for c in '?bBhHiIqQefdgFDGO'])
class TestMatrixPower(object):
@@ -937,9 +954,7 @@ class TestMatrixPower(object):
#FIXME the 'e' dtype might work in future
dtnoinv = [object, np.dtype('e'), np.dtype('g'), np.dtype('G')]
-
def test_large_power(self, dt):
- power = matrix_power
rshft = self.rshft_1.astype(dt)
assert_equal(
matrix_power(rshft, 2**100 + 2**10 + 2**5 + 0), self.rshft_0)
@@ -1000,7 +1015,6 @@ class TestMatrixPower(object):
assert_raises(TypeError, matrix_power, mat, 1.5)
assert_raises(TypeError, matrix_power, mat, [1])
-
def test_exceptions_non_square(self, dt):
assert_raises(LinAlgError, matrix_power, np.array([1], dt), 1)
assert_raises(LinAlgError, matrix_power, np.array([[1], [2]], dt), 1)
@@ -1029,13 +1043,11 @@ class TestEigvalshCases(HermitianTestCase, HermitianGeneralizedTestCase):
class TestEigvalsh(object):
- def test_types(self):
- def check(dtype):
- x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
- w = np.linalg.eigvalsh(x)
- assert_equal(w.dtype, get_real_dtype(dtype))
- for dtype in [single, double, csingle, cdouble]:
- check(dtype)
+ @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
+ def test_types(self, dtype):
+ x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
+ w = np.linalg.eigvalsh(x)
+ assert_equal(w.dtype, get_real_dtype(dtype))
def test_invalid(self):
x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32)
@@ -1107,14 +1119,12 @@ class TestEighCases(HermitianTestCase, HermitianGeneralizedTestCase):
class TestEigh(object):
- def test_types(self):
- def check(dtype):
- x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
- w, v = np.linalg.eigh(x)
- assert_equal(w.dtype, get_real_dtype(dtype))
- assert_equal(v.dtype, dtype)
- for dtype in [single, double, csingle, cdouble]:
- check(dtype)
+ @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
+ def test_types(self, dtype):
+ x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
+ w, v = np.linalg.eigh(x)
+ assert_equal(w.dtype, get_real_dtype(dtype))
+ assert_equal(v.dtype, dtype)
def test_invalid(self):
x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32)
@@ -1599,9 +1609,23 @@ class TestQR(object):
assert_(isinstance(r2, a_type))
assert_almost_equal(r2, r1)
- def test_qr_empty(self):
- a = np.zeros((0, 2))
- assert_raises(linalg.LinAlgError, linalg.qr, a)
+
+ @pytest.mark.parametrize(["m", "n"], [
+ (3, 0),
+ (0, 3),
+ (0, 0)
+ ])
+ def test_qr_empty(self, m, n):
+ k = min(m, n)
+ a = np.empty((m, n))
+
+ self.check_qr(a)
+
+ h, tau = np.linalg.qr(a, mode='raw')
+ assert_equal(h.dtype, np.double)
+ assert_equal(tau.dtype, np.double)
+ assert_equal(h.shape, (n, m))
+ assert_equal(tau.shape, (k,))
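A quick standalone check of the new empty-matrix qr support (mirroring the parametrized test; NumPy >= 1.16):

import numpy as np

q, r = np.linalg.qr(np.empty((0, 3)))
print(q.shape, r.shape)    # (0, 0) (0, 3)

h, tau = np.linalg.qr(np.empty((3, 0)), mode='raw')
print(h.shape, tau.shape)  # (0, 3) (0,) -- raw mode returns h transposed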
def test_mode_raw(self):
# The factorization is not unique and varies between libraries,
@@ -1642,15 +1666,6 @@ class TestQR(object):
self.check_qr(m2)
self.check_qr(m2.T)
- def test_0_size(self):
- # There may be good ways to do (some of this) reasonably:
- a = np.zeros((0, 0))
- assert_raises(linalg.LinAlgError, linalg.qr, a)
- a = np.zeros((0, 1))
- assert_raises(linalg.LinAlgError, linalg.qr, a)
- a = np.zeros((1, 0))
- assert_raises(linalg.LinAlgError, linalg.qr, a)
-
class TestCholesky(object):
# TODO: are there no other tests for cholesky?
@@ -1742,7 +1757,7 @@ def test_xerbla_override():
pid = os.fork()
except (OSError, AttributeError):
# fork failed, or not running on POSIX
- raise SkipTest("Not POSIX or fork failed.")
+ pytest.skip("Not POSIX or fork failed.")
if pid == 0:
# child; close i/o file handles
@@ -1777,7 +1792,7 @@ def test_xerbla_override():
# parent
pid, status = os.wait()
if os.WEXITSTATUS(status) != XERBLA_OK:
- raise SkipTest('Numpy xerbla not linked in.')
+ pytest.skip('Numpy xerbla not linked in.')
def test_sdot_bug_8577():
@@ -1826,6 +1841,14 @@ class TestMultiDot(object):
assert_almost_equal(multi_dot([A, B, C]), A.dot(B).dot(C))
assert_almost_equal(multi_dot([A, B, C]), np.dot(A, np.dot(B, C)))
+ def test_basic_function_with_two_arguments(self):
+ # separate code path with two arguments
+ A = np.random.random((6, 2))
+ B = np.random.random((2, 6))
+
+ assert_almost_equal(multi_dot([A, B]), A.dot(B))
+ assert_almost_equal(multi_dot([A, B]), np.dot(A, B))
+
def test_basic_function_with_dynamic_programing_optimization(self):
# multi_dot with four or more arguments uses the dynamic programming
# optimization and therefore deserves a separate
@@ -1898,3 +1921,44 @@ class TestMultiDot(object):
def test_too_few_input_arrays(self):
assert_raises(ValueError, multi_dot, [])
assert_raises(ValueError, multi_dot, [np.random.random((3, 3))])
+
+
+class TestTensorinv(object):
+
+ @pytest.mark.parametrize("arr, ind", [
+ (np.ones((4, 6, 8, 2)), 2),
+ (np.ones((3, 3, 2)), 1),
+ ])
+ def test_non_square_handling(self, arr, ind):
+ with assert_raises(LinAlgError):
+ linalg.tensorinv(arr, ind=ind)
+
+ @pytest.mark.parametrize("shape, ind", [
+ # examples from docstring
+ ((4, 6, 8, 3), 2),
+ ((24, 8, 3), 1),
+ ])
+ def test_tensorinv_shape(self, shape, ind):
+ a = np.eye(24)
+ a.shape = shape
+ ainv = linalg.tensorinv(a=a, ind=ind)
+ expected = a.shape[ind:] + a.shape[:ind]
+ actual = ainv.shape
+ assert_equal(actual, expected)
+
+ @pytest.mark.parametrize("ind", [
+ 0, -2,
+ ])
+ def test_tensorinv_ind_limit(self, ind):
+ a = np.eye(24)
+ a.shape = (4, 6, 8, 3)
+ with assert_raises(ValueError):
+ linalg.tensorinv(a=a, ind=ind)
+
+ def test_tensorinv_result(self):
+ # mimic a docstring example
+ a = np.eye(24)
+ a.shape = (24, 8, 3)
+ ainv = linalg.tensorinv(a, ind=1)
+ b = np.ones(24)
+ assert_allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
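The new TestTensorinv cases pin down the shape contract of `linalg.tensorinv`: the inverse swaps the two index groups split at `ind`, and contracting it against a right-hand side reproduces `tensorsolve`. A minimal sketch of that contract, using only calls exercised by the tests above:

    import numpy as np

    a = np.eye(24).reshape(4, 6, 8, 3)              # invertible when flattened at ind=2
    ainv = np.linalg.tensorinv(a, ind=2)
    assert ainv.shape == a.shape[2:] + a.shape[:2]  # index groups swapped: (8, 3, 4, 6)

    b = np.ones((4, 6))
    np.testing.assert_allclose(np.tensordot(ainv, b, 2),
                               np.linalg.tensorsolve(a, b))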
diff --git a/numpy/linalg/umath_linalg.c.src b/numpy/linalg/umath_linalg.c.src
index 7dc1cb0cb..9fc68a7aa 100644
--- a/numpy/linalg/umath_linalg.c.src
+++ b/numpy/linalg/umath_linalg.c.src
@@ -2735,19 +2735,18 @@ static NPY_INLINE void
(fortran_int)dimensions[0],
(fortran_int)dimensions[1])) {
LINEARIZE_DATA_t a_in, u_out, s_out, v_out;
+ fortran_int min_m_n = params.M < params.N ? params.M : params.N;
init_linearize_data(&a_in, params.N, params.M, steps[1], steps[0]);
if ('N' == params.JOBZ) {
/* only the singular values are wanted */
- fortran_int min_m_n = params.M < params.N? params.M : params.N;
init_linearize_data(&s_out, 1, min_m_n, 0, steps[2]);
} else {
fortran_int u_columns, v_rows;
- fortran_int min_m_n = params.M < params.N? params.M : params.N;
if ('S' == params.JOBZ) {
u_columns = min_m_n;
v_rows = min_m_n;
- } else {
+ } else { /* JOBZ == 'A' */
u_columns = params.M;
v_rows = params.N;
}
@@ -2771,6 +2770,15 @@ static NPY_INLINE void
if ('N' == params.JOBZ) {
delinearize_@REALTYPE@_matrix(args[1], params.S, &s_out);
} else {
+ if ('A' == params.JOBZ && min_m_n == 0) {
+ /* Lapack has betrayed us and left these uninitialized,
+ * so produce an identity matrix for whichever of u
+ * and v is not empty.
+ */
+ identity_@TYPE@_matrix(params.U, params.M);
+ identity_@TYPE@_matrix(params.VT, params.N);
+ }
+
delinearize_@TYPE@_matrix(args[1], params.U, &u_out);
delinearize_@REALTYPE@_matrix(args[2], params.S, &s_out);
delinearize_@TYPE@_matrix(args[3], params.VT, &v_out);
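The change above works around LAPACK leaving `U` and `VT` untouched when the reduced dimension is zero: for `JOBZ == 'A'` the wrapper now writes explicit identity matrices before delinearizing. In Python terms, the behavior this enables should look roughly like the following (a sketch of the expected post-fix result, not a test from this diff):

    import numpy as np

    a = np.empty((0, 3))
    u, s, vt = np.linalg.svd(a, full_matrices=True)
    assert u.shape == (0, 0) and s.shape == (0,)
    # the non-empty factor is the identity the C code now fills in
    np.testing.assert_array_equal(vt, np.eye(3))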
diff --git a/numpy/ma/README.txt b/numpy/ma/README.txt
index ef9635e57..47f20d645 100644
--- a/numpy/ma/README.txt
+++ b/numpy/ma/README.txt
@@ -4,7 +4,7 @@ A Guide to Masked Arrays in NumPy
.. Contents::
-See http://www.scipy.org/scipy/numpy/wiki/MaskedArray
+See http://www.scipy.org/scipy/numpy/wiki/MaskedArray (dead link)
for updates of this document.
@@ -18,7 +18,7 @@ that could store some additional information along with numerical values,
while keeping the possibility for missing data (picture storing a series
of dates along with measurements, what would later become the `TimeSeries
Scikit <http://projects.scipy.org/scipy/scikits/wiki/TimeSeries>`__
-.
+(dead link).
I started to implement such a class, but then quickly realized that
any additional information disappeared when processing these subarrays
diff --git a/numpy/ma/__init__.py b/numpy/ma/__init__.py
index 34f21b8b1..36ceb1f6e 100644
--- a/numpy/ma/__init__.py
+++ b/numpy/ma/__init__.py
@@ -51,6 +51,6 @@ __all__ = ['core', 'extras']
__all__ += core.__all__
__all__ += extras.__all__
-from numpy.testing._private.pytesttester import PytestTester
+from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index a8cf02336..96d7207bd 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -43,16 +43,13 @@ from numpy.lib.function_base import angle
from numpy.compat import (
getargspec, formatargspec, long, basestring, unicode, bytes
)
-from numpy import expand_dims as n_expand_dims
+from numpy import expand_dims
from numpy.core.multiarray import normalize_axis_index
from numpy.core.numeric import normalize_axis_tuple
+from numpy.core._internal import recursive
+from numpy.core.numeric import pickle
-if sys.version_info[0] >= 3:
- import pickle
-else:
- import cPickle as pickle
-
__all__ = [
'MAError', 'MaskError', 'MaskType', 'MaskedArray', 'abs', 'absolute',
'add', 'all', 'allclose', 'allequal', 'alltrue', 'amax', 'amin',
@@ -215,7 +212,7 @@ def _recursive_fill_value(dtype, f):
"""
Recursively produce a fill value for `dtype`, calling f on scalar dtypes
"""
- if dtype.names:
+ if dtype.names is not None:
vals = tuple(_recursive_fill_value(dtype[name], f) for name in dtype.names)
return np.array(vals, dtype=dtype)[()] # decay to void scalar from 0d
elif dtype.subdtype:
@@ -433,7 +430,7 @@ def _recursive_set_fill_value(fillvalue, dt):
if cdtype.subdtype:
cdtype = cdtype.subdtype[0]
- if cdtype.names:
+ if cdtype.names is not None:
output_value.append(tuple(_recursive_set_fill_value(fval, cdtype)))
else:
output_value.append(np.array(fval, dtype=cdtype).item())
@@ -452,17 +449,15 @@ def _check_fill_value(fill_value, ndtype):
"""
ndtype = np.dtype(ndtype)
- fields = ndtype.fields
if fill_value is None:
fill_value = default_fill_value(ndtype)
- elif fields:
- fdtype = [(_[0], _[1]) for _ in ndtype.descr]
+ elif ndtype.names is not None:
if isinstance(fill_value, (ndarray, np.void)):
try:
- fill_value = np.array(fill_value, copy=False, dtype=fdtype)
+ fill_value = np.array(fill_value, copy=False, dtype=ndtype)
except ValueError:
err_msg = "Unable to transform %s to dtype %s"
- raise ValueError(err_msg % (fill_value, fdtype))
+ raise ValueError(err_msg % (fill_value, ndtype))
else:
fill_value = np.asarray(fill_value, dtype=object)
fill_value = np.array(_recursive_set_fill_value(fill_value, ndtype),
@@ -783,6 +778,10 @@ def fix_invalid(a, mask=nomask, copy=True, fill_value=None):
a._data[invalid] = fill_value
return a
+def is_string_or_list_of_strings(val):
+ return (isinstance(val, basestring) or
+ (isinstance(val, list) and val and
+ builtins.all(isinstance(s, basestring) for s in val)))
###############################################################################
# Ufuncs #
@@ -1285,7 +1284,7 @@ def _replace_dtype_fields_recursive(dtype, primitive_dtype):
_recurse = _replace_dtype_fields_recursive
# Do we have some name fields ?
- if dtype.names:
+ if dtype.names is not None:
descr = []
for name in dtype.names:
field = dtype.fields[name]
@@ -1550,7 +1549,7 @@ def _shrink_mask(m):
"""
Shrink a mask to nomask if possible
"""
- if not m.dtype.names and not m.any():
+ if m.dtype.names is None and not m.any():
return nomask
else:
return m
@@ -1732,12 +1731,13 @@ def mask_or(m1, m2, copy=False, shrink=True):
"""
- def _recursive_mask_or(m1, m2, newmask):
+ @recursive
+ def _recursive_mask_or(self, m1, m2, newmask):
names = m1.dtype.names
for name in names:
current1 = m1[name]
- if current1.dtype.names:
- _recursive_mask_or(current1, m2[name], newmask[name])
+ if current1.dtype.names is not None:
+ self(current1, m2[name], newmask[name])
else:
umath.logical_or(current1, m2[name], newmask[name])
return
@@ -1753,7 +1753,7 @@ def mask_or(m1, m2, copy=False, shrink=True):
(dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None))
if (dtype1 != dtype2):
raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2))
- if dtype1.names:
+ if dtype1.names is not None:
# Allocate an output mask array with the properly broadcast shape.
newmask = np.empty(np.broadcast(m1, m2).shape, dtype1)
_recursive_mask_or(m1, m2, newmask)
@@ -1796,7 +1796,7 @@ def flatten_mask(mask):
def _flatmask(mask):
"Flatten the mask and returns a (maybe nested) sequence of booleans."
mnames = mask.dtype.names
- if mnames:
+ if mnames is not None:
return [flatten_mask(mask[name]) for name in mnames]
else:
return mask
@@ -2434,7 +2434,7 @@ def _recursive_printoption(result, mask, printopt):
"""
names = result.dtype.names
- if names:
+ if names is not None:
for name in names:
curdata = result[name]
curmask = mask[name]
@@ -2486,7 +2486,7 @@ def _recursive_filled(a, mask, fill_value):
names = a.dtype.names
for name in names:
current = a[name]
- if current.dtype.names:
+ if current.dtype.names is not None:
_recursive_filled(current, mask[name], fill_value[name])
else:
np.copyto(current, fill_value[name], where=mask[name])
@@ -2873,12 +2873,12 @@ class MaskedArray(ndarray):
_data._mask = mask
_data._sharedmask = not copy
else:
- if _data.dtype.names:
+ if _data.dtype.names is not None:
def _recursive_or(a, b):
"do a|=b on each field of a, recursively"
for name in a.dtype.names:
(af, bf) = (a[name], b[name])
- if af.dtype.names:
+ if af.dtype.names is not None:
_recursive_or(af, bf)
else:
af |= bf
@@ -2965,7 +2965,7 @@ class MaskedArray(ndarray):
if isinstance(obj, ndarray):
# XX: This looks like a bug -- shouldn't it check self.dtype
# instead?
- if obj.dtype.names:
+ if obj.dtype.names is not None:
_mask = getmaskarray(obj)
else:
_mask = getmask(obj)
@@ -3247,7 +3247,7 @@ class MaskedArray(ndarray):
# Inherit attributes from self
dout._update_from(self)
# Check the fill_value
- if isinstance(indx, basestring):
+ if is_string_or_list_of_strings(indx):
if self._fill_value is not None:
dout._fill_value = self._fill_value[indx]
@@ -3300,15 +3300,14 @@ class MaskedArray(ndarray):
return
_dtype = _data.dtype
- nbfields = len(_dtype.names or ())
if value is masked:
# The mask wasn't set: create a full version.
if _mask is nomask:
_mask = self._mask = make_mask_none(self.shape, _dtype)
# Now, set the mask to its value.
- if nbfields:
- _mask[indx] = tuple([True] * nbfields)
+ if _dtype.names is not None:
+ _mask[indx] = tuple([True] * len(_dtype.names))
else:
_mask[indx] = True
return
@@ -3317,8 +3316,8 @@ class MaskedArray(ndarray):
dval = getattr(value, '_data', value)
# Get the _mask part of the new value
mval = getmask(value)
- if nbfields and mval is nomask:
- mval = tuple([False] * nbfields)
+ if _dtype.names is not None and mval is nomask:
+ mval = tuple([False] * len(_dtype.names))
if _mask is nomask:
# Set the data, then the mask
_data[indx] = dval
@@ -3333,7 +3332,7 @@ class MaskedArray(ndarray):
indx = indx * umath.logical_not(_mask)
_data[indx] = dval
else:
- if nbfields:
+ if _dtype.names is not None:
err_msg = "Flexible 'hard' masks are not yet supported."
raise NotImplementedError(err_msg)
mindx = mask_or(_mask[indx], mval, copy=True)
@@ -3714,7 +3713,7 @@ class MaskedArray(ndarray):
if self is masked_singleton:
return np.asanyarray(fill_value)
- if m.dtype.names:
+ if m.dtype.names is not None:
result = self._data.copy('K')
_recursive_filled(result, self._mask, fill_value)
elif not m.any():
@@ -3984,7 +3983,7 @@ class MaskedArray(ndarray):
mask = mask_or(smask, omask, copy=True)
odata = getdata(other)
- if mask.dtype.names:
+ if mask.dtype.names is not None:
# For possibly masked structured arrays we need to be careful,
# since the standard structured array comparison will use all
# fields, masked or not. To avoid masked fields influencing the
@@ -6341,7 +6340,7 @@ class MaskedConstant(MaskedArray):
def __copy__(self):
return self
-
+
def __deepcopy__(self, memo):
return self
@@ -6810,56 +6809,6 @@ def diag(v, k=0):
return output
-def expand_dims(x, axis):
- """
- Expand the shape of an array.
-
- Expands the shape of the array by including a new axis before the one
- specified by the `axis` parameter. This function behaves the same as
- `numpy.expand_dims` but preserves masked elements.
-
- See Also
- --------
- numpy.expand_dims : Equivalent function in top-level NumPy module.
-
- Examples
- --------
- >>> import numpy.ma as ma
- >>> x = ma.array([1, 2, 4])
- >>> x[1] = ma.masked
- >>> x
- masked_array(data = [1 -- 4],
- mask = [False True False],
- fill_value = 999999)
- >>> np.expand_dims(x, axis=0)
- array([[1, 2, 4]])
- >>> ma.expand_dims(x, axis=0)
- masked_array(data =
- [[1 -- 4]],
- mask =
- [[False True False]],
- fill_value = 999999)
-
- The same result can be achieved using slicing syntax with `np.newaxis`.
-
- >>> x[np.newaxis, :]
- masked_array(data =
- [[1 -- 4]],
- mask =
- [[False True False]],
- fill_value = 999999)
-
- """
- result = n_expand_dims(x, axis)
- if isinstance(x, MaskedArray):
- new_shape = result.shape
- result = x.view()
- result.shape = new_shape
- if result._mask is not nomask:
- result._mask.shape = new_shape
- return result
-
-
def left_shift(a, n):
"""
Shift the bits of an integer to the left.
@@ -7130,32 +7079,32 @@ size.__doc__ = np.size.__doc__
def where(condition, x=_NoValue, y=_NoValue):
"""
- Return a masked array with elements from x or y, depending on condition.
+ Return a masked array with elements from `x` or `y`, depending on condition.
- Returns a masked array, shaped like condition, where the elements
- are from `x` when `condition` is True, and from `y` otherwise.
- If neither `x` nor `y` are given, the function returns a tuple of
- indices where `condition` is True (the result of
- ``condition.nonzero()``).
+ .. note::
+ When only `condition` is provided, this function is identical to
+ `nonzero`. The rest of this documentation covers only the case where
+ all three arguments are provided.
Parameters
----------
condition : array_like, bool
- The condition to meet. For each True element, yield the corresponding
- element from `x`, otherwise from `y`.
+ Where True, yield `x`, otherwise yield `y`.
x, y : array_like, optional
Values from which to choose. `x`, `y` and `condition` need to be
broadcastable to some shape.
Returns
-------
- out : MaskedArray or tuple of ndarrays
- The resulting masked array if `x` and `y` were given, otherwise
- the result of ``condition.nonzero()``.
+ out : MaskedArray
+ A masked array with `masked` elements where the condition is masked,
+ elements from `x` where `condition` is True, and elements from `y`
+ elsewhere.
See Also
--------
numpy.where : Equivalent function in the top-level NumPy module.
+ nonzero : The function that is called when x and y are omitted
Examples
--------
@@ -7166,9 +7115,6 @@ def where(condition, x=_NoValue, y=_NoValue):
[[0.0 -- 2.0]
[-- 4.0 --]
[6.0 -- 8.0]]
- >>> np.ma.where(x > 5) # return the indices where x > 5
- (array([2, 2]), array([0, 2]))
-
>>> print(np.ma.where(x > 5, x, -3.1416))
[[-3.1416 -- -3.1416]
[-- -3.1416 --]
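Most of the churn in core.py is the switch from truth-testing `dtype.names` to comparing it with `None`. The distinction matters for fieldless void dtypes, whose `names` is an empty tuple: falsy, but not `None`, so the old checks misclassified them as unstructured (see the new `test_fieldless_void` below). A quick illustration:

    import numpy as np

    assert np.dtype(float).names is None   # plain dtype: no fields at all
    assert np.dtype([]).names == ()        # structured dtype with zero fields
    # truthiness conflates the two; the identity check does not
    assert not np.dtype([]).names and np.dtype([]).names is not None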
diff --git a/numpy/ma/mrecords.py b/numpy/ma/mrecords.py
index 90a5141b3..daf2f8770 100644
--- a/numpy/ma/mrecords.py
+++ b/numpy/ma/mrecords.py
@@ -29,7 +29,6 @@ from numpy.core.records import (
)
_byteorderconv = np.core.records._byteorderconv
-_typestr = ntypes._typestr
import numpy.ma as ma
from numpy.ma import (
@@ -48,24 +47,6 @@ __all__ = [
reserved_fields = ['_data', '_mask', '_fieldmask', 'dtype']
-def _getformats(data):
- """
- Returns the formats of arrays in arraylist as a comma-separated string.
-
- """
- if hasattr(data, 'dtype'):
- return ",".join([desc[1] for desc in data.dtype.descr])
-
- formats = ''
- for obj in data:
- obj = np.asarray(obj)
- formats += _typestr[obj.dtype.type]
- if issubclass(obj.dtype.type, ntypes.flexible):
- formats += repr(obj.itemsize)
- formats += ','
- return formats[:-1]
-
-
def _checknames(descr, names=None):
"""
Checks that field names ``descr`` are not reserved keywords.
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index a39e7dd3d..e0dbf1b1a 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -10,7 +10,6 @@ __author__ = "Pierre GF Gerard-Marchant"
import sys
import warnings
-import pickle
import operator
import itertools
import textwrap
@@ -27,7 +26,7 @@ from numpy.testing import (
assert_raises, assert_warns, suppress_warnings
)
from numpy import ndarray
-from numpy.compat import asbytes, asbytes_nested
+from numpy.compat import asbytes
from numpy.ma.testutils import (
assert_, assert_array_equal, assert_equal, assert_almost_equal,
assert_equal_records, fail_if_equal, assert_not_equal,
@@ -50,6 +49,7 @@ from numpy.ma.core import (
ravel, repeat, reshape, resize, shape, sin, sinh, sometrue, sort, sqrt,
subtract, sum, take, tan, tanh, transpose, where, zeros,
)
+from numpy.core.numeric import pickle
pi = np.pi
@@ -233,7 +233,7 @@ class TestMaskedArray(object):
x = np.array([('A', 0)], dtype={'names':['f0','f1'],
'formats':['S4','i8'],
'offsets':[0,8]})
- data = array(x) # used to fail due to 'V' padding field in x.dtype.descr
+ array(x) # used to fail due to 'V' padding field in x.dtype.descr
def test_asarray(self):
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
@@ -519,8 +519,6 @@ class TestMaskedArray(object):
fill_value=999999)''')
)
-
-
def test_str_repr_legacy(self):
oldopts = np.get_printoptions()
np.set_printoptions(legacy='1.13')
@@ -562,50 +560,55 @@ class TestMaskedArray(object):
True, # Fully masked
False) # Fully unmasked
- for mask in masks:
- a.mask = mask
- a_pickled = pickle.loads(a.dumps())
- assert_equal(a_pickled._mask, a._mask)
- assert_equal(a_pickled._data, a._data)
- if dtype in (object, int):
- assert_equal(a_pickled.fill_value, 999)
- else:
- assert_equal(a_pickled.fill_value, dtype(999))
- assert_array_equal(a_pickled.mask, mask)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ for mask in masks:
+ a.mask = mask
+ a_pickled = pickle.loads(pickle.dumps(a, protocol=proto))
+ assert_equal(a_pickled._mask, a._mask)
+ assert_equal(a_pickled._data, a._data)
+ if dtype in (object, int):
+ assert_equal(a_pickled.fill_value, 999)
+ else:
+ assert_equal(a_pickled.fill_value, dtype(999))
+ assert_array_equal(a_pickled.mask, mask)
def test_pickling_subbaseclass(self):
# Test pickling w/ a subclass of ndarray
x = np.array([(1.0, 2), (3.0, 4)],
dtype=[('x', float), ('y', int)]).view(np.recarray)
a = masked_array(x, mask=[(True, False), (False, True)])
- a_pickled = pickle.loads(a.dumps())
- assert_equal(a_pickled._mask, a._mask)
- assert_equal(a_pickled, a)
- assert_(isinstance(a_pickled._data, np.recarray))
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ a_pickled = pickle.loads(pickle.dumps(a, protocol=proto))
+ assert_equal(a_pickled._mask, a._mask)
+ assert_equal(a_pickled, a)
+ assert_(isinstance(a_pickled._data, np.recarray))
def test_pickling_maskedconstant(self):
# Test pickling MaskedConstant
mc = np.ma.masked
- mc_pickled = pickle.loads(mc.dumps())
- assert_equal(mc_pickled._baseclass, mc._baseclass)
- assert_equal(mc_pickled._mask, mc._mask)
- assert_equal(mc_pickled._data, mc._data)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ mc_pickled = pickle.loads(pickle.dumps(mc, protocol=proto))
+ assert_equal(mc_pickled._baseclass, mc._baseclass)
+ assert_equal(mc_pickled._mask, mc._mask)
+ assert_equal(mc_pickled._data, mc._data)
def test_pickling_wstructured(self):
# Tests pickling w/ structured array
a = array([(1, 1.), (2, 2.)], mask=[(0, 0), (0, 1)],
dtype=[('a', int), ('b', float)])
- a_pickled = pickle.loads(a.dumps())
- assert_equal(a_pickled._mask, a._mask)
- assert_equal(a_pickled, a)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ a_pickled = pickle.loads(pickle.dumps(a, protocol=proto))
+ assert_equal(a_pickled._mask, a._mask)
+ assert_equal(a_pickled, a)
def test_pickling_keepalignment(self):
# Tests pickling w/ F_CONTIGUOUS arrays
a = arange(10)
a.shape = (-1, 2)
b = a.T
- test = pickle.loads(pickle.dumps(b))
- assert_equal(test, b)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ test = pickle.loads(pickle.dumps(b, protocol=proto))
+ assert_equal(test, b)
def test_single_element_subscript(self):
# Tests single element subscripts of Maskedarrays.
@@ -793,7 +796,6 @@ class TestMaskedArray(object):
control = "(0, [[--, 0.0, --], [0.0, 0.0, --]], 0.0)"
assert_equal(str(t_2d0), control)
-
def test_flatten_structured_array(self):
# Test flatten_structured_array on arrays
# On ndarray
@@ -2027,6 +2029,17 @@ class TestFillingValues(object):
assert_equal(x.fill_value, 999.)
assert_equal(x._fill_value, np.array(999.))
+ def test_subarray_fillvalue(self):
+ # gh-10483 test multi-field index fill value
+ fields = array([(1, 1, 1)],
+ dtype=[('i', int), ('s', '|S8'), ('f', float)])
+ with suppress_warnings() as sup:
+ sup.filter(FutureWarning, "Numpy has detected")
+ subfields = fields[['i', 'f']]
+ assert_equal(tuple(subfields.fill_value), (999999, 1.e+20))
+ # test comparison does not raise:
+ subfields[1:] == subfields[:-1]
+
def test_fillvalue_exotic_dtype(self):
# Tests yet more exotic flexible dtypes
_check_fill_value = np.ma.core._check_fill_value
@@ -2388,9 +2401,9 @@ class TestMaskedArrayInPlaceArithmetics(object):
assert_equal(xm, y + 1)
(x, _, xm) = self.floatdata
- id1 = x.data.ctypes._data
+ id1 = x.data.ctypes.data
x += 1.
- assert_(id1 == x.data.ctypes._data)
+ assert_(id1 == x.data.ctypes.data)
assert_equal(x, y + 1.)
def test_inplace_addition_array(self):
@@ -3952,12 +3965,8 @@ class TestMaskedArrayFunctions(object):
def test_masked_where_shape_constraint(self):
a = arange(10)
- try:
- test = masked_equal(1, a)
- except IndexError:
- pass
- else:
- raise AssertionError("Should have failed...")
+ with assert_raises(IndexError):
+ masked_equal(1, a)
test = masked_equal(a, 1)
assert_equal(test.mask, [0, 1, 0, 0, 0, 0, 0, 0, 0, 0])
@@ -4951,13 +4960,13 @@ class TestMaskedConstant(object):
def test_pickle(self):
from io import BytesIO
- import pickle
- with BytesIO() as f:
- pickle.dump(np.ma.masked, f)
- f.seek(0)
- res = pickle.load(f)
- assert_(res is np.ma.masked)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ with BytesIO() as f:
+ pickle.dump(np.ma.masked, f, protocol=proto)
+ f.seek(0)
+ res = pickle.load(f)
+ assert_(res is np.ma.masked)
def test_copy(self):
# gh-9328
@@ -5179,3 +5188,18 @@ def test_astype_basic(dt1, dt2):
assert_(dst.fill_value.dtype == dt2)
assert_equal(src, dst)
+
+
+def test_fieldless_void():
+ dt = np.dtype([]) # a void dtype with no fields
+ x = np.empty(4, dt)
+
+ # these arrays contain no values, so there's little to test - but this
+ # shouldn't crash
+ mx = np.ma.array(x)
+ assert_equal(mx.dtype, x.dtype)
+ assert_equal(mx.shape, x.shape)
+
+ mx = np.ma.array(x, mask=x)
+ assert_equal(mx.dtype, x.dtype)
+ assert_equal(mx.shape, x.shape)
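The pickling tests are reworked to round-trip through every supported protocol rather than the single default protocol used by `a.dumps()`; the `numpy.core.numeric.pickle` import is the same Python 2/3 shim adopted in core.py above. The shared pattern, extracted into a hypothetical helper for clarity:

    from numpy.core.numeric import pickle

    def roundtrips(obj):
        # protocols 2 through the newest, as in the tests above
        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
            yield pickle.loads(pickle.dumps(obj, protocol=proto))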
diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py
index c29bec2bd..5243cf714 100644
--- a/numpy/ma/tests/test_extras.py
+++ b/numpy/ma/tests/test_extras.py
@@ -14,7 +14,7 @@ import itertools
import numpy as np
from numpy.testing import (
- assert_warns, suppress_warnings, assert_raises,
+ assert_warns, suppress_warnings
)
from numpy.ma.testutils import (
assert_, assert_array_equal, assert_equal, assert_almost_equal
@@ -29,9 +29,8 @@ from numpy.ma.extras import (
ediff1d, apply_over_axes, apply_along_axis, compress_nd, compress_rowcols,
mask_rowcols, clump_masked, clump_unmasked, flatnotmasked_contiguous,
notmasked_contiguous, notmasked_edges, masked_all, masked_all_like, isin,
- diagflat, stack, vstack, hstack
+ diagflat, stack, vstack
)
-import numpy.ma.extras as mae
class TestGeneric(object):
diff --git a/numpy/ma/tests/test_mrecords.py b/numpy/ma/tests/test_mrecords.py
index e08dc1326..dbbf1c8a1 100644
--- a/numpy/ma/tests/test_mrecords.py
+++ b/numpy/ma/tests/test_mrecords.py
@@ -7,9 +7,6 @@
"""
from __future__ import division, absolute_import, print_function
-import warnings
-import pickle
-
import numpy as np
import numpy.ma as ma
from numpy import recarray
@@ -26,6 +23,7 @@ from numpy.ma.testutils import (
assert_, assert_equal,
assert_equal_records,
)
+from numpy.core.numeric import pickle
class TestMRecords(object):
@@ -288,12 +286,13 @@ class TestMRecords(object):
# Test pickling
base = self.base.copy()
mrec = base.view(mrecarray)
- _ = pickle.dumps(mrec)
- mrec_ = pickle.loads(_)
- assert_equal(mrec_.dtype, mrec.dtype)
- assert_equal_records(mrec_._data, mrec._data)
- assert_equal(mrec_._mask, mrec._mask)
- assert_equal_records(mrec_._mask, mrec._mask)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ _ = pickle.dumps(mrec, protocol=proto)
+ mrec_ = pickle.loads(_)
+ assert_equal(mrec_.dtype, mrec.dtype)
+ assert_equal_records(mrec_._data, mrec._data)
+ assert_equal(mrec_._mask, mrec._mask)
+ assert_equal_records(mrec_._mask, mrec._mask)
def test_filled(self):
# Test filling the array
diff --git a/numpy/ma/tests/test_old_ma.py b/numpy/ma/tests/test_old_ma.py
index d7b1e3c18..2978be22c 100644
--- a/numpy/ma/tests/test_old_ma.py
+++ b/numpy/ma/tests/test_old_ma.py
@@ -8,7 +8,6 @@ import numpy.core.fromnumeric as fromnumeric
from numpy.testing import (
assert_, assert_raises, assert_equal,
)
-from numpy.ma.testutils import assert_array_equal
from numpy.ma import (
MaskType, MaskedArray, absolute, add, all, allclose, allequal, alltrue,
arange, arccos, arcsin, arctan, arctan2, array, average, choose,
@@ -22,6 +21,7 @@ from numpy.ma import (
repeat, resize, shape, sin, sinh, sometrue, sort, sqrt, subtract, sum,
take, tan, tanh, transpose, where, zeros,
)
+from numpy.core.numeric import pickle
pi = np.pi
@@ -549,13 +549,13 @@ class TestMa(object):
def test_testPickle(self):
# Test of pickling
- import pickle
x = arange(12)
x[4:10:2] = masked
x = x.reshape(4, 3)
- s = pickle.dumps(x)
- y = pickle.loads(s)
- assert_(eq(x, y))
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ s = pickle.dumps(x, protocol=proto)
+ y = pickle.loads(s)
+ assert_(eq(x, y))
def test_testMasked(self):
# Test of masked element
diff --git a/numpy/ma/tests/test_regression.py b/numpy/ma/tests/test_regression.py
index 96c418a51..54f1bda7d 100644
--- a/numpy/ma/tests/test_regression.py
+++ b/numpy/ma/tests/test_regression.py
@@ -1,7 +1,5 @@
from __future__ import division, absolute_import, print_function
-import warnings
-
import numpy as np
from numpy.testing import (
assert_, assert_array_equal, assert_allclose, suppress_warnings
@@ -84,3 +82,8 @@ class TestRegression(object):
assert_(a.mask.shape == (2,))
assert_(b.shape == (2, 2))
assert_(b.mask.shape == (2, 2))
+
+ def test_empty_list_on_structured(self):
+ # See gh-12464. Indexing with empty list should give empty result.
+ ma = np.ma.MaskedArray([(1, 1.), (2, 2.), (3, 3.)], dtype='i4,f4')
+ assert_array_equal(ma[[]], ma[:0])
diff --git a/numpy/matrixlib/__init__.py b/numpy/matrixlib/__init__.py
index 3ad3a9549..777e0cd33 100644
--- a/numpy/matrixlib/__init__.py
+++ b/numpy/matrixlib/__init__.py
@@ -7,6 +7,6 @@ from .defmatrix import *
__all__ = defmatrix.__all__
-from numpy.testing._private.pytesttester import PytestTester
+from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py
index 7baa401a8..93b344cd4 100644
--- a/numpy/matrixlib/defmatrix.py
+++ b/numpy/matrixlib/defmatrix.py
@@ -7,6 +7,7 @@ import warnings
import ast
import numpy.core.numeric as N
from numpy.core.numeric import concatenate, isscalar
+from numpy.core.overrides import set_module
# While not in __all__, matrix_power used to be defined here, so we import
# it for backward compatibility.
from numpy.linalg import matrix_power
@@ -33,6 +34,8 @@ def _convert_from_string(data):
newdata.append(newrow)
return newdata
+
+@set_module('numpy')
def asmatrix(data, dtype=None):
"""
Interpret the input as a matrix.
@@ -67,6 +70,8 @@ def asmatrix(data, dtype=None):
"""
return matrix(data, dtype=dtype, copy=False)
+
+@set_module('numpy')
class matrix(N.ndarray):
"""
matrix(data, dtype=None, copy=True)
@@ -1023,6 +1028,7 @@ def _from_string(str, gdict, ldict):
return concatenate(rowtup, axis=0)
+@set_module('numpy')
def bmat(obj, ldict=None, gdict=None):
"""
Build a matrix object from a string, nested sequence, or array.
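`set_module` only rewrites `__module__`, so introspection and documentation for these names report the public `numpy` namespace instead of the private defining module. For example, after this change:

    import numpy as np

    assert np.matrix.__module__ == 'numpy'    # was 'numpy.matrixlib.defmatrix'
    assert np.asmatrix.__module__ == 'numpy'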
diff --git a/numpy/matrixlib/setup.py b/numpy/matrixlib/setup.py
index 8c383cece..d0981d658 100644
--- a/numpy/matrixlib/setup.py
+++ b/numpy/matrixlib/setup.py
@@ -1,8 +1,6 @@
#!/usr/bin/env python
from __future__ import division, print_function
-import os
-
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('matrixlib', parent_package, top_path)
diff --git a/numpy/matrixlib/tests/test_defmatrix.py b/numpy/matrixlib/tests/test_defmatrix.py
index 4cff5ee9b..aa6e08d64 100644
--- a/numpy/matrixlib/tests/test_defmatrix.py
+++ b/numpy/matrixlib/tests/test_defmatrix.py
@@ -1,7 +1,5 @@
from __future__ import division, absolute_import, print_function
-import pytest
-
try:
# Accessing collections abstract classes from collections
# has been deprecated since Python 3.3
@@ -268,21 +266,13 @@ class TestAlgebra(object):
[3., 4.]])
# __rpow__
- try:
+ with assert_raises(TypeError):
1.0**A
- except TypeError:
- pass
- else:
- self.fail("matrix.__rpow__ doesn't raise a TypeError")
# __mul__ with something not a list, ndarray, tuple, or scalar
- try:
+ with assert_raises(TypeError):
A*object()
- except TypeError:
- pass
- else:
- self.fail("matrix.__mul__ with non-numeric object doesn't raise"
- "a TypeError")
+
class TestMatrixReturn(object):
def test_instance_methods(self):
diff --git a/numpy/matrixlib/tests/test_masked_matrix.py b/numpy/matrixlib/tests/test_masked_matrix.py
index 5ed8044aa..52fd18577 100644
--- a/numpy/matrixlib/tests/test_masked_matrix.py
+++ b/numpy/matrixlib/tests/test_masked_matrix.py
@@ -1,8 +1,5 @@
from __future__ import division, absolute_import, print_function
-import pickle
-import pytest
-
import numpy as np
from numpy.ma.testutils import (assert_, assert_equal, assert_raises,
assert_array_equal)
@@ -10,6 +7,7 @@ from numpy.ma.core import (masked_array, masked_values, masked, allequal,
MaskType, getmask, MaskedArray, nomask,
log, add, hypot, divide)
from numpy.ma.extras import mr_
+from numpy.core.numeric import pickle
class MMatrix(MaskedArray, np.matrix,):
@@ -79,10 +77,11 @@ class TestMaskedMatrix(object):
def test_pickling_subbaseclass(self):
# Test pickling w/ a subclass of ndarray
a = masked_array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2)
- a_pickled = pickle.loads(a.dumps())
- assert_equal(a_pickled._mask, a._mask)
- assert_equal(a_pickled, a)
- assert_(isinstance(a_pickled._data, np.matrix))
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ a_pickled = pickle.loads(pickle.dumps(a, protocol=proto))
+ assert_equal(a_pickled._mask, a._mask)
+ assert_equal(a_pickled, a)
+ assert_(isinstance(a_pickled._data, np.matrix))
def test_count_mean_with_matrix(self):
m = masked_array(np.matrix([[1, 2], [3, 4]]), mask=np.zeros((2, 2)))
diff --git a/numpy/matrixlib/tests/test_matrix_linalg.py b/numpy/matrixlib/tests/test_matrix_linalg.py
index 8d31ec5b0..6fc733c2e 100644
--- a/numpy/matrixlib/tests/test_matrix_linalg.py
+++ b/numpy/matrixlib/tests/test_matrix_linalg.py
@@ -1,8 +1,6 @@
""" Test functions for linalg module using the matrix class."""
from __future__ import division, absolute_import, print_function
-import pytest
-
import numpy as np
from numpy.linalg.tests.test_linalg import (
diff --git a/numpy/matrixlib/tests/test_multiarray.py b/numpy/matrixlib/tests/test_multiarray.py
index 8de0a7c6a..6d84bd477 100644
--- a/numpy/matrixlib/tests/test_multiarray.py
+++ b/numpy/matrixlib/tests/test_multiarray.py
@@ -1,7 +1,5 @@
from __future__ import division, absolute_import, print_function
-import pytest
-
import numpy as np
from numpy.testing import assert_, assert_equal, assert_array_equal
diff --git a/numpy/matrixlib/tests/test_numeric.py b/numpy/matrixlib/tests/test_numeric.py
index e9f44e747..95e1c8001 100644
--- a/numpy/matrixlib/tests/test_numeric.py
+++ b/numpy/matrixlib/tests/test_numeric.py
@@ -1,7 +1,5 @@
from __future__ import division, absolute_import, print_function
-import pytest
-
import numpy as np
from numpy.testing import assert_equal
diff --git a/numpy/matrixlib/tests/test_regression.py b/numpy/matrixlib/tests/test_regression.py
index 88654c76a..70e147279 100644
--- a/numpy/matrixlib/tests/test_regression.py
+++ b/numpy/matrixlib/tests/test_regression.py
@@ -1,7 +1,5 @@
from __future__ import division, absolute_import, print_function
-import pytest
-
import numpy as np
from numpy.testing import assert_, assert_equal, assert_raises
diff --git a/numpy/polynomial/__init__.py b/numpy/polynomial/__init__.py
index c18bebedb..85cee9ce6 100644
--- a/numpy/polynomial/__init__.py
+++ b/numpy/polynomial/__init__.py
@@ -22,6 +22,6 @@ from .hermite import Hermite
from .hermite_e import HermiteE
from .laguerre import Laguerre
-from numpy.testing._private.pytesttester import PytestTester
+from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
diff --git a/numpy/polynomial/_polybase.py b/numpy/polynomial/_polybase.py
index 78392d2a2..c28e77e69 100644
--- a/numpy/polynomial/_polybase.py
+++ b/numpy/polynomial/_polybase.py
@@ -9,7 +9,7 @@ abc module from the stdlib, hence it is only available for Python >= 2.6.
from __future__ import division, absolute_import, print_function
from abc import ABCMeta, abstractmethod, abstractproperty
-from numbers import Number
+import numbers
import numpy as np
from . import polyutils as pu
@@ -17,7 +17,7 @@ from . import polyutils as pu
__all__ = ['ABCPolyBase']
class ABCPolyBase(object):
- """An abstract base class for series classes.
+ """An abstract base class for immutable series classes.
ABCPolyBase provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' along with the
@@ -82,6 +82,10 @@ class ABCPolyBase(object):
def nickname(self):
pass
+ @abstractproperty
+ def basis_name(self):
+ pass
+
@abstractmethod
def _add(self):
pass
@@ -273,6 +277,82 @@ class ABCPolyBase(object):
name = self.nickname
return format % (name, coef)
+ @classmethod
+ def _repr_latex_term(cls, i, arg_str, needs_parens):
+ if cls.basis_name is None:
+ raise NotImplementedError(
+ "Subclasses must define either a basis name, or override "
+ "_repr_latex_term(i, arg_str, needs_parens)")
+ # since we always add parens, we don't care if the expression needs them
+ return "{{{basis}}}_{{{i}}}({arg_str})".format(
+ basis=cls.basis_name, i=i, arg_str=arg_str
+ )
+
+ @staticmethod
+ def _repr_latex_scalar(x):
+ # TODO: we're stuck with disabling math formatting until we handle
+ # exponents in this function
+ return r'\text{{{}}}'.format(x)
+
+ def _repr_latex_(self):
+ # get the scaled argument string to the basis functions
+ off, scale = self.mapparms()
+ if off == 0 and scale == 1:
+ term = 'x'
+ needs_parens = False
+ elif scale == 1:
+ term = '{} + x'.format(
+ self._repr_latex_scalar(off)
+ )
+ needs_parens = True
+ elif off == 0:
+ term = '{}x'.format(
+ self._repr_latex_scalar(scale)
+ )
+ needs_parens = True
+ else:
+ term = '{} + {}x'.format(
+ self._repr_latex_scalar(off),
+ self._repr_latex_scalar(scale)
+ )
+ needs_parens = True
+
+ mute = r"\color{{LightGray}}{{{}}}".format
+
+ parts = []
+ for i, c in enumerate(self.coef):
+ # prevent duplication of + and - signs
+ if i == 0:
+ coef_str = '{}'.format(self._repr_latex_scalar(c))
+ elif not isinstance(c, numbers.Real):
+ coef_str = ' + ({})'.format(self._repr_latex_scalar(c))
+ elif not np.signbit(c):
+ coef_str = ' + {}'.format(self._repr_latex_scalar(c))
+ else:
+ coef_str = ' - {}'.format(self._repr_latex_scalar(-c))
+
+ # produce the string for the term
+ term_str = self._repr_latex_term(i, term, needs_parens)
+ if term_str == '1':
+ part = coef_str
+ else:
+ part = r'{}\,{}'.format(coef_str, term_str)
+
+ if c == 0:
+ part = mute(part)
+
+ parts.append(part)
+
+ if parts:
+ body = ''.join(parts)
+ else:
+ # in case somehow there are no coefficients at all
+ body = '0'
+
+ return r'$x \mapsto {}$'.format(body)
+
+
+
# Pickle and copy
def __getstate__(self):
@@ -331,14 +411,14 @@ class ABCPolyBase(object):
return self.__class__(coef, self.domain, self.window)
def __div__(self, other):
- # set to __floordiv__, /, for now.
+ # this can be removed when python 2 support is dropped.
return self.__floordiv__(other)
def __truediv__(self, other):
# there is no true divide if the rhs is not a Number, although it
# could return the first n elements of an infinite series.
# It is hard to see where n would come from, though.
- if not isinstance(other, Number) or isinstance(other, bool):
+ if not isinstance(other, numbers.Number) or isinstance(other, bool):
form = "unsupported types for true division: '%s', '%s'"
raise TypeError(form % (type(self), type(other)))
return self.__floordiv__(other)
@@ -425,9 +505,6 @@ class ABCPolyBase(object):
rem = self.__class__(rem, self.domain, self.window)
return quo, rem
- # Enhance me
- # some augmented arithmetic operations could be added here
-
def __eq__(self, other):
res = (isinstance(other, self.__class__) and
np.all(self.domain == other.domain) and
@@ -773,7 +850,9 @@ class ABCPolyBase(object):
-------
new_series : series
A series that represents the least squares fit to the data and
- has the domain specified in the call.
+ has the domain and window specified in the call. If the
+ coefficients for the unscaled and unshifted basis polynomials are
+ of interest, do ``new_series.convert().coef``.
[resid, rank, sv, rcond] : list
These values are only returned if `full` = True
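The `_repr_latex_` hook added above is what IPython consumes to typeset a series. Concrete subclasses only need to declare `basis_name` (each class gains one at the end of this diff); the base class builds the mapped argument and strings the basis terms together, graying out zero coefficients. A quick way to see the output (scalar formatting goes through `_repr_latex_scalar`, which wraps values in `\text{}`):

    from numpy.polynomial import Chebyshev

    p = Chebyshev([1, 2, 3])
    print(p._repr_latex_())
    # roughly: $x \mapsto \text{1.0}\,{T}_{0}(x) + \text{2.0}\,{T}_{1}(x) + \text{3.0}\,{T}_{2}(x)$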
diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py
index 8add0acbc..92cdb18d2 100644
--- a/numpy/polynomial/chebyshev.py
+++ b/numpy/polynomial/chebyshev.py
@@ -21,9 +21,10 @@ Arithmetic
----------
- `chebadd` -- add two Chebyshev series.
- `chebsub` -- subtract one Chebyshev series from another.
+- `chebmulx` -- multiply a Chebyshev series in ``P_i(x)`` by ``x``.
- `chebmul` -- multiply two Chebyshev series.
- `chebdiv` -- divide one Chebyshev series by another.
-- `chebpow` -- raise a Chebyshev series to an positive integer power
+- `chebpow` -- raise a Chebyshev series to a positive integer power.
- `chebval` -- evaluate a Chebyshev series at given points.
- `chebval2d` -- evaluate a 2D Chebyshev series at given points.
- `chebval3d` -- evaluate a 3D Chebyshev series at given points.
@@ -83,12 +84,11 @@ References
----------
.. [1] A. T. Benjamin, et al., "Combinatorial Trigonometry with Chebyshev
Polynomials," *Journal of Statistical Planning and Inference 14*, 2008
- (preprint: http://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4)
+ (preprint: https://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4)
"""
from __future__ import division, absolute_import, print_function
-import numbers
import warnings
import numpy as np
import numpy.linalg as la
@@ -365,7 +365,7 @@ def poly2cheb(pol):
>>> c = p.convert(kind=P.Chebyshev)
>>> c
Chebyshev([ 1. , 3.25, 1. , 0.75], domain=[-1, 1], window=[-1, 1])
- >>> P.poly2cheb(range(4))
+ >>> P.chebyshev.poly2cheb(range(4))
array([ 1. , 3.25, 1. , 0.75])
"""
@@ -417,7 +417,7 @@ def cheb2poly(c):
>>> p = c.convert(kind=P.Polynomial)
>>> p
Polynomial([ -2., -8., 4., 12.], [-1., 1.])
- >>> P.cheb2poly(range(4))
+ >>> P.chebyshev.cheb2poly(range(4))
array([ -2., -8., 4., 12.])
"""
@@ -579,7 +579,7 @@ def chebadd(c1, c2):
See Also
--------
- chebsub, chebmul, chebdiv, chebpow
+ chebsub, chebmulx, chebmul, chebdiv, chebpow
Notes
-----
@@ -629,7 +629,7 @@ def chebsub(c1, c2):
See Also
--------
- chebadd, chebmul, chebdiv, chebpow
+ chebadd, chebmulx, chebmul, chebdiv, chebpow
Notes
-----
@@ -684,6 +684,12 @@ def chebmulx(c):
.. versionadded:: 1.5.0
+ Examples
+ --------
+ >>> from numpy.polynomial import chebyshev as C
+ >>> C.chebmulx([1,2,3])
+ array([ 1. , 2.5, 1. , 1.5])
+
"""
# c is a trimmed copy
[c] = pu.as_series([c])
@@ -722,7 +728,7 @@ def chebmul(c1, c2):
See Also
--------
- chebadd, chebsub, chebdiv, chebpow
+ chebadd, chebsub, chebmulx, chebdiv, chebpow
Notes
-----
@@ -773,7 +779,7 @@ def chebdiv(c1, c2):
See Also
--------
- chebadd, chebsub, chebmul, chebpow
+ chebadd, chebsub, chebmulx, chebmul, chebpow
Notes
-----
@@ -841,10 +847,13 @@ def chebpow(c, pow, maxpower=16):
See Also
--------
- chebadd, chebsub, chebmul, chebdiv
+ chebadd, chebsub, chebmulx, chebmul, chebdiv
Examples
--------
+ >>> from numpy.polynomial import chebyshev as C
+ >>> C.chebpow([1, 2, 3, 4], 2)
+ array([15.5, 22. , 16. , 14. , 12.5, 12. , 8. ])
"""
# c is a trimmed copy
@@ -1087,7 +1096,7 @@ def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
if n > 1:
tmp[2] = c[1]/4
for j in range(2, n):
- t = c[j]/(2*j + 1)
+ t = c[j]/(2*j + 1) # FIXME: t never used
tmp[j + 1] = c[j]/(2*(j + 1))
tmp[j - 1] -= c[j]/(2*(j - 1))
tmp[0] += k[i] - chebval(lbnd, tmp)
@@ -1708,7 +1717,7 @@ def chebfit(x, y, deg, rcond=None, full=False, w=None):
References
----------
.. [1] Wikipedia, "Curve fitting",
- http://en.wikipedia.org/wiki/Curve_fitting
+ https://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
@@ -2188,3 +2197,4 @@ class Chebyshev(ABCPolyBase):
nickname = 'cheb'
domain = np.array(chebdomain)
window = np.array(chebdomain)
+ basis_name = 'T'
diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py
index 58e9e180f..4905f366f 100644
--- a/numpy/polynomial/hermite.py
+++ b/numpy/polynomial/hermite.py
@@ -16,11 +16,12 @@ Constants
Arithmetic
----------
-- `hermmulx` -- multiply a Hermite series in ``P_i(x)`` by ``x``.
- `hermadd` -- add two Hermite series.
- `hermsub` -- subtract one Hermite series from another.
+- `hermmulx` -- multiply a Hermite series in ``P_i(x)`` by ``x``.
- `hermmul` -- multiply two Hermite series.
- `hermdiv` -- divide one Hermite series by another.
+- `hermpow` -- raise a Hermite series to a positive integer power.
- `hermval` -- evaluate a Hermite series at given points.
- `hermval2d` -- evaluate a 2D Hermite series at given points.
- `hermval3d` -- evaluate a 3D Hermite series at given points.
@@ -323,7 +324,7 @@ def hermadd(c1, c2):
See Also
--------
- hermsub, hermmul, hermdiv, hermpow
+ hermsub, hermmulx, hermmul, hermdiv, hermpow
Notes
-----
@@ -371,7 +372,7 @@ def hermsub(c1, c2):
See Also
--------
- hermadd, hermmul, hermdiv, hermpow
+ hermadd, hermmulx, hermmul, hermdiv, hermpow
Notes
-----
@@ -417,6 +418,10 @@ def hermmulx(c):
out : ndarray
Array representing the result of the multiplication.
+ See Also
+ --------
+ hermadd, hermsub, hermmul, hermdiv, hermpow
+
Notes
-----
The multiplication uses the recursion relationship for Hermite
@@ -469,7 +474,7 @@ def hermmul(c1, c2):
See Also
--------
- hermadd, hermsub, hermdiv, hermpow
+ hermadd, hermsub, hermmulx, hermdiv, hermpow
Notes
-----
@@ -537,7 +542,7 @@ def hermdiv(c1, c2):
See Also
--------
- hermadd, hermsub, hermmul, hermpow
+ hermadd, hermsub, hermmulx, hermmul, hermpow
Notes
-----
@@ -606,7 +611,7 @@ def hermpow(c, pow, maxpower=16):
See Also
--------
- hermadd, hermsub, hermmul, hermdiv
+ hermadd, hermsub, hermmulx, hermmul, hermdiv
Examples
--------
@@ -1476,7 +1481,7 @@ def hermfit(x, y, deg, rcond=None, full=False, w=None):
References
----------
.. [1] Wikipedia, "Curve fitting",
- http://en.wikipedia.org/wiki/Curve_fitting
+ https://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
@@ -1699,7 +1704,7 @@ def _normed_hermite_n(x, n):
"""
if n == 0:
- return np.ones(x.shape)/np.sqrt(np.sqrt(np.pi))
+ return np.full(x.shape, 1/np.sqrt(np.sqrt(np.pi)))
c0 = 0.
c1 = 1./np.sqrt(np.sqrt(np.pi))
@@ -1851,3 +1856,4 @@ class Hermite(ABCPolyBase):
nickname = 'herm'
domain = np.array(hermdomain)
window = np.array(hermdomain)
+ basis_name = 'H'
diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py
index 47b2a9fb4..6cb044a55 100644
--- a/numpy/polynomial/hermite_e.py
+++ b/numpy/polynomial/hermite_e.py
@@ -16,11 +16,12 @@ Constants
Arithmetic
----------
-- `hermemulx` -- multiply a Hermite_e series in ``P_i(x)`` by ``x``.
- `hermeadd` -- add two Hermite_e series.
- `hermesub` -- subtract one Hermite_e series from another.
+- `hermemulx` -- multiply a Hermite_e series in ``P_i(x)`` by ``x``.
- `hermemul` -- multiply two Hermite_e series.
- `hermediv` -- divide one Hermite_e series by another.
+- `hermepow` -- raise a Hermite_e series to a positive integer power.
- `hermeval` -- evaluate a Hermite_e series at given points.
- `hermeval2d` -- evaluate a 2D Hermite_e series at given points.
- `hermeval3d` -- evaluate a 3D Hermite_e series at given points.
@@ -324,7 +325,7 @@ def hermeadd(c1, c2):
See Also
--------
- hermesub, hermemul, hermediv, hermepow
+ hermesub, hermemulx, hermemul, hermediv, hermepow
Notes
-----
@@ -372,7 +373,7 @@ def hermesub(c1, c2):
See Also
--------
- hermeadd, hermemul, hermediv, hermepow
+ hermeadd, hermemulx, hermemul, hermediv, hermepow
Notes
-----
@@ -470,7 +471,7 @@ def hermemul(c1, c2):
See Also
--------
- hermeadd, hermesub, hermediv, hermepow
+ hermeadd, hermesub, hermemulx, hermediv, hermepow
Notes
-----
@@ -538,7 +539,7 @@ def hermediv(c1, c2):
See Also
--------
- hermeadd, hermesub, hermemul, hermepow
+ hermeadd, hermesub, hermemulx, hermemul, hermepow
Notes
-----
@@ -605,7 +606,7 @@ def hermepow(c, pow, maxpower=16):
See Also
--------
- hermeadd, hermesub, hermemul, hermediv
+ hermeadd, hermesub, hermemulx, hermemul, hermediv
Examples
--------
@@ -1473,7 +1474,7 @@ def hermefit(x, y, deg, rcond=None, full=False, w=None):
References
----------
.. [1] Wikipedia, "Curve fitting",
- http://en.wikipedia.org/wiki/Curve_fitting
+ https://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
@@ -1697,7 +1698,7 @@ def _normed_hermite_e_n(x, n):
"""
if n == 0:
- return np.ones(x.shape)/np.sqrt(np.sqrt(2*np.pi))
+ return np.full(x.shape, 1/np.sqrt(np.sqrt(2*np.pi)))
c0 = 0.
c1 = 1./np.sqrt(np.sqrt(2*np.pi))
@@ -1848,3 +1849,4 @@ class HermiteE(ABCPolyBase):
nickname = 'herme'
domain = np.array(hermedomain)
window = np.array(hermedomain)
+ basis_name = 'He'
diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py
index 5a9a5111a..a116d20a7 100644
--- a/numpy/polynomial/laguerre.py
+++ b/numpy/polynomial/laguerre.py
@@ -16,11 +16,12 @@ Constants
Arithmetic
----------
-- `lagmulx` -- multiply a Laguerre series in ``P_i(x)`` by ``x``.
- `lagadd` -- add two Laguerre series.
- `lagsub` -- subtract one Laguerre series from another.
+- `lagmulx` -- multiply a Laguerre series in ``P_i(x)`` by ``x``.
- `lagmul` -- multiply two Laguerre series.
- `lagdiv` -- divide one Laguerre series by another.
+- `lagpow` -- raise a Laguerre series to a positive integer power.
- `lagval` -- evaluate a Laguerre series at given points.
- `lagval2d` -- evaluate a 2D Laguerre series at given points.
- `lagval3d` -- evaluate a 3D Laguerre series at given points.
@@ -320,7 +321,7 @@ def lagadd(c1, c2):
See Also
--------
- lagsub, lagmul, lagdiv, lagpow
+ lagsub, lagmulx, lagmul, lagdiv, lagpow
Notes
-----
@@ -369,7 +370,7 @@ def lagsub(c1, c2):
See Also
--------
- lagadd, lagmul, lagdiv, lagpow
+ lagadd, lagmulx, lagmul, lagdiv, lagpow
Notes
-----
@@ -415,6 +416,10 @@ def lagmulx(c):
out : ndarray
Array representing the result of the multiplication.
+ See Also
+ --------
+ lagadd, lagsub, lagmul, lagdiv, lagpow
+
Notes
-----
The multiplication uses the recursion relationship for Laguerre
@@ -468,7 +473,7 @@ def lagmul(c1, c2):
See Also
--------
- lagadd, lagsub, lagdiv, lagpow
+ lagadd, lagsub, lagmulx, lagdiv, lagpow
Notes
-----
@@ -536,7 +541,7 @@ def lagdiv(c1, c2):
See Also
--------
- lagadd, lagsub, lagmul, lagpow
+ lagadd, lagsub, lagmulx, lagmul, lagpow
Notes
-----
@@ -603,7 +608,7 @@ def lagpow(c, pow, maxpower=16):
See Also
--------
- lagadd, lagsub, lagmul, lagdiv
+ lagadd, lagsub, lagmulx, lagmul, lagdiv
Examples
--------
@@ -1475,7 +1480,7 @@ def lagfit(x, y, deg, rcond=None, full=False, w=None):
References
----------
.. [1] Wikipedia, "Curve fitting",
- http://en.wikipedia.org/wiki/Curve_fitting
+ https://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
@@ -1801,3 +1806,4 @@ class Laguerre(ABCPolyBase):
nickname = 'lag'
domain = np.array(lagdomain)
window = np.array(lagdomain)
+ basis_name = 'L'
diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py
index 0d4a49afc..e9c24594b 100644
--- a/numpy/polynomial/legendre.py
+++ b/numpy/polynomial/legendre.py
@@ -27,12 +27,12 @@ Arithmetic
.. autosummary::
:toctree: generated/
- legmulx multiply a Legendre series in P_i(x) by x.
legadd add two Legendre series.
legsub subtract one Legendre series from another.
+ legmulx multiply a Legendre series in ``P_i(x)`` by ``x``.
legmul multiply two Legendre series.
legdiv divide one Legendre series by another.
- legpow raise a Legendre series to an positive integer power
+ legpow raise a Legendre series to a positive integer power.
legval evaluate a Legendre series at given points.
legval2d evaluate a 2D Legendre series at given points.
legval3d evaluate a 3D Legendre series at given points.
@@ -351,7 +351,7 @@ def legadd(c1, c2):
See Also
--------
- legsub, legmul, legdiv, legpow
+ legsub, legmulx, legmul, legdiv, legpow
Notes
-----
@@ -401,7 +401,7 @@ def legsub(c1, c2):
See Also
--------
- legadd, legmul, legdiv, legpow
+ legadd, legmulx, legmul, legdiv, legpow
Notes
-----
@@ -451,6 +451,10 @@ def legmulx(c):
out : ndarray
Array representing the result of the multiplication.
+ See Also
+ --------
+ legadd, legsub, legmul, legdiv, legpow
+
Notes
-----
The multiplication uses the recursion relationship for Legendre
@@ -460,6 +464,12 @@ def legmulx(c):
xP_i(x) = ((i + 1)*P_{i + 1}(x) + i*P_{i - 1}(x))/(2i + 1)
+ Examples
+ --------
+ >>> from numpy.polynomial import legendre as L
+ >>> L.legmulx([1,2,3])
+ array([ 0.66666667, 2.2, 1.33333333, 1.8])
+
"""
# c is a trimmed copy
[c] = pu.as_series([c])
@@ -500,7 +510,7 @@ def legmul(c1, c2):
See Also
--------
- legadd, legsub, legdiv, legpow
+ legadd, legsub, legmulx, legdiv, legpow
Notes
-----
@@ -570,7 +580,7 @@ def legdiv(c1, c2):
See Also
--------
- legadd, legsub, legmul, legpow
+ legadd, legsub, legmulx, legmul, legpow
Notes
-----
@@ -640,7 +650,7 @@ def legpow(c, pow, maxpower=16):
See Also
--------
- legadd, legsub, legmul, legdiv
+ legadd, legsub, legmulx, legmul, legdiv
Examples
--------
@@ -1509,7 +1519,7 @@ def legfit(x, y, deg, rcond=None, full=False, w=None):
References
----------
.. [1] Wikipedia, "Curve fitting",
- http://en.wikipedia.org/wiki/Curve_fitting
+ https://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
@@ -1831,3 +1841,4 @@ class Legendre(ABCPolyBase):
nickname = 'leg'
domain = np.array(legdomain)
window = np.array(legdomain)
+ basis_name = 'P'
diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py
index adbf30234..259cd31f5 100644
--- a/numpy/polynomial/polynomial.py
+++ b/numpy/polynomial/polynomial.py
@@ -18,9 +18,10 @@ Arithmetic
----------
- `polyadd` -- add two polynomials.
- `polysub` -- subtract one polynomial from another.
+- `polymulx` -- multiply a polynomial in ``P_i(x)`` by ``x``.
- `polymul` -- multiply two polynomials.
- `polydiv` -- divide one polynomial by another.
-- `polypow` -- raise a polynomial to an positive integer power
+- `polypow` -- raise a polynomial to a positive integer power.
- `polyval` -- evaluate a polynomial at given points.
- `polyval2d` -- evaluate a 2D polynomial at given points.
- `polyval3d` -- evaluate a 3D polynomial at given points.
@@ -224,7 +225,7 @@ def polyadd(c1, c2):
See Also
--------
- polysub, polymul, polydiv, polypow
+ polysub, polymulx, polymul, polydiv, polypow
Examples
--------
@@ -269,7 +270,7 @@ def polysub(c1, c2):
See Also
--------
- polyadd, polymul, polydiv, polypow
+ polyadd, polymulx, polymul, polydiv, polypow
Examples
--------
@@ -312,6 +313,10 @@ def polymulx(c):
out : ndarray
Array representing the result of the multiplication.
+ See Also
+ --------
+ polyadd, polysub, polymul, polydiv, polypow
+
Notes
-----
@@ -351,7 +356,7 @@ def polymul(c1, c2):
See Also
--------
- polyadd, polysub, polydiv, polypow
+ polyadd, polysub, polymulx, polydiv, polypow
Examples
--------
@@ -388,7 +393,7 @@ def polydiv(c1, c2):
See Also
--------
- polyadd, polysub, polymul, polypow
+ polyadd, polysub, polymulx, polymul, polypow
Examples
--------
@@ -450,10 +455,13 @@ def polypow(c, pow, maxpower=None):
See Also
--------
- polyadd, polysub, polymul, polydiv
+ polyadd, polysub, polymulx, polymul, polydiv
Examples
--------
+ >>> from numpy.polynomial import polynomial as P
+ >>> P.polypow([1,2,3], 2)
+ array([ 1., 4., 10., 12., 9.])
"""
# c is a trimmed copy
@@ -1643,3 +1651,15 @@ class Polynomial(ABCPolyBase):
nickname = 'poly'
domain = np.array(polydomain)
window = np.array(polydomain)
+ basis_name = None
+
+ @staticmethod
+ def _repr_latex_term(i, arg_str, needs_parens):
+ if needs_parens:
+ arg_str = r'\left({}\right)'.format(arg_str)
+ if i == 0:
+ return '1'
+ elif i == 1:
+ return arg_str
+ else:
+ return '{}^{{{}}}'.format(arg_str, i)
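`Polynomial` opts out of the basis-function notation by setting `basis_name = None` and rendering plain powers instead: term 0 collapses to `1`, term 1 is the bare argument, and higher terms get an exponent, with `\left( \right)` added only when the mapped argument needs it. Calling the staticmethod above directly (purely illustrative):

    from numpy.polynomial import Polynomial

    Polynomial._repr_latex_term(0, 'x', needs_parens=False)    # '1'
    Polynomial._repr_latex_term(1, 'x', needs_parens=False)    # 'x'
    Polynomial._repr_latex_term(2, '2.0x', needs_parens=True)  # '\\left(2.0x\\right)^{2}'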
diff --git a/numpy/polynomial/tests/test_chebyshev.py b/numpy/polynomial/tests/test_chebyshev.py
index 439dfa08d..7fb7492c6 100644
--- a/numpy/polynomial/tests/test_chebyshev.py
+++ b/numpy/polynomial/tests/test_chebyshev.py
@@ -3,6 +3,8 @@
"""
from __future__ import division, absolute_import, print_function
+from functools import reduce
+
import numpy as np
import numpy.polynomial.chebyshev as cheb
from numpy.polynomial.polynomial import polyval
@@ -111,6 +113,15 @@ class TestArithmetic(object):
res = cheb.chebadd(cheb.chebmul(quo, ci), rem)
assert_equal(trim(res), trim(tgt), err_msg=msg)
+ def test_chebpow(self):
+ for i in range(5):
+ for j in range(5):
+ msg = "At i=%d, j=%d" % (i, j)
+ c = np.arange(i + 1)
+ tgt = reduce(cheb.chebmul, [c]*j, np.array([1]))
+ res = cheb.chebpow(c, j)
+ assert_equal(trim(res), trim(tgt), err_msg=msg)
+
class TestEvaluation(object):
# coefficients of 1 + 2*x + 3*x**2
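This `test_chebpow` template recurs for the Hermite, HermiteE, Laguerre, and Legendre modules below: raise a series to the j-th power and compare against j-fold repeated multiplication. The oracle reduces to:

    from functools import reduce
    import numpy as np
    import numpy.polynomial.chebyshev as cheb

    c = np.arange(4)                                    # series 0*T0 + 1*T1 + 2*T2 + 3*T3
    tgt = reduce(cheb.chebmul, [c] * 3, np.array([1]))  # ((1 * c) * c) * c
    np.testing.assert_array_equal(cheb.chebtrim(cheb.chebpow(c, 3)),
                                  cheb.chebtrim(tgt))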
diff --git a/numpy/polynomial/tests/test_classes.py b/numpy/polynomial/tests/test_classes.py
index 738741668..15e24f92b 100644
--- a/numpy/polynomial/tests/test_classes.py
+++ b/numpy/polynomial/tests/test_classes.py
@@ -562,6 +562,56 @@ def test_ufunc_override(Poly):
assert_raises(TypeError, np.add, x, p)
+
+class TestLatexRepr(object):
+ """Test the latex repr used by ipython """
+
+ def as_latex(self, obj):
+ # right now we ignore the formatting of scalars in our tests, since
+ # it makes them too verbose. Ideally, the formatting of scalars will
+ # be fixed such that tests below continue to pass
+ obj._repr_latex_scalar = lambda x: str(x)
+ try:
+ return obj._repr_latex_()
+ finally:
+ del obj._repr_latex_scalar
+
+ def test_simple_polynomial(self):
+ # default input
+ p = Polynomial([1, 2, 3])
+ assert_equal(self.as_latex(p),
+ r'$x \mapsto 1.0 + 2.0\,x + 3.0\,x^{2}$')
+
+ # translated input
+ p = Polynomial([1, 2, 3], domain=[-2, 0])
+ assert_equal(self.as_latex(p),
+ r'$x \mapsto 1.0 + 2.0\,\left(1.0 + x\right) + 3.0\,\left(1.0 + x\right)^{2}$')
+
+ # scaled input
+ p = Polynomial([1, 2, 3], domain=[-0.5, 0.5])
+ assert_equal(self.as_latex(p),
+ r'$x \mapsto 1.0 + 2.0\,\left(2.0x\right) + 3.0\,\left(2.0x\right)^{2}$')
+
+ # affine input
+ p = Polynomial([1, 2, 3], domain=[-1, 0])
+ assert_equal(self.as_latex(p),
+ r'$x \mapsto 1.0 + 2.0\,\left(1.0 + 2.0x\right) + 3.0\,\left(1.0 + 2.0x\right)^{2}$')
+
+ def test_basis_func(self):
+ p = Chebyshev([1, 2, 3])
+ assert_equal(self.as_latex(p),
+ r'$x \mapsto 1.0\,{T}_{0}(x) + 2.0\,{T}_{1}(x) + 3.0\,{T}_{2}(x)$')
+ # affine input - check no surplus parens are added
+ p = Chebyshev([1, 2, 3], domain=[-1, 0])
+ assert_equal(self.as_latex(p),
+ r'$x \mapsto 1.0\,{T}_{0}(1.0 + 2.0x) + 2.0\,{T}_{1}(1.0 + 2.0x) + 3.0\,{T}_{2}(1.0 + 2.0x)$')
+
+ def test_multichar_basis_func(self):
+ p = HermiteE([1, 2, 3])
+ assert_equal(self.as_latex(p),
+ r'$x \mapsto 1.0\,{He}_{0}(x) + 2.0\,{He}_{1}(x) + 3.0\,{He}_{2}(x)$')
+
+
#
# Test class method that only exists for some classes
#
diff --git a/numpy/polynomial/tests/test_hermite.py b/numpy/polynomial/tests/test_hermite.py
index 18c26af8f..1287ef3fe 100644
--- a/numpy/polynomial/tests/test_hermite.py
+++ b/numpy/polynomial/tests/test_hermite.py
@@ -3,6 +3,8 @@
"""
from __future__ import division, absolute_import, print_function
+from functools import reduce
+
import numpy as np
import numpy.polynomial.hermite as herm
from numpy.polynomial.polynomial import polyval
@@ -99,6 +101,15 @@ class TestArithmetic(object):
res = herm.hermadd(herm.hermmul(quo, ci), rem)
assert_equal(trim(res), trim(tgt), err_msg=msg)
+ def test_hermpow(self):
+ for i in range(5):
+ for j in range(5):
+ msg = "At i=%d, j=%d" % (i, j)
+ c = np.arange(i + 1)
+ tgt = reduce(herm.hermmul, [c]*j, np.array([1]))
+ res = herm.hermpow(c, j)
+ assert_equal(trim(res), trim(tgt), err_msg=msg)
+
class TestEvaluation(object):
# coefficients of 1 + 2*x + 3*x**2
diff --git a/numpy/polynomial/tests/test_hermite_e.py b/numpy/polynomial/tests/test_hermite_e.py
index 58d74dae9..ccb44ad73 100644
--- a/numpy/polynomial/tests/test_hermite_e.py
+++ b/numpy/polynomial/tests/test_hermite_e.py
@@ -3,6 +3,8 @@
"""
from __future__ import division, absolute_import, print_function
+from functools import reduce
+
import numpy as np
import numpy.polynomial.hermite_e as herme
from numpy.polynomial.polynomial import polyval
@@ -99,6 +101,15 @@ class TestArithmetic(object):
res = herme.hermeadd(herme.hermemul(quo, ci), rem)
assert_equal(trim(res), trim(tgt), err_msg=msg)
+ def test_hermepow(self):
+ for i in range(5):
+ for j in range(5):
+ msg = "At i=%d, j=%d" % (i, j)
+ c = np.arange(i + 1)
+ tgt = reduce(herme.hermemul, [c]*j, np.array([1]))
+ res = herme.hermepow(c, j)
+ assert_equal(trim(res), trim(tgt), err_msg=msg)
+
class TestEvaluation(object):
# coefficients of 1 + 2*x + 3*x**2
diff --git a/numpy/polynomial/tests/test_laguerre.py b/numpy/polynomial/tests/test_laguerre.py
index 3cb630e46..3ababec5e 100644
--- a/numpy/polynomial/tests/test_laguerre.py
+++ b/numpy/polynomial/tests/test_laguerre.py
@@ -3,6 +3,8 @@
"""
from __future__ import division, absolute_import, print_function
+from functools import reduce
+
import numpy as np
import numpy.polynomial.laguerre as lag
from numpy.polynomial.polynomial import polyval
@@ -96,6 +98,15 @@ class TestArithmetic(object):
res = lag.lagadd(lag.lagmul(quo, ci), rem)
assert_almost_equal(trim(res), trim(tgt), err_msg=msg)
+ def test_lagpow(self):
+ for i in range(5):
+ for j in range(5):
+ msg = "At i=%d, j=%d" % (i, j)
+ c = np.arange(i + 1)
+ tgt = reduce(lag.lagmul, [c]*j, np.array([1]))
+ res = lag.lagpow(c, j)
+ assert_equal(trim(res), trim(tgt), err_msg=msg)
+
class TestEvaluation(object):
# coefficients of 1 + 2*x + 3*x**2
diff --git a/numpy/polynomial/tests/test_legendre.py b/numpy/polynomial/tests/test_legendre.py
index aeecd8775..a23086d59 100644
--- a/numpy/polynomial/tests/test_legendre.py
+++ b/numpy/polynomial/tests/test_legendre.py
@@ -3,6 +3,8 @@
"""
from __future__ import division, absolute_import, print_function
+from functools import reduce
+
import numpy as np
import numpy.polynomial.legendre as leg
from numpy.polynomial.polynomial import polyval
@@ -100,6 +102,15 @@ class TestArithmetic(object):
res = leg.legadd(leg.legmul(quo, ci), rem)
assert_equal(trim(res), trim(tgt), err_msg=msg)
+ def test_legpow(self):
+ for i in range(5):
+ for j in range(5):
+ msg = "At i=%d, j=%d" % (i, j)
+ c = np.arange(i + 1)
+ tgt = reduce(leg.legmul, [c]*j, np.array([1]))
+ res = leg.legpow(c, j)
+ assert_equal(trim(res), trim(tgt), err_msg=msg)
+
class TestEvaluation(object):
# coefficients of 1 + 2*x + 3*x**2
diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py
index 67728e35e..0c93be278 100644
--- a/numpy/polynomial/tests/test_polynomial.py
+++ b/numpy/polynomial/tests/test_polynomial.py
@@ -3,6 +3,8 @@
"""
from __future__ import division, absolute_import, print_function
+from functools import reduce
+
import numpy as np
import numpy.polynomial.polynomial as poly
from numpy.testing import (
@@ -102,6 +104,15 @@ class TestArithmetic(object):
res = poly.polyadd(poly.polymul(quo, ci), rem)
assert_equal(res, tgt, err_msg=msg)
+ def test_polypow(self):
+ for i in range(5):
+ for j in range(5):
+ msg = "At i=%d, j=%d" % (i, j)
+ c = np.arange(i + 1)
+ tgt = reduce(poly.polymul, [c]*j, np.array([1]))
+ res = poly.polypow(c, j)
+ assert_equal(trim(res), trim(tgt), err_msg=msg)
+
class TestEvaluation(object):
# coefficients of 1 + 2*x + 3*x**2
diff --git a/numpy/random/__init__.py b/numpy/random/__init__.py
index 81cb94cc1..965ab5ea9 100644
--- a/numpy/random/__init__.py
+++ b/numpy/random/__init__.py
@@ -6,17 +6,15 @@ Random Number Generation
==================== =========================================================
Utility functions
==============================================================================
-random Uniformly distributed values of a given shape.
+random_sample Uniformly distributed floats over ``[0, 1)``.
+random Alias for `random_sample`.
bytes Uniformly distributed random bytes.
random_integers Uniformly distributed integers in a given range.
-random_sample Uniformly distributed floats in a given range.
-random Alias for random_sample
-ranf Alias for random_sample
-sample Alias for random_sample
-choice Generate a weighted random sample from a given array-like
permutation Randomly permute a sequence / generate a random sequence.
shuffle Randomly permute a sequence in place.
seed Seed the random number generator.
+choice Random sample from 1-D array.
+
==================== =========================================================
==================== =========================================================
@@ -90,9 +88,55 @@ from __future__ import division, absolute_import, print_function
import warnings
-# To get sub-modules
-from .info import __doc__, __all__
-
+__all__ = [
+ 'beta',
+ 'binomial',
+ 'bytes',
+ 'chisquare',
+ 'choice',
+ 'dirichlet',
+ 'exponential',
+ 'f',
+ 'gamma',
+ 'geometric',
+ 'get_state',
+ 'gumbel',
+ 'hypergeometric',
+ 'laplace',
+ 'logistic',
+ 'lognormal',
+ 'logseries',
+ 'multinomial',
+ 'multivariate_normal',
+ 'negative_binomial',
+ 'noncentral_chisquare',
+ 'noncentral_f',
+ 'normal',
+ 'pareto',
+ 'permutation',
+ 'poisson',
+ 'power',
+ 'rand',
+ 'randint',
+ 'randn',
+ 'random_integers',
+ 'random_sample',
+ 'rayleigh',
+ 'seed',
+ 'set_state',
+ 'shuffle',
+ 'standard_cauchy',
+ 'standard_exponential',
+ 'standard_gamma',
+ 'standard_normal',
+ 'standard_t',
+ 'triangular',
+ 'uniform',
+ 'vonmises',
+ 'wald',
+ 'weibull',
+ 'zipf'
+]
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
@@ -117,6 +161,6 @@ def __RandomState_ctor():
"""
return RandomState(seed=0)
-from numpy.testing._private.pytesttester import PytestTester
+from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
diff --git a/numpy/random/info.py b/numpy/random/info.py
index be9c8d9bd..b9fd7f26a 100644
--- a/numpy/random/info.py
+++ b/numpy/random/info.py
@@ -1,139 +1,5 @@
-"""
-========================
-Random Number Generation
-========================
-
-==================== =========================================================
-Utility functions
-==============================================================================
-random_sample Uniformly distributed floats over ``[0, 1)``.
-random Alias for `random_sample`.
-bytes Uniformly distributed random bytes.
-random_integers Uniformly distributed integers in a given range.
-permutation Randomly permute a sequence / generate a random sequence.
-shuffle Randomly permute a sequence in place.
-seed Seed the random number generator.
-choice Random sample from 1-D array.
-
-==================== =========================================================
-
-==================== =========================================================
-Compatibility functions
-==============================================================================
-rand Uniformly distributed values.
-randn Normally distributed values.
-ranf Uniformly distributed floating point numbers.
-randint Uniformly distributed integers in a given range.
-==================== =========================================================
-
-==================== =========================================================
-Univariate distributions
-==============================================================================
-beta Beta distribution over ``[0, 1]``.
-binomial Binomial distribution.
-chisquare :math:`\\chi^2` distribution.
-exponential Exponential distribution.
-f F (Fisher-Snedecor) distribution.
-gamma Gamma distribution.
-geometric Geometric distribution.
-gumbel Gumbel distribution.
-hypergeometric Hypergeometric distribution.
-laplace Laplace distribution.
-logistic Logistic distribution.
-lognormal Log-normal distribution.
-logseries Logarithmic series distribution.
-negative_binomial Negative binomial distribution.
-noncentral_chisquare Non-central chi-square distribution.
-noncentral_f Non-central F distribution.
-normal Normal / Gaussian distribution.
-pareto Pareto distribution.
-poisson Poisson distribution.
-power Power distribution.
-rayleigh Rayleigh distribution.
-triangular Triangular distribution.
-uniform Uniform distribution.
-vonmises Von Mises circular distribution.
-wald Wald (inverse Gaussian) distribution.
-weibull Weibull distribution.
-zipf Zipf's distribution over ranked data.
-==================== =========================================================
-
-==================== =========================================================
-Multivariate distributions
-==============================================================================
-dirichlet Multivariate generalization of Beta distribution.
-multinomial Multivariate generalization of the binomial distribution.
-multivariate_normal Multivariate generalization of the normal distribution.
-==================== =========================================================
-
-==================== =========================================================
-Standard distributions
-==============================================================================
-standard_cauchy Standard Cauchy-Lorentz distribution.
-standard_exponential Standard exponential distribution.
-standard_gamma Standard Gamma distribution.
-standard_normal Standard normal distribution.
-standard_t Standard Student's t-distribution.
-==================== =========================================================
-
-==================== =========================================================
-Internal functions
-==============================================================================
-get_state Get tuple representing internal state of generator.
-set_state Set state of generator.
-==================== =========================================================
-
-"""
from __future__ import division, absolute_import, print_function
-depends = ['core']
+from .. import __doc__
-__all__ = [
- 'beta',
- 'binomial',
- 'bytes',
- 'chisquare',
- 'choice',
- 'dirichlet',
- 'exponential',
- 'f',
- 'gamma',
- 'geometric',
- 'get_state',
- 'gumbel',
- 'hypergeometric',
- 'laplace',
- 'logistic',
- 'lognormal',
- 'logseries',
- 'multinomial',
- 'multivariate_normal',
- 'negative_binomial',
- 'noncentral_chisquare',
- 'noncentral_f',
- 'normal',
- 'pareto',
- 'permutation',
- 'poisson',
- 'power',
- 'rand',
- 'randint',
- 'randn',
- 'random_integers',
- 'random_sample',
- 'rayleigh',
- 'seed',
- 'set_state',
- 'shuffle',
- 'standard_cauchy',
- 'standard_exponential',
- 'standard_gamma',
- 'standard_normal',
- 'standard_t',
- 'triangular',
- 'uniform',
- 'vonmises',
- 'wald',
- 'weibull',
- 'zipf'
-]
+depends = ['core']
diff --git a/numpy/random/mtrand/distributions.c b/numpy/random/mtrand/distributions.c
index b7e157915..2548a646e 100644
--- a/numpy/random/mtrand/distributions.c
+++ b/numpy/random/mtrand/distributions.c
@@ -650,6 +650,9 @@ double rk_pareto(rk_state *state, double a)
double rk_weibull(rk_state *state, double a)
{
+ if (a == 0.0) {
+ return 0.0;
+ }
return pow(rk_standard_exponential(state), 1./a);
}
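Without this guard, `a == 0.0` would compute `pow(x, 1./0.)`: under IEEE arithmetic `1./a` becomes +inf, and the result is then 0, 1 or inf depending on the sampled x. Pinning the degenerate case to 0.0 gives the deterministic behaviour that the updated `test_weibull_0` further down asserts; roughly:

    import numpy as np

    np.random.seed(0)
    print(np.random.weibull(a=0., size=3))   # [0. 0. 0.] after this change
    # a negative shape is still rejected:
    # np.random.weibull(a=-0.)  -> ValueError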
diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx
index 95b0accdc..21bc73e54 100644
--- a/numpy/random/mtrand/mtrand.pyx
+++ b/numpy/random/mtrand/mtrand.pyx
@@ -22,8 +22,8 @@
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
include "Python.pxi"
-include "randint_helpers.pxi"
include "numpy.pxd"
+include "randint_helpers.pxi"
include "cpython/pycapsule.pxd"
from libc cimport string
@@ -573,21 +573,21 @@ def _shape_from_size(size, d):
shape = tuple(size) + (d,)
return shape
-# Look up table for randint functions keyed by type name. The stored data
-# is a tuple (lbnd, ubnd, func), where lbnd is the smallest value for the
-# type, ubnd is one greater than the largest value, and func is the
+# Look up table for randint functions keyed by dtype.
+# The stored data is a tuple (lbnd, ubnd, func), where lbnd is the smallest
+# value for the type, ubnd is one greater than the largest value, and func is the
# function to call.
_randint_type = {
- 'bool': (0, 2, _rand_bool),
- 'int8': (-2**7, 2**7, _rand_int8),
- 'int16': (-2**15, 2**15, _rand_int16),
- 'int32': (-2**31, 2**31, _rand_int32),
- 'int64': (-2**63, 2**63, _rand_int64),
- 'uint8': (0, 2**8, _rand_uint8),
- 'uint16': (0, 2**16, _rand_uint16),
- 'uint32': (0, 2**32, _rand_uint32),
- 'uint64': (0, 2**64, _rand_uint64)
- }
+ np.dtype(np.bool_): (0, 2, _rand_bool),
+ np.dtype(np.int8): (-2**7, 2**7, _rand_int8),
+ np.dtype(np.int16): (-2**15, 2**15, _rand_int16),
+ np.dtype(np.int32): (-2**31, 2**31, _rand_int32),
+ np.dtype(np.int64): (-2**63, 2**63, _rand_int64),
+ np.dtype(np.uint8): (0, 2**8, _rand_uint8),
+ np.dtype(np.uint16): (0, 2**16, _rand_uint16),
+ np.dtype(np.uint32): (0, 2**32, _rand_uint32),
+ np.dtype(np.uint64): (0, 2**64, _rand_uint64)
+}
cdef class RandomState:
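Keying the table on `np.dtype` objects rather than on `.name` strings works because `np.dtype` normalizes the many spellings of a type to objects that hash and compare equal; a rough illustration:

    import numpy as np

    # all of these produce the same dictionary key:
    assert np.dtype('int32') == np.dtype(np.int32) == np.dtype('i4')
    table = {np.dtype(np.int32): 'handler'}
    print(table[np.dtype('i4')])   # 'handler'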
@@ -969,13 +969,12 @@ cdef class RandomState:
high = low
low = 0
- # '_randint_type' is defined in
- # 'generate_randint_helpers.py'
- key = np.dtype(dtype).name
- if key not in _randint_type:
- raise TypeError('Unsupported dtype "%s" for randint' % key)
-
- lowbnd, highbnd, randfunc = _randint_type[key]
+ raw_dtype = dtype
+ dtype = np.dtype(dtype)
+ try:
+ lowbnd, highbnd, randfunc = _randint_type[dtype]
+ except KeyError:
+ raise TypeError('Unsupported dtype "%s" for randint' % dtype)
# TODO: Do not cast these inputs to Python int
#
@@ -986,20 +985,20 @@ cdef class RandomState:
ihigh = int(high)
if ilow < lowbnd:
- raise ValueError("low is out of bounds for %s" % (key,))
+ raise ValueError("low is out of bounds for %s" % dtype)
if ihigh > highbnd:
- raise ValueError("high is out of bounds for %s" % (key,))
- if ilow >= ihigh:
- raise ValueError("low >= high")
-
+ raise ValueError("high is out of bounds for %s" % dtype)
+ if ilow >= ihigh and np.prod(size) != 0:
+ raise ValueError("Range cannot be empty (low >= high) unless no samples are taken")
+
with self.lock:
ret = randfunc(ilow, ihigh - 1, size, self.state_address)
- if size is None:
- if dtype in (np.bool, np.int, np.long):
- return dtype(ret)
+ # back-compat: keep python scalars when a python type is passed
+ if size is None and raw_dtype in (bool, int, np.long):
+ return raw_dtype(ret)
- return ret
+ return ret
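The net effect of the `np.prod(size) != 0` condition is that an empty range is now legal whenever zero samples are requested, since no value ever has to be produced, while a non-empty draw from an empty range still raises. Mirroring the tests added later in this diff:

    import numpy as np

    print(np.random.randint(10, 10, size=0).shape)        # (0,)
    print(np.random.randint(0, 0, size=(3, 0, 4)).shape)  # (3, 0, 4)
    # a scalar draw from an empty range is still an error:
    # np.random.randint(10, 10)  -> ValueError: Range cannot be empty ...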
def bytes(self, npy_intp length):
"""
@@ -1115,15 +1114,15 @@ cdef class RandomState:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
- raise ValueError("a must be 1-dimensional or an integer")
- if pop_size <= 0:
- raise ValueError("a must be greater than 0")
+ raise ValueError("'a' must be 1-dimensional or an integer")
+ if pop_size <= 0 and np.prod(size) != 0:
+ raise ValueError("'a' must be greater than 0 unless no samples are taken")
elif a.ndim != 1:
- raise ValueError("a must be 1-dimensional")
+ raise ValueError("'a' must be 1-dimensional")
else:
pop_size = a.shape[0]
- if pop_size is 0:
- raise ValueError("a must be non-empty")
+ if pop_size == 0 and np.prod(size) != 0:
+ raise ValueError("'a' cannot be empty unless no samples are taken")
if p is not None:
d = len(p)
@@ -1137,9 +1136,9 @@ cdef class RandomState:
pix = <double*>PyArray_DATA(p)
if p.ndim != 1:
- raise ValueError("p must be 1-dimensional")
+ raise ValueError("'p' must be 1-dimensional")
if p.size != pop_size:
- raise ValueError("a and p must have same size")
+ raise ValueError("'a' and 'p' must have same size")
if np.logical_or.reduce(p < 0):
raise ValueError("probabilities are not non-negative")
if abs(kahan_sum(pix, d) - 1.) > atol:
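`choice` gets the same relaxation: an empty population is acceptable as long as the requested output is itself empty. Again mirroring the tests added below:

    import numpy as np

    print(np.random.choice(0, size=0).shape)      # (0,)
    print(np.random.choice([], size=(0,)).shape)  # (0,)
    # drawing actual samples from an empty population still raises:
    # np.random.choice([], 10)  -> ValueError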
@@ -1607,7 +1606,7 @@ cdef class RandomState:
References
----------
.. [1] Wikipedia, "Normal distribution",
- http://en.wikipedia.org/wiki/Normal_distribution
+ https://en.wikipedia.org/wiki/Normal_distribution
.. [2] P. R. Peebles Jr., "Central Limit Theorem" in "Probability,
Random Variables and Random Signal Principles", 4th ed., 2001,
pp. 51, 51, 125.
@@ -1680,9 +1679,9 @@ cdef class RandomState:
Parameters
----------
a : float or array_like of floats
- Alpha, non-negative.
+ Alpha, positive (>0).
b : float or array_like of floats
- Beta, non-negative.
+ Beta, positive (>0).
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -1759,9 +1758,9 @@ cdef class RandomState:
.. [1] Peyton Z. Peebles Jr., "Probability, Random Variables and
Random Signal Principles", 4th ed, 2001, p. 57.
.. [2] Wikipedia, "Poisson process",
- http://en.wikipedia.org/wiki/Poisson_process
+ https://en.wikipedia.org/wiki/Poisson_process
.. [3] Wikipedia, "Exponential distribution",
- http://en.wikipedia.org/wiki/Exponential_distribution
+ https://en.wikipedia.org/wiki/Exponential_distribution
"""
cdef ndarray oscale
@@ -1860,7 +1859,7 @@ cdef class RandomState:
Wolfram Web Resource.
http://mathworld.wolfram.com/GammaDistribution.html
.. [2] Wikipedia, "Gamma distribution",
- http://en.wikipedia.org/wiki/Gamma_distribution
+ https://en.wikipedia.org/wiki/Gamma_distribution
Examples
--------
@@ -1950,7 +1949,7 @@ cdef class RandomState:
Wolfram Web Resource.
http://mathworld.wolfram.com/GammaDistribution.html
.. [2] Wikipedia, "Gamma distribution",
- http://en.wikipedia.org/wiki/Gamma_distribution
+ https://en.wikipedia.org/wiki/Gamma_distribution
Examples
--------
@@ -2047,7 +2046,7 @@ cdef class RandomState:
.. [1] Glantz, Stanton A. "Primer of Biostatistics.", McGraw-Hill,
Fifth Edition, 2002.
.. [2] Wikipedia, "F-distribution",
- http://en.wikipedia.org/wiki/F-distribution
+ https://en.wikipedia.org/wiki/F-distribution
Examples
--------
@@ -2150,7 +2149,7 @@ cdef class RandomState:
From MathWorld--A Wolfram Web Resource.
http://mathworld.wolfram.com/NoncentralF-Distribution.html
.. [2] Wikipedia, "Noncentral F-distribution",
- http://en.wikipedia.org/wiki/Noncentral_F-distribution
+ https://en.wikipedia.org/wiki/Noncentral_F-distribution
Examples
--------
@@ -2257,7 +2256,7 @@ cdef class RandomState:
References
----------
.. [1] NIST "Engineering Statistics Handbook"
- http://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm
+ https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm
Examples
--------
@@ -2333,8 +2332,8 @@ cdef class RandomState:
.. [1] Delhi, M.S. Holla, "On a noncentral chi-square distribution in
the analysis of weapon systems effectiveness", Metrika,
Volume 15, Number 1 / December, 1970.
- .. [2] Wikipedia, "Noncentral chi-square distribution"
- http://en.wikipedia.org/wiki/Noncentral_chi-square_distribution
+ .. [2] Wikipedia, "Noncentral chi-squared distribution"
+ https://en.wikipedia.org/wiki/Noncentral_chi-squared_distribution
Examples
--------
@@ -2433,12 +2432,12 @@ cdef class RandomState:
----------
.. [1] NIST/SEMATECH e-Handbook of Statistical Methods, "Cauchy
Distribution",
- http://www.itl.nist.gov/div898/handbook/eda/section3/eda3663.htm
+ https://www.itl.nist.gov/div898/handbook/eda/section3/eda3663.htm
.. [2] Weisstein, Eric W. "Cauchy Distribution." From MathWorld--A
Wolfram Web Resource.
http://mathworld.wolfram.com/CauchyDistribution.html
.. [3] Wikipedia, "Cauchy distribution"
- http://en.wikipedia.org/wiki/Cauchy_distribution
+ https://en.wikipedia.org/wiki/Cauchy_distribution
Examples
--------
@@ -2501,12 +2500,12 @@ cdef class RandomState:
.. [1] Dalgaard, Peter, "Introductory Statistics With R",
Springer, 2002.
.. [2] Wikipedia, "Student's t-distribution"
- http://en.wikipedia.org/wiki/Student's_t-distribution
+ https://en.wikipedia.org/wiki/Student's_t-distribution
Examples
--------
From Dalgaard page 83 [1]_, suppose the daily energy intake for 11
- women in Kj is:
+ women in kilojoules (kJ) is:
>>> intake = np.array([5260., 5470, 5640, 6180, 6390, 6515, 6805, 7515, \\
... 7515, 8230, 8770])
@@ -2731,7 +2730,7 @@ cdef class RandomState:
.. [3] Reiss, R.D., Thomas, M.(2001), Statistical Analysis of Extreme
Values, Birkhauser Verlag, Basel, pp 23-30.
.. [4] Wikipedia, "Pareto distribution",
- http://en.wikipedia.org/wiki/Pareto_distribution
+ https://en.wikipedia.org/wiki/Pareto_distribution
Examples
--------
@@ -2786,7 +2785,7 @@ cdef class RandomState:
Parameters
----------
a : float or array_like of floats
- Shape of the distribution. Should be greater than zero.
+ Shape parameter of the distribution. Must be nonnegative.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -2836,7 +2835,7 @@ cdef class RandomState:
Wide Applicability", Journal Of Applied Mechanics ASME Paper
1951.
.. [3] Wikipedia, "Weibull distribution",
- http://en.wikipedia.org/wiki/Weibull_distribution
+ https://en.wikipedia.org/wiki/Weibull_distribution
Examples
--------
@@ -2927,7 +2926,7 @@ cdef class RandomState:
Dataplot Reference Manual, Volume 2: Let Subcommands and Library
Functions", National Institute of Standards and Technology
Handbook Series, June 2003.
- http://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/powpdf.pdf
+ https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/powpdf.pdf
Examples
--------
@@ -3042,7 +3041,7 @@ cdef class RandomState:
From MathWorld--A Wolfram Web Resource.
http://mathworld.wolfram.com/LaplaceDistribution.html
.. [4] Wikipedia, "Laplace distribution",
- http://en.wikipedia.org/wiki/Laplace_distribution
+ https://en.wikipedia.org/wiki/Laplace_distribution
Examples
--------
@@ -3272,7 +3271,7 @@ cdef class RandomState:
MathWorld--A Wolfram Web Resource.
http://mathworld.wolfram.com/LogisticDistribution.html
.. [3] Wikipedia, "Logistic-distribution",
- http://en.wikipedia.org/wiki/Logistic_distribution
+ https://en.wikipedia.org/wiki/Logistic_distribution
Examples
--------
@@ -3366,7 +3365,7 @@ cdef class RandomState:
.. [1] Limpert, E., Stahel, W. A., and Abbt, M., "Log-normal
Distributions across the Sciences: Keys and Clues,"
BioScience, Vol. 51, No. 5, May, 2001.
- http://stat.ethz.ch/~stahel/lognormal/bioscience.pdf
+ https://stat.ethz.ch/~stahel/lognormal/bioscience.pdf
.. [2] Reiss, R.D. and Thomas, M., "Statistical Analysis of Extreme
Values," Basel: Birkhauser Verlag, 2001, pp. 31-32.
@@ -3472,9 +3471,9 @@ cdef class RandomState:
References
----------
.. [1] Brighton Webs Ltd., "Rayleigh Distribution,"
- http://www.brighton-webs.co.uk/distributions/rayleigh.asp
+ https://web.archive.org/web/20090514091424/http://brighton-webs.co.uk:80/distributions/rayleigh.asp
.. [2] Wikipedia, "Rayleigh distribution"
- http://en.wikipedia.org/wiki/Rayleigh_distribution
+ https://en.wikipedia.org/wiki/Rayleigh_distribution
Examples
--------
@@ -3560,12 +3559,12 @@ cdef class RandomState:
References
----------
.. [1] Brighton Webs Ltd., Wald Distribution,
- http://www.brighton-webs.co.uk/distributions/wald.asp
+ https://web.archive.org/web/20090423014010/http://www.brighton-webs.co.uk:80/distributions/wald.asp
.. [2] Chhikara, Raj S., and Folks, J. Leroy, "The Inverse Gaussian
Distribution: Theory : Methodology, and Applications", CRC Press,
1988.
- .. [3] Wikipedia, "Wald distribution"
- http://en.wikipedia.org/wiki/Wald_distribution
+ .. [3] Wikipedia, "Inverse Gaussian distribution"
+ https://en.wikipedia.org/wiki/Inverse_Gaussian_distribution
Examples
--------
@@ -3651,7 +3650,7 @@ cdef class RandomState:
References
----------
.. [1] Wikipedia, "Triangular distribution"
- http://en.wikipedia.org/wiki/Triangular_distribution
+ https://en.wikipedia.org/wiki/Triangular_distribution
Examples
--------
@@ -3758,7 +3757,7 @@ cdef class RandomState:
Wolfram Web Resource.
http://mathworld.wolfram.com/BinomialDistribution.html
.. [5] Wikipedia, "Binomial distribution",
- http://en.wikipedia.org/wiki/Binomial_distribution
+ https://en.wikipedia.org/wiki/Binomial_distribution
Examples
--------
@@ -3861,7 +3860,7 @@ cdef class RandomState:
MathWorld--A Wolfram Web Resource.
http://mathworld.wolfram.com/NegativeBinomialDistribution.html
.. [2] Wikipedia, "Negative binomial distribution",
- http://en.wikipedia.org/wiki/Negative_binomial_distribution
+ https://en.wikipedia.org/wiki/Negative_binomial_distribution
Examples
--------
@@ -3955,7 +3954,7 @@ cdef class RandomState:
From MathWorld--A Wolfram Web Resource.
http://mathworld.wolfram.com/PoissonDistribution.html
.. [2] Wikipedia, "Poisson distribution",
- http://en.wikipedia.org/wiki/Poisson_distribution
+ https://en.wikipedia.org/wiki/Poisson_distribution
Examples
--------
@@ -4144,15 +4143,15 @@ cdef class RandomState:
if op.shape == ():
fp = PyFloat_AsDouble(p)
- if fp < 0.0:
- raise ValueError("p < 0.0")
+ if fp <= 0.0:
+ raise ValueError("p <= 0.0")
if fp > 1.0:
raise ValueError("p > 1.0")
return discd_array_sc(self.internal_state, rk_geometric, size, fp,
self.lock)
- if np.any(np.less(op, 0.0)):
- raise ValueError("p < 0.0")
+ if np.any(np.less_equal(op, 0.0)):
+ raise ValueError("p <= 0.0")
if np.any(np.greater(op, 1.0)):
raise ValueError("p > 1.0")
return discd_array(self.internal_state, rk_geometric, size, op,
@@ -4199,12 +4198,12 @@ cdef class RandomState:
-----
The probability density for the Hypergeometric distribution is
- .. math:: P(x) = \\frac{\\binom{m}{n}\\binom{N-m}{n-x}}{\\binom{N}{n}},
+ .. math:: P(x) = \\frac{\\binom{g}{x}\\binom{b}{n-x}}{\\binom{g+b}{n}},
- where :math:`0 \\le x \\le m` and :math:`n+m-N \\le x \\le n`
+ where :math:`0 \\le x \\le n` and :math:`n-b \\le x \\le g`
- for P(x) the probability of x successes, n = ngood, m = nbad, and
- N = number of samples.
+ for P(x) the probability of x successes, g = ngood, b = nbad, and
+ n = number of samples.
Consider an urn with black and white marbles in it, ngood of them
black and nbad are white. If you draw nsample balls without
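With the corrected symbols the density is easy to sanity-check numerically; a small sketch, using `math.comb` (Python 3.8+) purely for illustration:

    from math import comb

    def hypergeom_pmf(x, g, b, n):
        # P(x) = C(g, x) * C(b, n - x) / C(g + b, n); g = ngood, b = nbad
        return comb(g, x) * comb(b, n - x) / comb(g + b, n)

    print(hypergeom_pmf(2, 5, 3, 4))   # chance of exactly 2 good among 4 draws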
@@ -4225,7 +4224,7 @@ cdef class RandomState:
MathWorld--A Wolfram Web Resource.
http://mathworld.wolfram.com/HypergeometricDistribution.html
.. [3] Wikipedia, "Hypergeometric distribution",
- http://en.wikipedia.org/wiki/Hypergeometric_distribution
+ https://en.wikipedia.org/wiki/Hypergeometric_distribution
Examples
--------
@@ -4335,7 +4334,7 @@ cdef class RandomState:
.. [3] D. J. Hand, F. Daly, D. Lunn, E. Ostrowski, A Handbook of Small
Data Sets, CRC Press, 1994.
.. [4] Wikipedia, "Logarithmic distribution",
- http://en.wikipedia.org/wiki/Logarithmic_distribution
+ https://en.wikipedia.org/wiki/Logarithmic_distribution
Examples
--------
@@ -4697,9 +4696,9 @@ cdef class RandomState:
----------
.. [1] David McKay, "Information Theory, Inference and Learning
Algorithms," chapter 23,
- http://www.inference.phy.cam.ac.uk/mackay/
+ http://www.inference.org.uk/mackay/itila/
.. [2] Wikipedia, "Dirichlet distribution",
- http://en.wikipedia.org/wiki/Dirichlet_distribution
+ https://en.wikipedia.org/wiki/Dirichlet_distribution
Examples
--------
@@ -4837,9 +4836,8 @@ cdef class RandomState:
self._shuffle_raw(n, sizeof(npy_intp), stride, x_ptr, buf_ptr)
else:
self._shuffle_raw(n, itemsize, stride, x_ptr, buf_ptr)
- elif isinstance(x, np.ndarray) and x.ndim > 1 and x.size:
- # Multidimensional ndarrays require a bounce buffer.
- buf = np.empty_like(x[0])
+ elif isinstance(x, np.ndarray) and x.ndim and x.size:
+ buf = np.empty_like(x[0,...])
with self.lock:
for i in reversed(range(1, n)):
j = rk_interval(i, self.internal_state)
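Relaxing the condition from `x.ndim > 1` to `x.ndim`, together with the `x[0, ...]` indexing, plausibly routes 1-d subclass arrays through the bounce buffer as well: `x[0]` on a 1-d array is a scalar, while `x[0, ...]` is a 0-d view from which `empty_like` can take dtype and subclass information. The recarray case this targets (gh-11442, also added to the shuffle tests below):

    import numpy as np

    x = np.asarray([(i, i) for i in range(5)],
                   dtype=[("a", int), ("b", int)]).view(np.recarray)
    np.random.shuffle(x)   # previously failed for structured subclasses like this
    print(x.a)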
diff --git a/numpy/random/mtrand/randint_helpers.pxi.in b/numpy/random/mtrand/randint_helpers.pxi.in
index 4bd7cd356..894a25167 100644
--- a/numpy/random/mtrand/randint_helpers.pxi.in
+++ b/numpy/random/mtrand/randint_helpers.pxi.in
@@ -23,7 +23,7 @@ def get_dispatch(dtypes):
{{for npy_dt, npy_udt, np_dt in get_dispatch(dtypes)}}
-def _rand_{{npy_dt}}(low, high, size, rngstate):
+def _rand_{{npy_dt}}(npy_{{npy_dt}} low, npy_{{npy_dt}} high, size, rngstate):
"""
_rand_{{npy_dt}}(low, high, size, rngstate)
@@ -60,8 +60,8 @@ def _rand_{{npy_dt}}(low, high, size, rngstate):
cdef npy_intp cnt
cdef rk_state *state = <rk_state *>PyCapsule_GetPointer(rngstate, NULL)
- rng = <npy_{{npy_udt}}>(high - low)
- off = <npy_{{npy_udt}}>(<npy_{{npy_dt}}>low)
+ off = <npy_{{npy_udt}}>(low)
+ rng = <npy_{{npy_udt}}>(high) - <npy_{{npy_udt}}>(low)
if size is None:
rk_random_{{npy_udt}}(off, rng, 1, &buf, state)
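The reordering matters at the extreme bounds of signed types: computing `high - low` in the signed type first can overflow (for int8, 127 - (-128) = 255 does not fit), whereas casting each bound to the unsigned counterpart before subtracting is well-defined modular arithmetic. A plain-Python model of the cast semantics:

    def as_uint8(v):
        # model a C cast to npy_uint8: reinterpret modulo 2**8
        return v & 0xFF

    low, high = -128, 127
    off = as_uint8(low)                            # 128
    rng = (as_uint8(high) - as_uint8(low)) & 0xFF  # 255, the full int8 span
    print(off, rng)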
diff --git a/numpy/random/mtrand/randomkit.c b/numpy/random/mtrand/randomkit.c
index 380917180..6371ebe33 100644
--- a/numpy/random/mtrand/randomkit.c
+++ b/numpy/random/mtrand/randomkit.c
@@ -616,7 +616,7 @@ rk_gauss(rk_state *state)
}
while (r2 >= 1.0 || r2 == 0.0);
- /* Box-Muller transform */
+ /* Polar method, a more efficient version of the Box-Muller approach. */
f = sqrt(-2.0*log(r2)/r2);
/* Keep for next call */
state->gauss = f*x1;
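For reference, the loop above is Marsaglia's polar method: it rejection-samples a point in the open unit disc and converts it into two independent standard normals, avoiding the sin/cos calls of the classic Box-Muller transform. A minimal Python sketch of the same algorithm:

    import math
    import random

    def gauss_pair():
        # rejection step: a uniform point in the unit disc, excluding the origin
        while True:
            x1 = 2.0 * random.random() - 1.0
            x2 = 2.0 * random.random() - 1.0
            r2 = x1 * x1 + x2 * x2
            if 0.0 < r2 < 1.0:
                break
        f = math.sqrt(-2.0 * math.log(r2) / r2)
        return f * x1, f * x2   # two independent standard normal deviates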
diff --git a/numpy/random/setup.py b/numpy/random/setup.py
index a8d82b141..394a70ead 100644
--- a/numpy/random/setup.py
+++ b/numpy/random/setup.py
@@ -1,7 +1,6 @@
from __future__ import division, print_function
-from os.path import join, split, dirname
-import os
+from os.path import join
import sys
from distutils.dep_util import newer
from distutils.msvccompiler import get_build_version as get_msvc_build_version
@@ -39,9 +38,6 @@ def configuration(parent_package='',top_path=None):
('_LARGEFILE64_SOURCE', '1')]
if needs_mingw_ftime_workaround():
defs.append(("NPY_NEEDS_MINGW_TIME_WORKAROUND", None))
- # fix for 0.26 < cython < 0.29 and perhaps 0.28.5
- # see https://github.com/cython/cython/issues/2494
- defs.append(('CYTHON_SMALL_CODE', ''))
libs = []
# Configure mtrand
diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py
index 61c6e912d..d0bb92a73 100644
--- a/numpy/random/tests/test_random.py
+++ b/numpy/random/tests/test_random.py
@@ -9,7 +9,6 @@ from numpy.testing import (
)
from numpy import random
import sys
-import warnings
class TestSeed(object):
@@ -440,6 +439,15 @@ class TestRandomDist(object):
assert_equal(np.random.choice(6, s, replace=False, p=p).shape, s)
assert_equal(np.random.choice(np.arange(6), s, replace=True).shape, s)
+ # Check zero-size
+ assert_equal(np.random.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
+ assert_equal(np.random.randint(0, -10, size=0).shape, (0,))
+ assert_equal(np.random.randint(10, 10, size=0).shape, (0,))
+ assert_equal(np.random.choice(0, size=0).shape, (0,))
+ assert_equal(np.random.choice([], size=(0,)).shape, (0,))
+ assert_equal(np.random.choice(['a', 'b'], size=(3, 0, 4)).shape, (3, 0, 4))
+ assert_raises(ValueError, np.random.choice, [], 10)
+
def test_bytes(self):
np.random.seed(self.seed)
actual = np.random.bytes(10)
@@ -458,6 +466,10 @@ class TestRandomDist(object):
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
+ # gh-11442
+ lambda x: (np.asarray([(i, i) for i in x],
+ [("a", int), ("b", int)])
+ .view(np.recarray)),
# gh-4270
lambda x: np.asarray([(i, i) for i in x],
[("a", object, 1),
@@ -759,7 +771,7 @@ class TestRandomDist(object):
[1.40840323350391515e+02, 1.98390255135251704e+05]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
- # http://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
+ # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
@@ -930,7 +942,8 @@ class TestRandomDist(object):
assert_array_almost_equal(actual, desired, decimal=15)
def test_weibull_0(self):
- assert_equal(np.random.weibull(a=0), 0)
+ np.random.seed(self.seed)
+ assert_equal(np.random.weibull(a=0, size=12), np.zeros(12))
assert_raises(ValueError, np.random.weibull, a=-0.)
def test_zipf(self):
@@ -1445,7 +1458,6 @@ class TestBroadcast(object):
assert_raises(ValueError, zipf, np.nan)
assert_raises(ValueError, zipf, [0, 0, np.nan])
-
def test_geometric(self):
p = [0.5]
bad_p_one = [-1]
diff --git a/numpy/testing/__init__.py b/numpy/testing/__init__.py
index a7c85931c..a8bd4fc15 100644
--- a/numpy/testing/__init__.py
+++ b/numpy/testing/__init__.py
@@ -17,6 +17,6 @@ from ._private.nosetester import (
__all__ = _private.utils.__all__ + ['TestCase', 'run_module_suite']
-from ._private.pytesttester import PytestTester
+from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
diff --git a/numpy/testing/_private/noseclasses.py b/numpy/testing/_private/noseclasses.py
index 08dec0ca9..e99bbc97d 100644
--- a/numpy/testing/_private/noseclasses.py
+++ b/numpy/testing/_private/noseclasses.py
@@ -26,7 +26,7 @@ from .utils import KnownFailureException, KnownFailureTest
#-----------------------------------------------------------------------------
# Modified version of the one in the stdlib, that fixes a python bug (doctests
-# not found in extension modules, http://bugs.python.org/issue3158)
+# not found in extension modules, https://bugs.python.org/issue3158)
class NumpyDocTestFinder(doctest.DocTestFinder):
def _from_module(self, module, object):
diff --git a/numpy/testing/_private/nosetester.py b/numpy/testing/_private/nosetester.py
index c2cf58377..1728d9d1f 100644
--- a/numpy/testing/_private/nosetester.py
+++ b/numpy/testing/_private/nosetester.py
@@ -338,12 +338,14 @@ class NoseTester(object):
Identifies the tests to run. This can be a string to pass to
the nosetests executable with the '-A' option, or one of several
special values. Special values are:
+
* 'fast' - the default - which corresponds to the ``nosetests -A``
option of 'not slow'.
* 'full' - fast (as above) and slow tests as in the
'no -A' option to nosetests - this is the same as ''.
* None or '' - run all tests.
- attribute_identifier - string passed directly to nosetests as '-A'.
+ * attribute_identifier - string passed directly to nosetests as '-A'.
+
verbose : int, optional
Verbosity value for test outputs, in the range 1-10. Default is 1.
extra_argv : list, optional
@@ -352,16 +354,14 @@ class NoseTester(object):
If True, run doctests in module. Default is False.
coverage : bool, optional
If True, report coverage of NumPy code. Default is False.
- (This requires the `coverage module:
- <http://nedbatchelder.com/code/modules/coverage.html>`_).
+ (This requires the
+ `coverage module <https://nedbatchelder.com/code/modules/coverage.html>`_).
raise_warnings : None, str or sequence of warnings, optional
This specifies which warnings to configure as 'raise' instead
- of being shown once during the test execution. Valid strings are:
-
- - "develop" : equals ``(Warning,)``
- - "release" : equals ``()``, don't raise on any warnings.
+ of being shown once during the test execution. Valid strings are:
- The default is to use the class initialization value.
+ * "develop" : equals ``(Warning,)``
+ * "release" : equals ``()``, do not raise on any warnings.
timer : bool or int, optional
Timing of individual tests with ``nose-timer`` (which needs to be
installed). If True, time tests and report on all of them.
@@ -489,12 +489,14 @@ class NoseTester(object):
Identifies the benchmarks to run. This can be a string to pass to
the nosetests executable with the '-A' option, or one of several
special values. Special values are:
+
* 'fast' - the default - which corresponds to the ``nosetests -A``
option of 'not slow'.
* 'full' - fast (as above) and slow benchmarks as in the
'no -A' option to nosetests - this is the same as ''.
* None or '' - run all tests.
- attribute_identifier - string passed directly to nosetests as '-A'.
+ * attribute_identifier - string passed directly to nosetests as '-A'.
+
verbose : int, optional
Verbosity value for benchmark outputs, in the range 1-10. Default is 1.
extra_argv : list, optional
diff --git a/numpy/testing/_private/parameterized.py b/numpy/testing/_private/parameterized.py
index 53e67517d..a5fa4fb5e 100644
--- a/numpy/testing/_private/parameterized.py
+++ b/numpy/testing/_private/parameterized.py
@@ -190,7 +190,7 @@ def parameterized_argument_value_pairs(func, p):
in zip(named_args, argspec.defaults or [])
])
- seen_arg_names = set([ n for (n, _) in result ])
+ seen_arg_names = {n for (n, _) in result}
keywords = QuietOrderedDict(sorted([
(name, p.kwargs[name])
for name in p.kwargs
diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py
index e501b2be6..55306e499 100644
--- a/numpy/testing/_private/utils.py
+++ b/numpy/testing/_private/utils.py
@@ -69,7 +69,7 @@ def import_nose():
if not nose_is_good:
msg = ('Need nose >= %d.%d.%d for tests - see '
- 'http://nose.readthedocs.io' %
+ 'https://nose.readthedocs.io' %
minimum_nose_version)
raise ImportError(msg)
@@ -177,7 +177,7 @@ if os.name == 'nt':
# thread's CPU usage is either 0 or 100). To read counters like this,
# you should copy this function, but keep the counter open, and call
# CollectQueryData() each time you need to know.
- # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp
+ # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp (dead link)
# My older explanation for this was that the "AddCounter" process forced
# the CPU to 100%, but the above makes more sense :)
import win32pdh
@@ -352,7 +352,7 @@ def assert_equal(actual, desired, err_msg='', verbose=True):
# XXX: catch ValueError for subclasses of ndarray where iscomplex fail
try:
usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
- except ValueError:
+ except (ValueError, TypeError):
usecomplex = False
if usecomplex:
@@ -687,11 +687,13 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
equal_inf=True):
__tracebackhide__ = True # Hide traceback for py.test
from numpy.core import array, isnan, inf, bool_
- from numpy.core.fromnumeric import all as npall
x = array(x, copy=False, subok=True)
y = array(y, copy=False, subok=True)
+ # original array for output formatting
+ ox, oy = x, y
+
def isnumber(x):
return x.dtype.char in '?bhilqpBHILQPefdgFDG'
@@ -705,15 +707,20 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
at the same locations.
"""
- # Both the != True comparison here and the cast to bool_ at the end are
- # done to deal with `masked`, which cannot be compared usefully, and
- # for which np.all yields masked. The use of the function np.all is
- # for back compatibility with ndarray subclasses that changed the
- # return values of the all method. We are not committed to supporting
- # such subclasses, but some used to work.
x_id = func(x)
y_id = func(y)
- if npall(x_id == y_id) != True:
+ # We include work-arounds here to handle three types of slightly
+ # pathological ndarray subclasses:
+ # (1) all() on `masked` array scalars can return masked arrays, so we
+ # use != True
+ # (2) __eq__ on some ndarray subclasses returns Python booleans
+ # instead of element-wise comparisons, so we cast to bool_() and
+ # use isinstance(..., bool) checks
+ # (3) subclasses with bare-bones __array_function__ implementations may
+ # not implement np.all(), so favor using the .all() method
+ # We are not committed to supporting such subclasses, but it's nice to
+ # support them if possible.
+ if bool_(x_id == y_id).all() != True:
msg = build_err_msg([x, y],
err_msg + '\nx and y %s location mismatch:'
% (hasval), verbose=verbose, header=header,
@@ -721,9 +728,9 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
raise AssertionError(msg)
# If there is a scalar, then here we know the array has the same
# flag as it everywhere, so we should return the scalar flag.
- if x_id.ndim == 0:
+ if isinstance(x_id, bool) or x_id.ndim == 0:
return bool_(x_id)
- elif y_id.ndim == 0:
+ elif isinstance(y_id, bool) or y_id.ndim == 0:
return bool_(y_id)
else:
return y_id
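Two of the pathologies listed in the comment are easy to reproduce; a short illustration (the subclass here is a made-up stand-in, not a NumPy class):

    import numpy as np

    # (1) comparisons involving `masked` collapse to the masked singleton,
    #     so a bare all() check can itself come back masked:
    print(np.ma.masked == np.ma.masked)   # prints '--', neither True nor False

    # (2) a subclass __eq__ may return a plain Python bool instead of an array:
    class CollapsingEq(np.ndarray):
        def __eq__(self, other):
            return bool(np.equal(self, other).all())

    a = np.array([1., 2.]).view(CollapsingEq)
    print(a == a)   # True, a bool, hence the isinstance checks above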
@@ -780,10 +787,10 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
# do not trigger a failure (np.ma.masked != True evaluates as
# np.ma.masked, which is falsy).
if cond != True:
- match = 100-100.0*reduced.count(1)/len(reduced)
- msg = build_err_msg([x, y],
+ mismatch = 100.0 * reduced.count(0) / ox.size
+ msg = build_err_msg([ox, oy],
err_msg
- + '\n(mismatch %s%%)' % (match,),
+ + '\n(mismatch %s%%)' % (mismatch,),
verbose=verbose, header=header,
names=('x', 'y'), precision=precision)
raise AssertionError(msg)
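The reworked message has two visible effects, both exercised by the gh-12200 test added near the end of this diff: the percentage is now computed against the size of the original inputs (on the assumption that inf/nan entries are checked separately and stripped before the element-wise comparison, `len(reduced)` can be smaller than `ox.size`), and the untouched arrays `ox`/`oy` are printed rather than the reduced ones. A rough sketch of the percentage fix under that assumption:

    # x = [inf, 0] vs y = [inf, 1]: only the finite slot reaches the comparison
    reduced, original_size = [0], 2
    old = 100 - 100.0 * reduced.count(1) / len(reduced)  # 100.0, misleading
    new = 100.0 * reduced.count(0) / original_size       # 50.0, as expected
    print(old, new)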
@@ -938,7 +945,7 @@ def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
"""
__tracebackhide__ = True # Hide traceback for py.test
- from numpy.core import around, number, float_, result_type, array
+ from numpy.core import number, float_, result_type, array
from numpy.core.numerictypes import issubdtype
from numpy.core.fromnumeric import any as npany
@@ -1084,7 +1091,7 @@ def assert_string_equal(actual, desired):
raise AssertionError(repr(type(actual)))
if not isinstance(desired, str):
raise AssertionError(repr(type(desired)))
- if re.match(r'\A'+desired+r'\Z', actual, re.M):
+ if desired == actual:
return
diff = list(difflib.Differ().compare(actual.splitlines(1), desired.splitlines(1)))
@@ -1108,7 +1115,7 @@ def assert_string_equal(actual, desired):
l.append(d3)
else:
diff.insert(0, d3)
- if re.match(r'\A'+d2[2:]+r'\Z', d1[2:]):
+ if d2[2:] == d1[2:]:
continue
diff_list.extend(l)
continue
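Switching from `re.match` to plain equality fixes comparison of strings that happen to contain regex metacharacters; the old pattern could raise instead of comparing, which is exactly what the new `test_regex` below exercises:

    import re

    desired = actual = "a+*b"
    try:
        re.match(r'\A' + desired + r'\Z', actual)   # the old behaviour
    except re.error as e:
        print("re.error:", e)   # "multiple repeat": the pattern is invalid
    print(desired == actual)    # True, the new literal comparison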
@@ -1618,7 +1625,7 @@ def _integer_repr(x, vdt, comp):
# Reinterpret binary representation of the float as sign-magnitude:
# take into account two-complement representation
# See also
- # http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm
+ # https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
rx = x.view(vdt)
if not (rx.size == 1):
rx[rx < 0] = comp - rx[rx < 0]
@@ -1926,7 +1933,7 @@ class suppress_warnings(object):
``warnings.catch_warnings``.
However, it also provides a filter mechanism to work around
- http://bugs.python.org/issue4180.
+ https://bugs.python.org/issue4180.
This bug causes Python before 3.4 to not reliably show warnings again
after they have been ignored once (even within catch_warnings). It
diff --git a/numpy/testing/decorators.py b/numpy/testing/decorators.py
index 68c1554b5..bf78be500 100644
--- a/numpy/testing/decorators.py
+++ b/numpy/testing/decorators.py
@@ -8,8 +8,8 @@ from __future__ import division, absolute_import, print_function
import warnings
# 2018-04-04, numpy 1.15.0
-warnings.warn("Importing from numpy.testing.decorators is deprecated, "
- "import from numpy.testing instead.",
+warnings.warn("Importing from numpy.testing.decorators is deprecated "
+ "since numpy 1.15.0, import from numpy.testing instead.",
DeprecationWarning, stacklevel=2)
from ._private.decorators import *
diff --git a/numpy/testing/noseclasses.py b/numpy/testing/noseclasses.py
index e0e728a32..5748a9a0f 100644
--- a/numpy/testing/noseclasses.py
+++ b/numpy/testing/noseclasses.py
@@ -7,8 +7,8 @@ from __future__ import division, absolute_import, print_function
import warnings
# 2018-04-04, numpy 1.15.0
-warnings.warn("Importing from numpy.testing.noseclasses is deprecated, "
- "import from numpy.testing instead",
+warnings.warn("Importing from numpy.testing.noseclasses is deprecated "
+ "since 1.15.0, import from numpy.testing instead",
DeprecationWarning, stacklevel=2)
from ._private.noseclasses import *
diff --git a/numpy/testing/nosetester.py b/numpy/testing/nosetester.py
index c8c7d6e68..2ac212eee 100644
--- a/numpy/testing/nosetester.py
+++ b/numpy/testing/nosetester.py
@@ -8,8 +8,8 @@ from __future__ import division, absolute_import, print_function
import warnings
# 2018-04-04, numpy 1.15.0
-warnings.warn("Importing from numpy.testing.nosetester is deprecated, "
- "import from numpy.testing instead.",
+warnings.warn("Importing from numpy.testing.nosetester is deprecated "
+ "since 1.15.0, import from numpy.testing instead.",
DeprecationWarning, stacklevel=2)
from ._private.nosetester import *
diff --git a/numpy/testing/setup.py b/numpy/testing/setup.py
index e27a9b85b..7c3f2fbdf 100755
--- a/numpy/testing/setup.py
+++ b/numpy/testing/setup.py
@@ -15,7 +15,7 @@ if __name__ == '__main__':
setup(maintainer="NumPy Developers",
maintainer_email="numpy-dev@numpy.org",
description="NumPy test module",
- url="http://www.numpy.org",
+ url="https://www.numpy.org",
license="NumPy License (BSD Style)",
configuration=configuration,
)
diff --git a/numpy/testing/tests/test_decorators.py b/numpy/testing/tests/test_decorators.py
index ea684140d..bb3ea1acb 100644
--- a/numpy/testing/tests/test_decorators.py
+++ b/numpy/testing/tests/test_decorators.py
@@ -13,7 +13,7 @@ from numpy.testing import (
try:
- import nose
+ import nose # noqa: F401
except ImportError:
HAVE_NOSE = False
else:
@@ -29,7 +29,6 @@ class TestNoseDecorators(object):
pass
def test_slow(self):
- import nose
@dec.slow
def slow_func(x, y, z):
pass
@@ -53,7 +52,6 @@ class TestNoseDecorators(object):
assert_(f_istest.__test__)
assert_(not f_isnottest.__test__)
-
def test_skip_functions_hardcoded(self):
@dec.skipif(True)
def f1(x):
diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py
index 465c217d4..43afafaa8 100644
--- a/numpy/testing/tests/test_utils.py
+++ b/numpy/testing/tests/test_utils.py
@@ -25,12 +25,8 @@ class _GenericTest(object):
self._assert_func(a, b)
def _test_not_equal(self, a, b):
- try:
+ with assert_raises(AssertionError):
self._assert_func(a, b)
- except AssertionError:
- pass
- else:
- raise AssertionError("a and b are found equal but are not")
def test_array_rank1_eq(self):
"""Test two equal array of rank 1 are found equal."""
@@ -162,6 +158,41 @@ class TestArrayEqual(_GenericTest):
self._test_equal(a, b)
self._test_equal(b, a)
+ def test_subclass_that_overrides_eq(self):
+ # While we cannot guarantee testing functions will always work for
+ # subclasses, the tests should ideally rely only on subclasses having
+ # comparison operators, not on them being able to store booleans
+ # (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
+ class MyArray(np.ndarray):
+ def __eq__(self, other):
+ return bool(np.equal(self, other).all())
+
+ def __ne__(self, other):
+ return not self == other
+
+ a = np.array([1., 2.]).view(MyArray)
+ b = np.array([2., 3.]).view(MyArray)
+ assert_(isinstance(a == a, bool))
+ assert_(a == a)
+ assert_(a != b)
+ self._test_equal(a, a)
+ self._test_not_equal(a, b)
+ self._test_not_equal(b, a)
+
+ def test_subclass_that_does_not_implement_npall(self):
+ class MyArray(np.ndarray):
+ def __array_function__(self, *args, **kwargs):
+ return NotImplemented
+
+ a = np.array([1., 2.]).view(MyArray)
+ b = np.array([2., 3.]).view(MyArray)
+ if np.core.overrides.ENABLE_ARRAY_FUNCTION:
+ with assert_raises(TypeError):
+ np.all(a)
+ self._test_equal(a, a)
+ self._test_not_equal(a, b)
+ self._test_not_equal(b, a)
+
class TestBuildErrorMessage(object):
@@ -473,7 +504,8 @@ class TestAlmostEqual(_GenericTest):
self._test_not_equal(x, z)
def test_error_message(self):
- """Check the message is formatted correctly for the decimal value"""
+ """Check the message is formatted correctly for the decimal value.
+ Also check the message when input includes inf or nan (gh-12200)."""
x = np.array([1.00000000001, 2.00000000002, 3.00003])
y = np.array([1.00000000002, 2.00000000003, 3.00004])
@@ -497,6 +529,19 @@ class TestAlmostEqual(_GenericTest):
# remove anything that's not the array string
assert_equal(str(e).split('%)\n ')[1], b)
+ # Check the error message when input includes inf or nan
+ x = np.array([np.inf, 0])
+ y = np.array([np.inf, 1])
+ try:
+ self._assert_func(x, y)
+ except AssertionError as e:
+ msgs = str(e).split('\n')
+ # assert error percentage is 50%
+ assert_equal(msgs[3], '(mismatch 50.0%)')
+ # assert output array contains inf
+ assert_equal(msgs[4], ' x: array([inf, 0.])')
+ assert_equal(msgs[5], ' y: array([inf, 1.])')
+
def test_subclass_that_cannot_be_bool(self):
# While we cannot guarantee testing functions will always work for
# subclasses, the tests should ideally rely only on subclasses having
@@ -1082,9 +1127,26 @@ class TestStringEqual(object):
assert_raises(AssertionError,
lambda: assert_string_equal("foo", "hello"))
+ def test_regex(self):
+ assert_string_equal("a+*b", "a+*b")
+
+ assert_raises(AssertionError,
+ lambda: assert_string_equal("aaa", "a+b"))
+
def assert_warn_len_equal(mod, n_in_context, py34=None, py37=None):
- mod_warns = mod.__warningregistry__
+ try:
+ mod_warns = mod.__warningregistry__
+ except AttributeError:
+ # the lack of a __warningregistry__
+ # attribute means that no warning has
+ # occurred; this can be triggered in
+ # a parallel test scenario, while in
+ # a serial test scenario an initial
+ # warning (and therefore the attribute)
+ # is always created first
+ mod_warns = {}
+
num_warns = len(mod_warns)
# Python 3.4 appears to clear any pre-existing warnings of the same type,
# when raising warnings inside a catch_warnings block. So, there is a
@@ -1106,6 +1168,33 @@ def assert_warn_len_equal(mod, n_in_context, py34=None, py37=None):
n_in_context = py34
assert_equal(num_warns, n_in_context)
+def test_warn_len_equal_call_scenarios():
+ # assert_warn_len_equal is called under
+ # varying circumstances depending on serial
+ # vs. parallel test scenarios; this test
+ # simply aims to probe both code paths and
+ # check that no assertion is uncaught
+
+ # parallel scenario -- no warning issued yet
+ class mod(object):
+ pass
+
+ mod_inst = mod()
+
+ assert_warn_len_equal(mod=mod_inst,
+ n_in_context=0)
+
+ # serial test scenario -- the __warningregistry__
+ # attribute should be present
+ class mod(object):
+ def __init__(self):
+ self.__warningregistry__ = {'warning1':1,
+ 'warning2':2}
+
+ mod_inst = mod()
+ assert_warn_len_equal(mod=mod_inst,
+ n_in_context=2)
+
def _get_fresh_mod():
# Get this module, with warning registry empty
@@ -1343,7 +1432,7 @@ def test_tempdir():
def test_temppath():
with temppath() as fpath:
- with open(fpath, 'w') as f:
+ with open(fpath, 'w'):
pass
assert_(not os.path.isfile(fpath))
@@ -1385,7 +1474,6 @@ class TestAssertNoGcCycles(object):
assert_no_gc_cycles(no_cycle)
-
def test_asserts(self):
def make_cycle():
a = []
@@ -1400,7 +1488,6 @@ class TestAssertNoGcCycles(object):
with assert_raises(AssertionError):
assert_no_gc_cycles(make_cycle)
-
def test_fails(self):
"""
Test that in cases where the garbage cannot be collected, we raise an
diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py
index 184adcc74..98f19e348 100644
--- a/numpy/testing/utils.py
+++ b/numpy/testing/utils.py
@@ -8,8 +8,8 @@ from __future__ import division, absolute_import, print_function
import warnings
# 2018-04-04, numpy 1.15.0
-warnings.warn("Importing from numpy.testing.utils is deprecated, "
- "import from numpy.testing instead.",
+warnings.warn("Importing from numpy.testing.utils is deprecated "
+ "since 1.15.0, import from numpy.testing instead.",
ImportWarning, stacklevel=2)
from ._private.utils import *
diff --git a/numpy/tests/test_ctypeslib.py b/numpy/tests/test_ctypeslib.py
index 75ce9c8ca..d389b37a8 100644
--- a/numpy/tests/test_ctypeslib.py
+++ b/numpy/tests/test_ctypeslib.py
@@ -2,6 +2,7 @@ from __future__ import division, absolute_import, print_function
import sys
import pytest
+import weakref
import numpy as np
from numpy.ctypeslib import ndpointer, load_library, as_array
@@ -9,20 +10,30 @@ from numpy.distutils.misc_util import get_shared_lib_extension
from numpy.testing import assert_, assert_array_equal, assert_raises, assert_equal
try:
+ import ctypes
+except ImportError:
+ ctypes = None
+else:
cdll = None
+ test_cdll = None
if hasattr(sys, 'gettotalrefcount'):
try:
- cdll = load_library('multiarray_d', np.core.multiarray.__file__)
+ cdll = load_library('_multiarray_umath_d', np.core._multiarray_umath.__file__)
+ except OSError:
+ pass
+ try:
+ test_cdll = load_library('_multiarray_tests', np.core._multiarray_tests.__file__)
except OSError:
pass
if cdll is None:
- cdll = load_library('multiarray', np.core.multiarray.__file__)
- _HAS_CTYPE = True
-except ImportError:
- _HAS_CTYPE = False
+ cdll = load_library('_multiarray_umath', np.core._multiarray_umath.__file__)
+ if test_cdll is None:
+ test_cdll = load_library('_multiarray_tests', np.core._multiarray_tests.__file__)
+
+ c_forward_pointer = test_cdll.forward_pointer
-@pytest.mark.skipif(not _HAS_CTYPE,
+@pytest.mark.skipif(ctypes is None,
reason="ctypes not available in this python")
@pytest.mark.skipif(sys.platform == 'cygwin',
reason="Known to fail on cygwin")
@@ -30,7 +41,7 @@ class TestLoadLibrary(object):
def test_basic(self):
try:
# Should succeed
- load_library('multiarray', np.core.multiarray.__file__)
+ load_library('_multiarray_umath', np.core._multiarray_umath.__file__)
except ImportError as e:
msg = ("ctypes is not available on this python: skipping the test"
" (import error was: %s)" % str(e))
@@ -43,7 +54,7 @@ class TestLoadLibrary(object):
try:
so = get_shared_lib_extension(is_python_ext=True)
# Should succeed
- load_library('multiarray%s' % so, np.core.multiarray.__file__)
+ load_library('_multiarray_umath%s' % so, np.core._multiarray_umath.__file__)
except ImportError:
print("No distutils available, skipping test.")
except ImportError as e:
@@ -108,12 +119,72 @@ class TestNdpointer(object):
assert_raises(TypeError, p.from_param, np.array([[1, 2], [3, 4]]))
def test_cache(self):
- a1 = ndpointer(dtype=np.float64)
- a2 = ndpointer(dtype=np.float64)
- assert_(a1 == a2)
+ assert_(ndpointer(dtype=np.float64) is ndpointer(dtype=np.float64))
+
+ # shapes are normalized
+ assert_(ndpointer(shape=2) is ndpointer(shape=(2,)))
+ # 1.12 <= v < 1.16 had a bug that made these fail
+ assert_(ndpointer(shape=2) is not ndpointer(ndim=2))
+ assert_(ndpointer(ndim=2) is not ndpointer(shape=2))
-@pytest.mark.skipif(not _HAS_CTYPE,
+@pytest.mark.skipif(ctypes is None,
+ reason="ctypes not available on this python installation")
+class TestNdpointerCFunc(object):
+ def test_arguments(self):
+ """ Test that arguments are coerced from arrays """
+ c_forward_pointer.restype = ctypes.c_void_p
+ c_forward_pointer.argtypes = (ndpointer(ndim=2),)
+
+ c_forward_pointer(np.zeros((2, 3)))
+ # too many dimensions
+ assert_raises(
+ ctypes.ArgumentError, c_forward_pointer, np.zeros((2, 3, 4)))
+
+ @pytest.mark.parametrize(
+ 'dt', [
+ float,
+ np.dtype(dict(
+ formats=['<i4', '<i4'],
+ names=['a', 'b'],
+ offsets=[0, 2],
+ itemsize=6
+ ))
+ ], ids=[
+ 'float',
+ 'overlapping-fields'
+ ]
+ )
+ def test_return(self, dt):
+ """ Test that return values are coerced to arrays """
+ arr = np.zeros((2, 3), dt)
+ ptr_type = ndpointer(shape=arr.shape, dtype=arr.dtype)
+
+ c_forward_pointer.restype = ptr_type
+ c_forward_pointer.argtypes = (ptr_type,)
+
+ # check that the arrays are equivalent views on the same data
+ arr2 = c_forward_pointer(arr)
+ assert_equal(arr2.dtype, arr.dtype)
+ assert_equal(arr2.shape, arr.shape)
+ assert_equal(
+ arr2.__array_interface__['data'],
+ arr.__array_interface__['data']
+ )
+
+ def test_vague_return_value(self):
+ """ Test that vague ndpointer return values do not promote to arrays """
+ arr = np.zeros((2, 3))
+ ptr_type = ndpointer(dtype=arr.dtype)
+
+ c_forward_pointer.restype = ptr_type
+ c_forward_pointer.argtypes = (ptr_type,)
+
+ ret = c_forward_pointer(arr)
+ assert_(isinstance(ret, ptr_type))
+
+
+@pytest.mark.skipif(ctypes is None,
reason="ctypes not available on this python installation")
class TestAsArray(object):
def test_array(self):
@@ -170,3 +241,35 @@ class TestAsArray(object):
check(as_array(pointer(c_array), shape=()))
check(as_array(pointer(c_array[0]), shape=(2,)))
check(as_array(pointer(c_array[0][0]), shape=(2, 3)))
+
+ def test_reference_cycles(self):
+ # related to gh-6511
+ import ctypes
+
+ # create array to work with
+ # don't use int/long to avoid running into bpo-10746
+ N = 100
+ a = np.arange(N, dtype=np.short)
+
+ # get pointer to array
+ pnt = np.ctypeslib.as_ctypes(a)
+
+ with np.testing.assert_no_gc_cycles():
+ # decay the array above to a pointer to its first element
+ newpnt = ctypes.cast(pnt, ctypes.POINTER(ctypes.c_short))
+ # and construct an array using this data
+ b = np.ctypeslib.as_array(newpnt, (N,))
+ # now delete both, which should cleanup both objects
+ del newpnt, b
+
+ def test_segmentation_fault(self):
+ arr = np.zeros((224, 224, 3))
+ c_arr = np.ctypeslib.as_ctypes(arr)
+ arr_ref = weakref.ref(arr)
+ del arr
+
+ # check the reference wasn't cleaned up
+ assert_(arr_ref() is not None)
+
+ # check we avoid the segfault
+ c_arr[0][0][0]
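The two regression tests above pin down object lifetimes in numpy.ctypeslib: as_ctypes must keep the source array alive through its ctypes view (the segfault fix), and as_array must not leave uncollectable reference cycles behind (gh-6511). A minimal sketch of the round-trip these tests exercise, assuming a NumPy build that carries both fixes:

    import ctypes
    import weakref

    import numpy as np

    # An array and a ctypes view sharing its memory.
    arr = np.arange(6, dtype=np.short)
    c_view = np.ctypeslib.as_ctypes(arr)

    # The view holds a reference, so the array outlives its own name.
    ref = weakref.ref(arr)
    del arr
    assert ref() is not None

    # Decay the view to a raw pointer and rebuild an ndarray from it;
    # both objects address the same buffer, and doing so no longer
    # creates reference cycles.
    ptr = ctypes.cast(c_view, ctypes.POINTER(ctypes.c_short))
    back = np.ctypeslib.as_array(ptr, (6,))
    back[0] = 42
    assert ref()[0] == 42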
diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py
new file mode 100644
index 000000000..194f8ecbb
--- /dev/null
+++ b/numpy/tests/test_public_api.py
@@ -0,0 +1,89 @@
+from __future__ import division, absolute_import, print_function
+
+import sys
+
+import numpy as np
+import pytest
+try:
+ import ctypes
+except ImportError:
+ ctypes = None
+
+def check_dir(module, module_name=None):
+ """Returns a mapping of all objects with the wrong __module__ attribute."""
+ if module_name is None:
+ module_name = module.__name__
+ results = {}
+ for name in dir(module):
+ item = getattr(module, name)
+ if (hasattr(item, '__module__') and hasattr(item, '__name__')
+ and item.__module__ != module_name):
+ results[name] = item.__module__ + '.' + item.__name__
+ return results
+
+
+@pytest.mark.skipif(
+ sys.version_info[0] < 3,
+ reason="NumPy exposes slightly different functions on Python 2")
+def test_numpy_namespace():
+ # None of these objects are publicly documented.
+ undocumented = {
+ 'Tester': 'numpy.testing._private.nosetester.NoseTester',
+ '_add_newdoc_ufunc': 'numpy.core._multiarray_umath._add_newdoc_ufunc',
+ 'add_docstring': 'numpy.core._multiarray_umath.add_docstring',
+ 'add_newdoc': 'numpy.core.function_base.add_newdoc',
+ 'add_newdoc_ufunc': 'numpy.core._multiarray_umath._add_newdoc_ufunc',
+ 'byte_bounds': 'numpy.lib.utils.byte_bounds',
+ 'compare_chararrays': 'numpy.core._multiarray_umath.compare_chararrays',
+ 'deprecate': 'numpy.lib.utils.deprecate',
+ 'deprecate_with_doc': 'numpy.lib.utils.<lambda>',
+ 'disp': 'numpy.lib.function_base.disp',
+ 'fastCopyAndTranspose': 'numpy.core._multiarray_umath._fastCopyAndTranspose',
+ 'get_array_wrap': 'numpy.lib.shape_base.get_array_wrap',
+ 'get_include': 'numpy.lib.utils.get_include',
+ 'int_asbuffer': 'numpy.core._multiarray_umath.int_asbuffer',
+ 'mafromtxt': 'numpy.lib.npyio.mafromtxt',
+ 'maximum_sctype': 'numpy.core.numerictypes.maximum_sctype',
+ 'ndfromtxt': 'numpy.lib.npyio.ndfromtxt',
+ 'recfromcsv': 'numpy.lib.npyio.recfromcsv',
+ 'recfromtxt': 'numpy.lib.npyio.recfromtxt',
+ 'safe_eval': 'numpy.lib.utils.safe_eval',
+ 'set_string_function': 'numpy.core.arrayprint.set_string_function',
+ 'show_config': 'numpy.__config__.show',
+ 'who': 'numpy.lib.utils.who',
+ }
+ # These built-in types are re-exported by numpy.
+ builtins = {
+ 'bool': 'builtins.bool',
+ 'complex': 'builtins.complex',
+ 'float': 'builtins.float',
+ 'int': 'builtins.int',
+ 'long': 'builtins.int',
+ 'object': 'builtins.object',
+ 'str': 'builtins.str',
+ 'unicode': 'builtins.str',
+ }
+ whitelist = dict(undocumented, **builtins)
+ bad_results = check_dir(np)
+ # pytest gives better error messages with the builtin assert than with
+ # assert_equal
+ assert bad_results == whitelist
+
+
+def test_numpy_linalg():
+ bad_results = check_dir(np.linalg)
+ assert bad_results == {}
+
+
+def test_numpy_fft():
+ bad_results = check_dir(np.fft)
+ assert bad_results == {}
+
+@pytest.mark.skipif(ctypes is None,
+ reason="ctypes not available in this python")
+def test_NPY_NO_EXPORT():
+ cdll = ctypes.CDLL(np.core._multiarray_tests.__file__)
+ # Make sure an arbitrary NPY_NO_EXPORT function is actually hidden
+ f = getattr(cdll, 'test_not_exported', None)
+ assert f is None, ("'test_not_exported' is mistakenly exported, "
+ "NPY_NO_EXPORT does not work")
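check_dir encodes the convention that a public object's __module__ should name the namespace users import it from; anything re-exported from a private module turns up in the returned mapping. A stripped-down sketch of the same audit:

    import numpy as np

    def audit(module, expected_name=None):
        # Collect objects whose __module__ does not match the namespace
        # they are exposed from -- the rule check_dir() applies above.
        expected_name = expected_name or module.__name__
        bad = {}
        for name in dir(module):
            item = getattr(module, name)
            if (hasattr(item, '__module__') and hasattr(item, '__name__')
                    and item.__module__ != expected_name):
                bad[name] = '%s.%s' % (item.__module__, item.__name__)
        return bad

    # The submodule tests expect these to come back empty:
    print(audit(np.linalg))
    print(audit(np.fft))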
diff --git a/numpy/tests/test_reloading.py b/numpy/tests/test_reloading.py
index cd42252e3..a073d691f 100644
--- a/numpy/tests/test_reloading.py
+++ b/numpy/tests/test_reloading.py
@@ -1,9 +1,9 @@
from __future__ import division, absolute_import, print_function
import sys
-import pickle
from numpy.testing import assert_raises, assert_, assert_equal
+from numpy.core.numeric import pickle
if sys.version_info[:2] >= (3, 4):
from importlib import reload
@@ -32,5 +32,7 @@ def test_numpy_reloading():
def test_novalue():
import numpy as np
- assert_equal(repr(np._NoValue), '<no value>')
- assert_(pickle.loads(pickle.dumps(np._NoValue)) is np._NoValue)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ assert_equal(repr(np._NoValue), '<no value>')
+ assert_(pickle.loads(pickle.dumps(np._NoValue,
+ protocol=proto)) is np._NoValue)
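numpy.core.numeric.pickle is a small shim that prefers the pickle5 backport when it is installed, so looping from protocol 2 up to pickle.HIGHEST_PROTOCOL also covers protocol 5 where available. The identity assertion holds because np._NoValue is a singleton; a rough, illustrative reimplementation of that guarantee (not NumPy's actual class):

    import pickle

    class _SentinelType(object):
        # __new__ always hands back the one shared instance, so
        # unpickling reconstructs the very same object.
        _instance = None

        def __new__(cls):
            if cls._instance is None:
                cls._instance = super(_SentinelType, cls).__new__(cls)
            return cls._instance

        def __repr__(self):
            return '<no value>'

    sentinel = _SentinelType()
    for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
        assert pickle.loads(pickle.dumps(sentinel, protocol=proto)) is sentinel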
diff --git a/numpy/tests/test_scripts.py b/numpy/tests/test_scripts.py
index 49ace6d38..9e27cc6ce 100644
--- a/numpy/tests/test_scripts.py
+++ b/numpy/tests/test_scripts.py
@@ -7,7 +7,7 @@ from __future__ import division, print_function, absolute_import
import sys
import os
import pytest
-from os.path import join as pathjoin, isfile, dirname, basename
+from os.path import join as pathjoin, isfile, dirname
from subprocess import Popen, PIPE
import numpy as np
@@ -63,32 +63,37 @@ def run_command(cmd, check_code=True):
@pytest.mark.xfail(reason="Test is unreliable")
def test_f2py():
# test that we can run f2py script
+
+ def try_f2py_commands(cmds):
+ success = 0
+ for f2py_cmd in cmds:
+ try:
+ code, stdout, stderr = run_command([f2py_cmd, '-v'])
+ assert_equal(stdout.strip(), b'2')
+ success += 1
+ except Exception:
+ pass
+ return success
+
if sys.platform == 'win32':
+        # Only the single 'f2py' script is installed on Windows.
exe_dir = dirname(sys.executable)
-
if exe_dir.endswith('Scripts'): # virtualenv
- f2py_cmd = r"%s\f2py.py" % exe_dir
+ f2py_cmds = [os.path.join(exe_dir, 'f2py')]
else:
- f2py_cmd = r"%s\Scripts\f2py.py" % exe_dir
-
- code, stdout, stderr = run_command([sys.executable, f2py_cmd, '-v'])
- success = stdout.strip() == b'2'
- assert_(success, "Warning: f2py not found in path")
+ f2py_cmds = [os.path.join(exe_dir, "Scripts", 'f2py')]
+ success = try_f2py_commands(f2py_cmds)
+ msg = "Warning: f2py not found in path"
+ assert_(success == 1, msg)
else:
+ # Three scripts are installed in Unix-like systems:
+ # 'f2py', 'f2py{major}', and 'f2py{major.minor}'. For example,
+ # if installed with python3.7 the scripts would be named
+ # 'f2py', 'f2py3', and 'f2py3.7'.
version = sys.version_info
major = str(version.major)
minor = str(version.minor)
-
f2py_cmds = ('f2py', 'f2py' + major, 'f2py' + major + '.' + minor)
- success = False
-
- for f2py_cmd in f2py_cmds:
- try:
- code, stdout, stderr = run_command([f2py_cmd, '-v'])
- assert_equal(stdout.strip(), b'2')
- success = True
- break
- except Exception:
- pass
- msg = "Warning: neither %s nor %s nor %s found in path" % f2py_cmds
- assert_(success, msg)
+ success = try_f2py_commands(f2py_cmds)
+ msg = "Warning: not all of %s, %s, and %s are found in path" % f2py_cmds
+ assert_(success == 3, msg)
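The Unix naming scheme described in the comment falls straight out of sys.version_info; a small sketch of the names the test expects to find on the path:

    import sys

    version = sys.version_info
    f2py_cmds = ('f2py',
                 'f2py%d' % version.major,
                 'f2py%d.%d' % (version.major, version.minor))
    print(f2py_cmds)  # e.g. ('f2py', 'f2py3', 'f2py3.7') under CPython 3.7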
diff --git a/pavement.py b/pavement.py
index d0818e66b..f2c56883b 100644
--- a/pavement.py
+++ b/pavement.py
@@ -4,37 +4,6 @@ possible. It relies on virtualenv to generate 'bootstrap' environments as
independent from the user system as possible (e.g. to make sure the sphinx doc
is built against the built numpy, not an installed one).
-Building a fancy dmg from scratch
-=================================
-
-Clone the numpy-macosx-installer git repo from on github into the source tree
-(numpy-macosx-installer should be in the same directory as setup.py). Then, do
-as follows::
-
- git clone git://github.com/cournape/macosx-numpy-installer
- # remove build dir, and everything generated by previous paver calls
- # (included generated installers). Use with care !
- paver nuke
- paver bootstrap && source bootstrap/bin/activate
- # Installing numpy is necessary to build the correct documentation (because
- # of autodoc)
- python setup.py install
- paver dmg
-
-Building a simple (no-superpack) windows installer from wine
-============================================================
-
-It assumes that blas/lapack are in c:\local\lib inside drive_c.
-
- paver bdist_wininst_simple
-
-You will have to configure your wine python locations (WINE_PYS).
-
-The superpack requires all the atlas libraries for every arch to be installed
-(see SITECFG), and can then be built as follows::
-
- paver bdist_superpack
-
Building changelog + notes
==========================
@@ -43,7 +12,7 @@ Assumes you have git and the binaries/tarballs in installers/::
paver write_release
paver write_note
-This automatically put the checksum into NOTES.txt, and write the Changelog
+This automatically puts the checksums into README.rst, and writes the Changelog
which can be uploaded to sourceforge.
TODO
@@ -56,10 +25,6 @@ TODO
"""
from __future__ import division, print_function
-# What need to be installed to build everything on mac os x:
-# - wine: python 2.6 and 2.5 + makensis + cpuid plugin + mingw, all in the PATH
-# - paver + virtualenv
-# - full texlive
import os
import sys
import shutil
@@ -67,39 +32,17 @@ import subprocess
import re
import hashlib
+# The paver package needs to be installed to run tasks
import paver
-from paver.easy import \
- options, Bunch, task, call_task, sh, needs, cmdopts, dry
-
-sys.path.insert(0, os.path.dirname(__file__))
-try:
- setup_py = __import__("setup")
- FULLVERSION = setup_py.VERSION
- # This is duplicated from setup.py
- if os.path.exists('.git'):
- GIT_REVISION = setup_py.git_version()
- elif os.path.exists('numpy/version.py'):
- # must be a source distribution, use existing version file
- from numpy.version import git_revision as GIT_REVISION
- else:
- GIT_REVISION = "Unknown"
-
- if not setup_py.ISRELEASED:
- FULLVERSION += '.dev0+' + GIT_REVISION[:7]
-finally:
- sys.path.pop(0)
+from paver.easy import Bunch, options, task, sh
#-----------------------------------
# Things to be changed for a release
#-----------------------------------
-# Source of the release notes
-RELEASE_NOTES = 'doc/release/1.15.4-notes.rst'
-
-# Start/end of the log (from git)
-LOG_START = 'v1.15.3'
-LOG_END = 'maintenance/1.15.x'
+# Path to the release notes
+RELEASE_NOTES = 'doc/release/1.16.0-notes.rst'
#-------------------------------------------------------
@@ -107,435 +50,63 @@ LOG_END = 'maintenance/1.15.x'
#-------------------------------------------------------
DEFAULT_PYTHON = "2.7"
-# Where to put the final installers, as put on sourceforge
-SUPERPACK_BUILD = 'build-superpack'
-SUPERPACK_BINDIR = os.path.join(SUPERPACK_BUILD, 'binaries')
-
-options(bootstrap=Bunch(bootstrap_dir="bootstrap"),
- virtualenv=Bunch(packages_to_install=["sphinx==1.1.3", "numpydoc"],
- no_site_packages=False),
- sphinx=Bunch(builddir="build", sourcedir="source", docroot='doc'),
- superpack=Bunch(builddir="build-superpack"),
- installers=Bunch(releasedir="release",
- installersdir=os.path.join("release", "installers")),
- doc=Bunch(doc_root="doc",
- sdir=os.path.join("doc", "source"),
- bdir=os.path.join("doc", "build"),
- bdir_latex=os.path.join("doc", "build", "latex"),
- destdir_pdf=os.path.join("build_doc", "pdf")
- ),
- html=Bunch(builddir=os.path.join("build", "html")),
- dmg=Bunch(python_version=DEFAULT_PYTHON),
- bdist_wininst_simple=Bunch(python_version=DEFAULT_PYTHON),
-)
-
-MPKG_PYTHON = {
- "2.6": ["/Library/Frameworks/Python.framework/Versions/2.6/bin/python"],
- "2.7": ["/Library/Frameworks/Python.framework/Versions/2.7/bin/python"],
- "3.2": ["/Library/Frameworks/Python.framework/Versions/3.2/bin/python3"],
- "3.3": ["/Library/Frameworks/Python.framework/Versions/3.3/bin/python3"],
- "3.4": ["/Library/Frameworks/Python.framework/Versions/3.4/bin/python3"],
-}
-
-SSE3_CFG = {'ATLAS': r'C:\local\lib\atlas\sse3'}
-SSE2_CFG = {'ATLAS': r'C:\local\lib\atlas\sse2'}
-NOSSE_CFG = {'BLAS': r'C:\local\lib\atlas\nosse', 'LAPACK': r'C:\local\lib\atlas\nosse'}
-
-SITECFG = {"sse2" : SSE2_CFG, "sse3" : SSE3_CFG, "nosse" : NOSSE_CFG}
-
-if sys.platform =="darwin":
- WINDOWS_PYTHON = {
- "3.4": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python34/python.exe"],
- "2.7": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python27/python.exe"],
- }
- WINDOWS_ENV = os.environ
- WINDOWS_ENV["DYLD_FALLBACK_LIBRARY_PATH"] = "/usr/X11/lib:/usr/lib"
- MAKENSIS = ["wine", "makensis"]
-elif sys.platform == "win32":
- WINDOWS_PYTHON = {
- "3.4": [r"C:\Python34\python.exe"],
- "2.7": [r"C:\Python27\python.exe"],
- }
- # XXX: find out which env variable is necessary to avoid the pb with python
- # 2.6 and random module when importing tempfile
- WINDOWS_ENV = os.environ
- MAKENSIS = ["makensis"]
-else:
- WINDOWS_PYTHON = {
- "3.4": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python34/python.exe"],
- "2.7": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python27/python.exe"],
- }
- WINDOWS_ENV = os.environ
- MAKENSIS = ["wine", "makensis"]
-
-
-#-------------------
-# Windows installers
-#-------------------
-def superpack_name(pyver, numver):
- """Return the filename of the superpack installer."""
- return 'numpy-%s-win32-superpack-python%s.exe' % (numver, pyver)
-
-def internal_wininst_name(arch):
- """Return the name of the wininst as it will be inside the superpack (i.e.
- with the arch encoded."""
- ext = '.exe'
- return "numpy-%s-%s%s" % (FULLVERSION, arch, ext)
-
-def wininst_name(pyver):
- """Return the name of the installer built by wininst command."""
- ext = '.exe'
- return "numpy-%s.win32-py%s%s" % (FULLVERSION, pyver, ext)
-
-def prepare_nsis_script(pyver, numver):
- if not os.path.exists(SUPERPACK_BUILD):
- os.makedirs(SUPERPACK_BUILD)
-
- tpl = os.path.join('tools/win32build/nsis_scripts', 'numpy-superinstaller.nsi.in')
- source = open(tpl, 'r')
- target = open(os.path.join(SUPERPACK_BUILD, 'numpy-superinstaller.nsi'), 'w')
-
- installer_name = superpack_name(pyver, numver)
- cnt = "".join(source.readlines())
- cnt = cnt.replace('@NUMPY_INSTALLER_NAME@', installer_name)
- for arch in ['nosse', 'sse2', 'sse3']:
- cnt = cnt.replace('@%s_BINARY@' % arch.upper(),
- internal_wininst_name(arch))
-
- target.write(cnt)
-
-def bdist_wininst_arch(pyver, arch):
- """Arch specific wininst build."""
- if os.path.exists("build"):
- shutil.rmtree("build")
-
- _bdist_wininst(pyver, SITECFG[arch])
+# Where to put the release installers
+options(installers=Bunch(releasedir="release",
+ installersdir=os.path.join("release", "installers")),)
-@task
-@cmdopts([("python-version=", "p", "python version")])
-def bdist_superpack(options):
- """Build all arch specific wininst installers."""
- pyver = options.python_version
- def copy_bdist(arch):
- # Copy the wininst in dist into the release directory
- source = os.path.join('dist', wininst_name(pyver))
- target = os.path.join(SUPERPACK_BINDIR, internal_wininst_name(arch))
- if os.path.exists(target):
- os.remove(target)
- if not os.path.exists(os.path.dirname(target)):
- os.makedirs(os.path.dirname(target))
- try:
- os.rename(source, target)
- except OSError:
- # When git is installed on OS X but not under Wine, the name of the
- # .exe has "-Unknown" in it instead of the correct git revision.
- # Try to fix this here:
- revidx = source.index(".dev-") + 5
- gitrev = source[revidx:revidx+7]
- os.rename(source.replace(gitrev, "Unknown"), target)
-
- bdist_wininst_arch(pyver, 'nosse')
- copy_bdist("nosse")
- bdist_wininst_arch(pyver, 'sse2')
- copy_bdist("sse2")
- bdist_wininst_arch(pyver, 'sse3')
- copy_bdist("sse3")
- idirs = options.installers.installersdir
- pyver = options.python_version
- prepare_nsis_script(pyver, FULLVERSION)
- subprocess.check_call(MAKENSIS + ['numpy-superinstaller.nsi'],
- cwd=SUPERPACK_BUILD)
-
- # Copy the superpack into installers dir
- if not os.path.exists(idirs):
- os.makedirs(idirs)
-
- source = os.path.join(SUPERPACK_BUILD, superpack_name(pyver, FULLVERSION))
- target = os.path.join(idirs, superpack_name(pyver, FULLVERSION))
- shutil.copy(source, target)
-
-@task
-@cmdopts([("python-version=", "p", "python version")])
-def bdist_wininst_nosse(options):
- """Build the nosse wininst installer."""
- bdist_wininst_arch(options.python_version, 'nosse')
-
-@task
-@cmdopts([("python-version=", "p", "python version")])
-def bdist_wininst_sse2(options):
- """Build the sse2 wininst installer."""
- bdist_wininst_arch(options.python_version, 'sse2')
-
-@task
-@cmdopts([("python-version=", "p", "python version")])
-def bdist_wininst_sse3(options):
- """Build the sse3 wininst installer."""
- bdist_wininst_arch(options.python_version, 'sse3')
-
-@task
-@cmdopts([("python-version=", "p", "python version")])
-def bdist_wininst_simple():
- """Simple wininst-based installer."""
- pyver = options.bdist_wininst_simple.python_version
- _bdist_wininst(pyver)
-
-def _bdist_wininst(pyver, cfg_env=None):
- cmd = WINDOWS_PYTHON[pyver] + ['setup.py', 'build', '-c', 'mingw32', 'bdist_wininst']
- if cfg_env:
- for k, v in WINDOWS_ENV.items():
- cfg_env[k] = v
- else:
- cfg_env = WINDOWS_ENV
- subprocess.check_call(cmd, env=cfg_env)
-
-#----------------
-# Bootstrap stuff
-#----------------
-@task
-def bootstrap(options):
- """create virtualenv in ./bootstrap"""
- try:
- import virtualenv
- except ImportError as e:
- raise RuntimeError("virtualenv is needed for bootstrap")
-
- bdir = options.bootstrap_dir
- if not os.path.exists(bdir):
- os.makedirs(bdir)
- bscript = "boostrap.py"
-
- options.virtualenv.script_name = os.path.join(options.bootstrap_dir,
- bscript)
- options.virtualenv.no_site_packages = False
- options.bootstrap.no_site_packages = False
- call_task('paver.virtual.bootstrap')
- sh('cd %s; %s %s' % (bdir, sys.executable, bscript))
-
-@task
-def clean():
- """Remove build, dist, egg-info garbage."""
- d = ['build', 'dist', 'numpy.egg-info']
- for i in d:
- if os.path.exists(i):
- shutil.rmtree(i)
-
- bdir = os.path.join('doc', options.sphinx.builddir)
- if os.path.exists(bdir):
- shutil.rmtree(bdir)
-
-@task
-def clean_bootstrap():
- bdir = os.path.join(options.bootstrap.bootstrap_dir)
- if os.path.exists(bdir):
- shutil.rmtree(bdir)
-
-@task
-@needs('clean', 'clean_bootstrap')
-def nuke(options):
- """Remove everything: build dir, installers, bootstrap dirs, etc..."""
- for d in [options.superpack.builddir, options.installers.releasedir]:
- if os.path.exists(d):
- shutil.rmtree(d)
-
-#---------------------
-# Documentation tasks
-#---------------------
-@task
-def html(options):
- """Build numpy documentation and put it into build/docs"""
- # Don't use paver html target because of numpy bootstrapping problems
- bdir = os.path.join("doc", options.sphinx.builddir, "html")
- if os.path.exists(bdir):
- shutil.rmtree(bdir)
- subprocess.check_call(["make", "html"], cwd="doc")
- html_destdir = options.html.builddir
- if os.path.exists(html_destdir):
- shutil.rmtree(html_destdir)
- shutil.copytree(bdir, html_destdir)
+#-----------------------------
+# Generate the release version
+#-----------------------------
-@task
-def latex():
- """Build numpy documentation in latex format."""
- subprocess.check_call(["make", "latex"], cwd="doc")
-
-@task
-@needs('latex')
-def pdf():
- sdir = options.doc.sdir
- bdir = options.doc.bdir
- bdir_latex = options.doc.bdir_latex
- destdir_pdf = options.doc.destdir_pdf
-
- def build_pdf():
- subprocess.check_call(["make", "all-pdf"], cwd=str(bdir_latex))
- dry("Build pdf doc", build_pdf)
-
- if os.path.exists(destdir_pdf):
- shutil.rmtree(destdir_pdf)
- os.makedirs(destdir_pdf)
-
- user = os.path.join(bdir_latex, "numpy-user.pdf")
- shutil.copy(user, os.path.join(destdir_pdf, "userguide.pdf"))
- ref = os.path.join(bdir_latex, "numpy-ref.pdf")
- shutil.copy(ref, os.path.join(destdir_pdf, "reference.pdf"))
-
-#------------------
-# Mac OS X targets
-#------------------
-def dmg_name(fullversion, pyver, osxver=None):
- """Return name for dmg installer.
-
- Notes
- -----
- Python 2.7 has two binaries, one for 10.3 (ppc, i386) and one for 10.6
- (i386, x86_64). All other Python versions at python.org at the moment
- have binaries for 10.3 only. The "macosx%s" part of the dmg name should
- correspond to the python.org naming scheme.
- """
- # assume that for the py2.7/osx10.6 build the deployment target is set
- # (should be done in the release script).
- if not osxver:
- osxver = os.environ.get('MACOSX_DEPLOYMENT_TARGET', '10.3')
- return "numpy-%s-py%s-python.org-macosx%s.dmg" % (fullversion, pyver,
- osxver)
-
-def macosx_version():
- if not sys.platform == 'darwin':
- raise ValueError("Not darwin ??")
- st = subprocess.Popen(["sw_vers"], stdout=subprocess.PIPE)
- out = st.stdout.readlines()
- ver = re.compile(r"ProductVersion:\s+([0-9]+)\.([0-9]+)\.([0-9]+)")
- for i in out:
- m = ver.match(i)
- if m:
- return m.groups()
-
-def mpkg_name(pyver):
- maj, min = macosx_version()[:2]
- # Note that bdist_mpkg breaks this if building a dev version with a git
- # commit string attached. make_fullplatcomponents() in
- # bdist_mpkg/cmd_bdist_mpkg.py replaces '-' with '_', comment this out if
- # needed.
- return "numpy-%s-py%s-macosx%s.%s.mpkg" % (FULLVERSION, pyver, maj, min)
-
-def _build_mpkg(pyver):
- # account for differences between Python 2.7.1 versions from python.org
- if os.environ.get('MACOSX_DEPLOYMENT_TARGET', None) == "10.6":
- ldflags = "-undefined dynamic_lookup -bundle -arch i386 -arch x86_64 -Wl,-search_paths_first"
+sys.path.insert(0, os.path.dirname(__file__))
+try:
+ setup_py = __import__("setup")
+ FULLVERSION = setup_py.VERSION
+ # This is duplicated from setup.py
+ if os.path.exists('.git'):
+ GIT_REVISION = setup_py.git_version()
+ elif os.path.exists('numpy/version.py'):
+ # must be a source distribution, use existing version file
+ from numpy.version import git_revision as GIT_REVISION
else:
- ldflags = "-undefined dynamic_lookup -bundle -arch i386 -arch ppc -Wl,-search_paths_first"
-
- ldflags += " -L%s" % os.path.join(os.path.dirname(__file__), "build")
- sh("LDFLAGS='%s' %s setup.py bdist_mpkg" % (ldflags, " ".join(MPKG_PYTHON[pyver])))
-
-@task
-def simple_dmg():
- pyver = "2.6"
- src_dir = "dmg-source"
-
- # Clean the source dir
- if os.path.exists(src_dir):
- shutil.rmtree(src_dir)
- os.makedirs(src_dir)
-
- # Build the mpkg
- clean()
- _build_mpkg(pyver)
-
- # Build the dmg
- shutil.copytree(os.path.join("dist", mpkg_name(pyver)),
- os.path.join(src_dir, mpkg_name(pyver)))
- _create_dmg(pyver, src_dir, "NumPy Universal %s" % FULLVERSION)
-
-@task
-def bdist_mpkg(options):
- call_task("clean")
- try:
- pyver = options.bdist_mpkg.python_version
- except AttributeError:
- pyver = options.python_version
-
- _build_mpkg(pyver)
-
-def _create_dmg(pyver, src_dir, volname=None):
- # Build the dmg
- image_name = dmg_name(FULLVERSION, pyver)
- if os.path.exists(image_name):
- os.remove(image_name)
- cmd = ["hdiutil", "create", image_name, "-srcdir", src_dir]
- if volname:
- cmd.extend(["-volname", "'%s'" % volname])
- sh(" ".join(cmd))
+ GIT_REVISION = "Unknown"
-@task
-@cmdopts([("python-version=", "p", "python version")])
-def dmg(options):
- try:
- pyver = options.dmg.python_version
- except Exception:
- pyver = DEFAULT_PYTHON
- idirs = options.installers.installersdir
+ if not setup_py.ISRELEASED:
+ FULLVERSION += '.dev0+' + GIT_REVISION[:7]
+finally:
+ sys.path.pop(0)
- # Check if docs exist. If not, say so and quit.
- ref = os.path.join(options.doc.destdir_pdf, "reference.pdf")
- user = os.path.join(options.doc.destdir_pdf, "userguide.pdf")
- if (not os.path.exists(ref)) or (not os.path.exists(user)):
- import warnings
- warnings.warn("Docs need to be built first! Can't find them.", stacklevel=2)
-
- # Build the mpkg package
- call_task("clean")
- _build_mpkg(pyver)
-
- macosx_installer_dir = "tools/numpy-macosx-installer"
- dmg = os.path.join(macosx_installer_dir, dmg_name(FULLVERSION, pyver))
- if os.path.exists(dmg):
- os.remove(dmg)
-
- # Clean the image source
- content = os.path.join(macosx_installer_dir, 'content')
- if os.path.exists(content):
- shutil.rmtree(content)
- os.makedirs(content)
-
- # Copy mpkg into image source
- mpkg_source = os.path.join("dist", mpkg_name(pyver))
- mpkg_target = os.path.join(content, "numpy-%s-py%s.mpkg" % (FULLVERSION, pyver))
- shutil.copytree(mpkg_source, mpkg_target)
-
- # Copy docs into image source
- pdf_docs = os.path.join(content, "Documentation")
- if os.path.exists(pdf_docs):
- shutil.rmtree(pdf_docs)
- os.makedirs(pdf_docs)
- shutil.copy(user, os.path.join(pdf_docs, "userguide.pdf"))
- shutil.copy(ref, os.path.join(pdf_docs, "reference.pdf"))
-
- # Build the dmg
- cmd = ["./new-create-dmg", "--pkgname", os.path.basename(mpkg_target),
- "--volname", "numpy", os.path.basename(dmg), "./content"]
- st = subprocess.check_call(cmd, cwd=macosx_installer_dir)
-
- source = dmg
- target = os.path.join(idirs, os.path.basename(dmg))
- if not os.path.exists(os.path.dirname(target)):
- os.makedirs(os.path.dirname(target))
- shutil.copy(source, target)
#--------------------------
# Source distribution stuff
#--------------------------
-def tarball_name(type='gztar'):
+def tarball_name(ftype='gztar'):
+ """Generate source distribution name
+
+ Parameters
+ ----------
+ ftype : {'zip', 'gztar'}
+ Type of archive, default is 'gztar'.
+
+ """
root = 'numpy-%s' % FULLVERSION
- if type == 'gztar':
+ if ftype == 'gztar':
return root + '.tar.gz'
- elif type == 'zip':
+ elif ftype == 'zip':
return root + '.zip'
raise ValueError("Unknown type %s" % type)
@task
def sdist(options):
+ """Make source distributions.
+
+ Parameters
+ ----------
+ options :
+ Set by ``task`` decorator.
+
+ """
# First clean the repo and update submodules (for up-to-date doc html theme
# and Sphinx extensions)
sh('git clean -xdf')
@@ -553,29 +124,75 @@ def sdist(options):
if not os.path.exists(idirs):
os.makedirs(idirs)
- for t in ['gztar', 'zip']:
- source = os.path.join('dist', tarball_name(t))
- target = os.path.join(idirs, tarball_name(t))
+ for ftype in ['gztar', 'zip']:
+ source = os.path.join('dist', tarball_name(ftype))
+ target = os.path.join(idirs, tarball_name(ftype))
shutil.copy(source, target)
-def _compute_hash(idirs, algo):
+
+#-------------
+# README stuff
+#-------------
+
+def _compute_hash(idirs, hashfunc):
+ """Hash files using given hashfunc.
+
+ Parameters
+ ----------
+ idirs : directory path
+ Directory containing files to be hashed.
+ hashfunc : hash function
+ Function to be used to hash the files.
+
+ """
released = paver.path.path(idirs).listdir()
checksums = []
- for f in sorted(released):
- with open(f, 'r') as _file:
- m = algo(_file.read())
- checksums.append('%s %s' % (m.hexdigest(), os.path.basename(f)))
+ for fpath in sorted(released):
+ with open(fpath, 'rb') as fin:
+ fhash = hashfunc(fin.read())
+ checksums.append(
+ '%s %s' % (fhash.hexdigest(), os.path.basename(fpath)))
return checksums
+
def compute_md5(idirs):
+ """Compute md5 hash of files in idirs.
+
+ Parameters
+ ----------
+ idirs : directory path
+ Directory containing files to be hashed.
+
+ """
return _compute_hash(idirs, hashlib.md5)
+
def compute_sha256(idirs):
+ """Compute sha256 hash of files in idirs.
+
+ Parameters
+ ----------
+ idirs : directory path
+ Directory containing files to be hashed.
+
+ """
# better checksum so gpg signed README.rst containing the sums can be used
# to verify the binaries instead of signing all binaries
return _compute_hash(idirs, hashlib.sha256)
+
def write_release_task(options, filename='README'):
+ """Append hashes of release files to release notes.
+
+ Parameters
+ ----------
+ options :
+ Set by ``task`` decorator.
+ filename : string
+ Filename of the modified notes. The file is written
+ in the release directory.
+
+ """
idirs = options.installers.installersdir
source = paver.path.path(RELEASE_NOTES)
target = paver.path.path(filename + '.rst')
@@ -619,30 +236,19 @@ SHA256
ftarget.write(mdtext)
-def write_log_task(options, filename='Changelog'):
- st = subprocess.Popen(
- ['git', 'log', '--no-merges', '--use-mailmap',
- '%s..%s' % (LOG_START, LOG_END)],
- stdout=subprocess.PIPE)
-
- out = st.communicate()[0]
- a = open(filename, 'w')
- a.writelines(out)
- a.close()
-
-
@task
def write_release(options):
- write_release_task(options)
+ """Write the README files.
+ Two README files are generated from the release notes, one in ``rst``
+ markup for the general release, the other in ``md`` markup for the github
+ release notes.
-@task
-def write_log(options):
- write_log_task(options)
+ Parameters
+ ----------
+ options :
+ Set by ``task`` decorator.
-
-@task
-def write_release_and_log(options):
+ """
rdir = options.installers.releasedir
write_release_task(options, os.path.join(rdir, 'README'))
- write_log_task(options, os.path.join(rdir, 'Changelog'))
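The checksum helpers boil down to reading each installer in binary mode (the 'rb' fix above keeps Python 3 from raising on non-text bytes) and emitting one hexdigest line per file. A self-contained sketch of the same logic, assuming a directory of release files:

    import hashlib
    import os

    def checksum_lines(dirpath, hashfunc=hashlib.sha256):
        # One "digest  filename" line per released file, hashed as raw bytes.
        lines = []
        for fname in sorted(os.listdir(dirpath)):
            with open(os.path.join(dirpath, fname), 'rb') as fin:
                digest = hashfunc(fin.read()).hexdigest()
            lines.append('%s  %s' % (digest, fname))
        return lines

    # e.g. checksum_lines(os.path.join('release', 'installers'))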
diff --git a/runtests.py b/runtests.py
index 35717b319..81c7c103f 100755
--- a/runtests.py
+++ b/runtests.py
@@ -34,7 +34,7 @@ from __future__ import division, print_function
PROJECT_MODULE = "numpy"
PROJECT_ROOT_FILES = ['numpy', 'LICENSE.txt', 'setup.py']
-SAMPLE_TEST = "numpy/linalg/tests/test_linalg.py:test_byteorder_check"
+SAMPLE_TEST = "numpy/linalg/tests/test_linalg.py::test_byteorder_check"
SAMPLE_SUBMODULE = "linalg"
EXTRA_PATH = ['/usr/lib/ccache', '/usr/lib/f90cache',
@@ -328,19 +328,22 @@ def build_project(args):
# Always use ccache, if installed
env['PATH'] = os.pathsep.join(EXTRA_PATH + env.get('PATH', '').split(os.pathsep))
cvars = distutils.sysconfig.get_config_vars()
- if 'gcc' in cvars.get('CC', ''):
- # add flags used as werrors
- warnings_as_errors = ' '.join([
- # from tools/travis-test.sh
- '-Werror=declaration-after-statement',
- '-Werror=vla',
- '-Werror=nonnull',
- '-Werror=pointer-arith',
- '-Wlogical-op',
- # from sysconfig
- '-Werror=unused-function',
- ])
- env['CFLAGS'] = warnings_as_errors + ' ' + env.get('CFLAGS', '')
+ compiler = env.get('CC') or cvars.get('CC', '')
+ if 'gcc' in compiler:
+ # Check that this isn't clang masquerading as gcc.
+ if sys.platform != 'darwin' or 'gnu-gcc' in compiler:
+ # add flags used as werrors
+ warnings_as_errors = ' '.join([
+ # from tools/travis-test.sh
+ '-Werror=declaration-after-statement',
+ '-Werror=vla',
+ '-Werror=nonnull',
+ '-Werror=pointer-arith',
+ '-Wlogical-op',
+ # from sysconfig
+ '-Werror=unused-function',
+ ])
+ env['CFLAGS'] = warnings_as_errors + ' ' + env.get('CFLAGS', '')
if args.debug or args.gcov:
# assume everyone uses gcc/gfortran
env['OPT'] = '-O0 -ggdb'
@@ -384,23 +387,27 @@ def build_project(args):
with open(log_filename, 'w') as log:
p = subprocess.Popen(cmd, env=env, stdout=log, stderr=log,
cwd=ROOT_DIR)
-
- # Wait for it to finish, and print something to indicate the
- # process is alive, but only if the log file has grown (to
- # allow continuous integration environments kill a hanging
- # process accurately if it produces no output)
- last_blip = time.time()
- last_log_size = os.stat(log_filename).st_size
- while p.poll() is None:
- time.sleep(0.5)
- if time.time() - last_blip > 60:
- log_size = os.stat(log_filename).st_size
- if log_size > last_log_size:
- print(" ... build in progress")
- last_blip = time.time()
- last_log_size = log_size
-
- ret = p.wait()
+ try:
+ # Wait for it to finish, and print something to indicate the
+ # process is alive, but only if the log file has grown (to
+            # allow continuous integration environments to kill a hanging
+ # process accurately if it produces no output)
+ last_blip = time.time()
+ last_log_size = os.stat(log_filename).st_size
+ while p.poll() is None:
+ time.sleep(0.5)
+ if time.time() - last_blip > 60:
+ log_size = os.stat(log_filename).st_size
+ if log_size > last_log_size:
+ print(" ... build in progress")
+ last_blip = time.time()
+ last_log_size = log_size
+
+ ret = p.wait()
+ except:
+ p.kill()
+ p.wait()
+ raise
if ret == 0:
print("Build OK")
diff --git a/setup.py b/setup.py
index 060e82c54..12d3010b3 100755
--- a/setup.py
+++ b/setup.py
@@ -1,23 +1,20 @@
#!/usr/bin/env python
-"""NumPy: array processing for numbers, strings, records, and objects.
+""" NumPy is the fundamental package for array computing with Python.
-NumPy is a general-purpose array-processing package designed to
-efficiently manipulate large multi-dimensional arrays of arbitrary
-records without sacrificing too much speed for small multi-dimensional
-arrays. NumPy is built on the Numeric code base and adds features
-introduced by numarray as well as an extended C-API and the ability to
-create arrays of arbitrary type which also makes NumPy suitable for
-interfacing with general-purpose data-base applications.
+It provides:
-There are also basic facilities for discrete fourier transform,
-basic linear algebra and random number generation.
+- a powerful N-dimensional array object
+- sophisticated (broadcasting) functions
+- tools for integrating C/C++ and Fortran code
+- useful linear algebra, Fourier transform, and random number capabilities
+- and much more
-All numpy wheels distributed from pypi are BSD licensed.
+Besides its obvious scientific uses, NumPy can also be used as an efficient
+multi-dimensional container of generic data. Arbitrary data-types can be
+defined. This allows NumPy to seamlessly and speedily integrate with a wide
+variety of databases.
-Windows wheels are linked against the ATLAS BLAS / LAPACK library, restricted
-to SSE2 instructions, so may not give optimal linear algebra performance for
-your machine. See http://docs.scipy.org/doc/numpy/user/install.html for
-alternatives.
+All NumPy wheels distributed on PyPI are BSD licensed.
"""
from __future__ import division, print_function
@@ -63,8 +60,8 @@ Operating System :: MacOS
"""
MAJOR = 1
-MINOR = 15
-MICRO = 4
+MINOR = 16
+MICRO = 0
ISRELEASED = True
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
@@ -352,13 +349,25 @@ def setup_package():
# Rewrite the version file every time
write_version_py()
+ # The f2py scripts that will be installed
+ if sys.platform == 'win32':
+ f2py_cmds = [
+ 'f2py = numpy.f2py.f2py2e:main',
+ ]
+ else:
+ f2py_cmds = [
+ 'f2py = numpy.f2py.f2py2e:main',
+ 'f2py%s = numpy.f2py.f2py2e:main' % sys.version_info[:1],
+ 'f2py%s.%s = numpy.f2py.f2py2e:main' % sys.version_info[:2],
+ ]
+
metadata = dict(
name = 'numpy',
maintainer = "NumPy Developers",
maintainer_email = "numpy-discussion@python.org",
description = DOCLINES[0],
long_description = "\n".join(DOCLINES[2:]),
- url = "http://www.numpy.org",
+ url = "https://www.numpy.org",
author = "Travis E. Oliphant et al.",
download_url = "https://pypi.python.org/pypi/numpy",
license = 'BSD',
@@ -368,6 +377,9 @@ def setup_package():
cmdclass={"sdist": sdist_checked},
python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',
zip_safe=False,
+ entry_points={
+ 'console_scripts': f2py_cmds
+ },
)
if "--force" in sys.argv:
diff --git a/shippable.yml b/shippable.yml
new file mode 100644
index 000000000..49cd91e1e
--- /dev/null
+++ b/shippable.yml
@@ -0,0 +1,71 @@
+branches:
+ only:
+ - master
+ - maintenance/*
+
+language: python
+
+python:
+ # use versions available for job image
+ # aarch64_u16pytall:v6.7.4
+ # (what we currently have access to by default)
+ # this is a bit restrictive in terms
+ # of version availability / control,
+ # but it is convenient
+ - 2.7
+ - 3.7
+
+runtime:
+ # use the free open source pool of nodes
+ # only for ARM platform
+ nodePool: shippable_shared_aarch64
+
+build:
+ ci:
+ # install dependencies
+ - sudo apt-get install gcc gfortran libblas-dev liblapack-dev
+ # add pathlib for Python 2, otherwise many tests are skipped
+ - pip install --upgrade pip
+ # we will pay the ~13 minute cost of compiling Cython only when a new
+ # version is scraped in by pip; otherwise, use the cached
+    # wheel Shippable places on Amazon S3 after we build it once
+ - pip install cython --cache-dir=/root/.cache/pip/wheels/$SHIPPABLE_PYTHON_VERSION
+ - pip install pathlib
+ # install pytz for datetime testing
+ - pip install pytz
+ # install pytest-xdist to leverage a second core
+ # for unit tests
+ - pip install pytest-xdist
+
+ # build and test numpy
+ - export PATH=$PATH:$SHIPPABLE_REPO_DIR
+ # build first and adjust PATH so f2py is found in scripts dir
+ # use > 1 core for build sometimes slows down a fair bit,
+ # other times modestly speeds up, so avoid for now
+ - python setup.py install
+ - extra_directories=($SHIPPABLE_REPO_DIR/build/*scripts*)
+ - extra_path=$(printf "%s:" "${extra_directories[@]}")
+ - export PATH="${extra_path}${PATH}"
+ # run the test suite
+ - python runtests.py -- -rsx --junit-xml=$SHIPPABLE_REPO_DIR/shippable/testresults/tests.xml -n 2 --durations=10
+
+ cache: true
+ cache_dir_list:
+ # the NumPy project uses a single Amazon S3 cache
+ # so upload the parent path of the Python-specific
+ # version paths to avoid i.e., 2.7 overwriting
+ # 3.7 pip cache (seems to be an issue)
+ - /root/.cache/pip/wheels
+
+
+
+# disable email notification
+# of CI job result
+integrations:
+ notifications:
+ - integrationName: email
+ type: email
+ on_success: never
+ on_failure: never
+ on_cancel: never
+ on_pull_request: never
diff --git a/site.cfg.example b/site.cfg.example
index 21609a332..9d7eb99ec 100644
--- a/site.cfg.example
+++ b/site.cfg.example
@@ -10,7 +10,7 @@
# The format of the file is that of the standard library's ConfigParser module.
# No interpolation is allowed, RawConfigParser class being used to load it.
#
-# http://docs.python.org/3/library/configparser.html
+# https://docs.python.org/library/configparser.html
#
# Each section defines settings that apply to one particular dependency. Some of
# the settings are general and apply to nearly any section and are defined here.
@@ -122,7 +122,7 @@
# multiprocessing.
# (This problem does not exist with multithreaded ATLAS.)
#
-# http://docs.python.org/3.4/library/multiprocessing.html#contexts-and-start-methods
+# https://docs.python.org/library/multiprocessing.html#contexts-and-start-methods
# https://github.com/xianyi/OpenBLAS/issues/294
#
# [openblas]
@@ -197,9 +197,9 @@
#
# UMFPACK is not used by numpy.
#
-# http://www.cise.ufl.edu/research/sparse/umfpack/
-# http://www.cise.ufl.edu/research/sparse/amd/
-# http://scikits.appspot.com/umfpack
+# https://www.cise.ufl.edu/research/sparse/umfpack/
+# https://www.cise.ufl.edu/research/sparse/amd/
+# https://scikit-umfpack.github.io/scikit-umfpack/
#
#[amd]
#amd_libs = amd
@@ -210,10 +210,10 @@
# FFT libraries
# -------------
# There are two FFT libraries that we can configure here: FFTW (2 and 3) and djbfft.
-# Note that these libraries are not used by for numpy or scipy.
+# Note that these libraries are not used by numpy or scipy.
#
# http://fftw.org/
-# http://cr.yp.to/djbfft.html
+# https://cr.yp.to/djbfft.html
#
# Given only this section, numpy.distutils will try to figure out which version
# of FFTW you are using.
diff --git a/tools/allocation_tracking/sorttable.js b/tools/allocation_tracking/sorttable.js
index 25bccb2b6..c9528873e 100644
--- a/tools/allocation_tracking/sorttable.js
+++ b/tools/allocation_tracking/sorttable.js
@@ -2,7 +2,7 @@
SortTable
version 2
7th April 2007
- Stuart Langridge, http://www.kryogenix.org/code/browser/sorttable/
+ Stuart Langridge, https://www.kryogenix.org/code/browser/sorttable/
Instructions:
Download this file
@@ -11,7 +11,7 @@
Click on the headers to sort
Thanks to many, many people for contributions and suggestions.
- Licenced as X11: http://www.kryogenix.org/code/browser/licence.html
+ Licenced as X11: https://www.kryogenix.org/code/browser/licence.html
This basically means: do what you want with it.
*/
@@ -301,7 +301,7 @@ sorttable = {
shaker_sort: function(list, comp_func) {
// A stable sort function to allow multi-level sorting of data
- // see: http://en.wikipedia.org/wiki/Cocktail_sort
+ // see: https://en.wikipedia.org/wiki/Cocktail_shaker_sort
// thanks to Joseph Nahmias
var b = 0;
var t = list.length - 1;
@@ -441,7 +441,7 @@ fixEvent.stopPropagation = function() {
/*
forEach, version 1.0
Copyright 2006, Dean Edwards
- License: http://www.opensource.org/licenses/mit-license.php
+ License: https://www.opensource.org/licenses/mit-license.php
*/
// array-like enumeration
diff --git a/tools/changelog.py b/tools/changelog.py
index 84e046c5f..b135b14e5 100755
--- a/tools/changelog.py
+++ b/tools/changelog.py
@@ -42,8 +42,10 @@ import codecs
from git import Repo
from github import Github
-UTF8Writer = codecs.getwriter('utf8')
-sys.stdout = UTF8Writer(sys.stdout)
+if sys.version_info.major < 3:
+ UTF8Writer = codecs.getwriter('utf8')
+ sys.stdout = UTF8Writer(sys.stdout)
+
this_repo = Repo(os.path.join(os.path.dirname(__file__), ".."))
author_msg =\
diff --git a/tools/swig/pyfragments.swg b/tools/swig/pyfragments.swg
index 901e6ed9d..97ca8cf97 100644
--- a/tools/swig/pyfragments.swg
+++ b/tools/swig/pyfragments.swg
@@ -22,7 +22,6 @@
SWIGINTERN int
SWIG_AsVal_dec(long)(PyObject * obj, long * val)
{
- PyArray_Descr * longDescr = PyArray_DescrNewFromType(NPY_LONG);
if (PyInt_Check(obj)) {
if (val) *val = PyInt_AsLong(obj);
return SWIG_OK;
@@ -56,7 +55,9 @@
}
%#endif
if (!PyArray_IsScalar(obj,Integer)) return SWIG_TypeError;
+ PyArray_Descr * longDescr = PyArray_DescrNewFromType(NPY_LONG);
PyArray_CastScalarToCtype(obj, (void*)val, longDescr);
+ Py_DECREF(longDescr);
return SWIG_OK;
}
}
@@ -74,7 +75,6 @@
SWIGINTERN int
SWIG_AsVal_dec(unsigned long)(PyObject *obj, unsigned long *val)
{
- PyArray_Descr * ulongDescr = PyArray_DescrNewFromType(NPY_ULONG);
%#if PY_VERSION_HEX < 0x03000000
if (PyInt_Check(obj))
{
@@ -120,7 +120,9 @@
}
%#endif
if (!PyArray_IsScalar(obj,Integer)) return SWIG_TypeError;
+ PyArray_Descr * ulongDescr = PyArray_DescrNewFromType(NPY_ULONG);
PyArray_CastScalarToCtype(obj, (void*)val, ulongDescr);
+ Py_DECREF(ulongDescr);
return SWIG_OK;
}
}
diff --git a/tools/test-installed-numpy.py b/tools/test-installed-numpy.py
index 04a2a1da2..14f11b7ed 100644
--- a/tools/test-installed-numpy.py
+++ b/tools/test-installed-numpy.py
@@ -46,6 +46,10 @@ elif numpy.ones((10, 1), order='C').flags.f_contiguous:
print('NPY_RELAXED_STRIDES_CHECKING not set, but active.')
sys.exit(1)
+if options.coverage:
+ # Produce code coverage XML report for codecov.io
+ args += ["--cov-report=xml"]
+
result = numpy.test(options.mode,
verbose=options.verbose,
extra_argv=args,
diff --git a/tools/travis-before-install.sh b/tools/travis-before-install.sh
index 1671d35b4..c334e91ae 100755
--- a/tools/travis-before-install.sh
+++ b/tools/travis-before-install.sh
@@ -9,12 +9,7 @@ pushd builds
# Build into own virtualenv
# We therefore control our own environment, avoid travis' numpy
-#
-# Some change in virtualenv 14.0.5 caused `test_f2py` to fail. So, we have
-# pinned `virtualenv` to the last known working version to avoid this failure.
-# Appears we had some issues with certificates on Travis. It looks like
-# bumping to 14.0.6 will help.
-pip install -U 'virtualenv==14.0.6'
+pip install -U virtualenv
if [ -n "$USE_DEBUG" ]
then
@@ -25,6 +20,11 @@ fi
source venv/bin/activate
python -V
+
+if [ -n "$INSTALL_PICKLE5" ]; then
+ pip install pickle5
+fi
+
pip install --upgrade pip setuptools
pip install nose pytz cython pytest
if [ -n "$USE_ASV" ]; then pip install asv; fi
diff --git a/tools/travis-test.sh b/tools/travis-test.sh
index b99866f0d..fa83606b2 100755
--- a/tools/travis-test.sh
+++ b/tools/travis-test.sh
@@ -28,12 +28,16 @@ fi
werrors="-Werror=declaration-after-statement -Werror=vla "
werrors+="-Werror=nonnull -Werror=pointer-arith"
+# build with c99 by default
+
setup_base()
{
# use default python flags but remove sign-compare
sysflags="$($PYTHON -c "from distutils import sysconfig; \
print (sysconfig.get_config_var('CFLAGS'))")"
export CFLAGS="$sysflags $werrors -Wlogical-op -Wno-sign-compare"
+ # use c99
+ export CFLAGS=$CFLAGS" -std=c99"
# We used to use 'setup.py install' here, but that has the terrible
# behaviour that if a copy of the package is already installed in the
# install location, then the new copy just gets dropped on top of it.
@@ -46,6 +50,8 @@ setup_base()
if [ -z "$USE_DEBUG" ]; then
$PIP install -v . 2>&1 | tee log
else
+ # Python3.5-dbg on travis seems to need this
+ export CFLAGS=$CFLAGS" -Wno-maybe-uninitialized"
$PYTHON setup.py build_ext --inplace 2>&1 | tee log
fi
grep -v "_configtest" log \
@@ -95,8 +101,8 @@ setup_chroot()
# install needed packages
sudo chroot $DIR bash -c "apt-get install -qq -y \
- libatlas-base-dev gfortran python-dev python-nose python-pip cython \
- python-pytest"
+ libatlas-base-dev gfortran python3-dev python3-pip \
+ cython python3-pytest"
}
run_test()
@@ -105,6 +111,12 @@ run_test()
export PYTHONPATH=$PWD
fi
+ if [ -n "$RUN_COVERAGE" ]; then
+ $PIP install pytest-cov
+ NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=1
+ COVERAGE_FLAG=--coverage
+ fi
+
# We change directories to make sure that python won't find the copy
# of numpy in the source directory.
mkdir -p empty
@@ -113,10 +125,33 @@ run_test()
"import os; import numpy; print(os.path.dirname(numpy.__file__))")
export PYTHONWARNINGS=default
if [ -n "$RUN_FULL_TESTS" ]; then
- $PYTHON ../tools/test-installed-numpy.py -v --mode=full
+ export PYTHONWARNINGS="ignore::DeprecationWarning:virtualenv"
+ $PYTHON ../tools/test-installed-numpy.py -v --mode=full $COVERAGE_FLAG
else
$PYTHON ../tools/test-installed-numpy.py -v
fi
+
+ if [ -n "$RUN_COVERAGE" ]; then
+ # move back up to the source dir because we want to execute
+ # gcov on the source files after the tests have gone through
+ # the code paths
+ cd ..
+
+ # execute gcov on source files
+ find . -name '*.gcno' -type f -exec gcov -pb {} +
+
+ # move the C line coverage report files to the same path
+ # as the Python report data
+ mv *.gcov empty
+
+ # move back to the previous path for good measure
+ # as the Python coverage data is there
+ cd empty
+
+ # Upload coverage files to codecov
+ bash <(curl -s https://codecov.io/bash) -X gcov -X coveragepy
+ fi
+
if [ -n "$USE_ASV" ]; then
pushd ../benchmarks
$PYTHON `which asv` machine --machine travis
@@ -141,14 +176,29 @@ if [ -n "$USE_WHEEL" ] && [ $# -eq 0 ]; then
$PIP install -U virtualenv
# ensure some warnings are not issued
export CFLAGS=$CFLAGS" -Wno-sign-compare -Wno-unused-result"
+ # use c99
+ export CFLAGS=$CFLAGS" -std=c99"
+ # adjust gcc flags if C coverage requested
+ if [ -n "$RUN_COVERAGE" ]; then
+ export NPY_DISTUTILS_APPEND_FLAGS=1
+ export CC='gcc --coverage'
+ export F77='gfortran --coverage'
+ export F90='gfortran --coverage'
+ export LDFLAGS='--coverage'
+ fi
$PYTHON setup.py bdist_wheel
# Make another virtualenv to install into
virtualenv --python=`which $PYTHON` venv-for-wheel
. venv-for-wheel/bin/activate
# Move out of source directory to avoid finding local numpy
pushd dist
- pip install --pre --no-index --upgrade --find-links=. numpy
- pip install nose pytest
+ $PIP install --pre --no-index --upgrade --find-links=. numpy
+ $PIP install nose pytest
+
+ if [ -n "$INSTALL_PICKLE5" ]; then
+ $PIP install pickle5
+ fi
+
popd
run_test
elif [ -n "$USE_SDIST" ] && [ $# -eq 0 ]; then
@@ -158,14 +208,20 @@ elif [ -n "$USE_SDIST" ] && [ $# -eq 0 ]; then
$PYTHON -c "import fcntl; fcntl.fcntl(1, fcntl.F_SETFL, 0)"
# ensure some warnings are not issued
export CFLAGS=$CFLAGS" -Wno-sign-compare -Wno-unused-result"
+ # use c99
+ export CFLAGS=$CFLAGS" -std=c99"
$PYTHON setup.py sdist
# Make another virtualenv to install into
virtualenv --python=`which $PYTHON` venv-for-wheel
. venv-for-wheel/bin/activate
# Move out of source directory to avoid finding local numpy
pushd dist
- pip install numpy*
- pip install nose pytest
+ $PIP install numpy*
+ $PIP install nose pytest
+ if [ -n "$INSTALL_PICKLE5" ]; then
+ $PIP install pickle5
+ fi
+
popd
run_test
elif [ -n "$USE_CHROOT" ] && [ $# -eq 0 ]; then
@@ -174,11 +230,10 @@ elif [ -n "$USE_CHROOT" ] && [ $# -eq 0 ]; then
# the chroot'ed environment will not have the current locale,
# avoid any warnings which may disturb testing
export LANG=C LC_ALL=C
- # run again in chroot with this time testing
+    # run again in the chroot, this time testing with python3
sudo linux32 chroot $DIR bash -c \
- "cd numpy && PYTHON=python PIP=pip IN_CHROOT=1 $0 test"
+ "cd numpy && PYTHON=python3 PIP=pip3 IN_CHROOT=1 $0 test"
else
setup_base
run_test
fi
-
diff --git a/tox.ini b/tox.ini
index e6f1b124f..c7df36e23 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,7 +1,7 @@
# 'Tox' is a tool for automating sdist/build/test cycles against
# multiple Python versions:
-# http://pypi.python.org/pypi/tox
-# http://tox.testrun.org/
+# https://pypi.python.org/pypi/tox
+# https://tox.readthedocs.io/
# Running the command 'tox' while in the root of the numpy source
# directory will: