author | Charles Harris <charlesr.harris@gmail.com> | 2017-12-22 19:26:52 -0700 |
---|---|---|
committer | GitHub <noreply@github.com> | 2017-12-22 19:26:52 -0700 |
commit | c6c1760cd2f510b717ca08aa24ff9df1318aa34a (patch) | |
tree | af39f1ae38121b5c3f7d2c7619f5c05cab9e1670 /numpy | |
parent | 3fd3a71952eb3fd35261b002e6f7731b8bb708cc (diff) | |
parent | 5d616b3cdee8a7800cacb475ff8eae7f4c6d9f12 (diff) | |
download | python-numpy-c6c1760cd2f510b717ca08aa24ff9df1318aa34a.tar.gz python-numpy-c6c1760cd2f510b717ca08aa24ff9df1318aa34a.tar.bz2 python-numpy-c6c1760cd2f510b717ca08aa24ff9df1318aa34a.zip | |
Merge pull request #10260 from xoviat/add-pytest-support
ENH: Add pytest support
Diffstat (limited to 'numpy')
-rw-r--r-- | numpy/conftest.py | 32
-rw-r--r-- | numpy/ma/core.py | 1
-rw-r--r-- | numpy/testing/decorators.py | 2
-rw-r--r-- | numpy/testing/nose_tools/decorators.py | 3
-rw-r--r-- | numpy/testing/nose_tools/parameterized.py | 3
-rw-r--r-- | numpy/testing/nose_tools/utils.py | 1
-rw-r--r-- | numpy/testing/noseclasses.py | 3
-rw-r--r-- | numpy/testing/nosetester.py | 3
-rw-r--r-- | numpy/testing/pytest_tools/__init__.py | 0
-rw-r--r-- | numpy/testing/pytest_tools/decorators.py | 278
-rw-r--r-- | numpy/testing/pytest_tools/noseclasses.py | 342
-rw-r--r-- | numpy/testing/pytest_tools/nosetester.py | 566
-rw-r--r-- | numpy/testing/pytest_tools/utils.py | 2275
-rwxr-xr-x | numpy/testing/setup.py | 1
-rw-r--r-- | numpy/testing/tests/test_decorators.py | 16
-rw-r--r-- | numpy/testing/utils.py | 6
16 files changed, 3519 insertions, 13 deletions
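The headline change is a new `numpy/conftest.py` (first hunk below) that makes pytest skip tests marked slow unless `--runslow` is passed. A minimal illustration of the resulting workflow, using a hypothetical test file that is not part of this PR:

```python
# test_example.py -- hypothetical file for illustration only
import pytest

@pytest.mark.slow
def test_expensive():
    # Skipped by default; runs only under "pytest --runslow".
    assert sum(range(10**6)) == 499999500000

def test_cheap():
    assert 1 + 1 == 2
```

With the new conftest.py in effect, `pytest test_example.py` reports the slow test as skipped ("need --runslow option to run"), while `pytest test_example.py --runslow` runs both.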
diff --git a/numpy/conftest.py b/numpy/conftest.py index ea4197049..15985a75b 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -5,6 +5,8 @@ from __future__ import division, absolute_import, print_function import warnings import pytest +import numpy +import importlib from numpy.core.multiarray_tests import get_fpu_mode @@ -52,3 +54,33 @@ def check_fpu_mode(request): raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}" " when collecting the test".format(old_mode, new_mode)) + + +def pytest_addoption(parser): + parser.addoption("--runslow", action="store_true", + default=False, help="run slow tests") + + +def pytest_collection_modifyitems(config, items): + if config.getoption("--runslow"): + # --runslow given in cli: do not skip slow tests + return + skip_slow = pytest.mark.skip(reason="need --runslow option to run") + for item in items: + if "slow" in item.keywords: + item.add_marker(skip_slow) + + +@pytest.fixture(autouse=True) +def add_np(doctest_namespace): + doctest_namespace['np'] = numpy + + +for module, replacement in { + 'numpy.testing.decorators': 'numpy.testing.pytest_tools.decorators', + 'numpy.testing.utils': 'numpy.testing.pytest_tools.utils', +}.items(): + module = importlib.import_module(module) + replacement = importlib.import_module(replacement) + module.__dict__.clear() + module.__dict__.update(replacement.__dict__) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 1edfba42e..fe092f552 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -6937,6 +6937,7 @@ def transpose(a, axes=None): [[False False] [False True]], fill_value = 999999) + >>> ma.transpose(x) masked_array(data = [[0 2] diff --git a/numpy/testing/decorators.py b/numpy/testing/decorators.py index b63850090..21bcdd798 100644 --- a/numpy/testing/decorators.py +++ b/numpy/testing/decorators.py @@ -3,4 +3,6 @@ Back compatibility decorators module. It will import the appropriate set of tools """ +import os + from .nose_tools.decorators import * diff --git a/numpy/testing/nose_tools/decorators.py b/numpy/testing/nose_tools/decorators.py index 12531e734..243c0c8c1 100644 --- a/numpy/testing/nose_tools/decorators.py +++ b/numpy/testing/nose_tools/decorators.py @@ -19,6 +19,9 @@ import collections from .utils import SkipTest, assert_warns +__all__ = ['slow', 'setastest', 'skipif', 'knownfailureif', 'deprecated', + 'parametrize',] + def slow(t): """ diff --git a/numpy/testing/nose_tools/parameterized.py b/numpy/testing/nose_tools/parameterized.py index 372928e3d..d094f7c7f 100644 --- a/numpy/testing/nose_tools/parameterized.py +++ b/numpy/testing/nose_tools/parameterized.py @@ -252,7 +252,8 @@ def default_name_func(func, num, p): return base_name + name_suffix -_test_runner_override = None +# force nose for numpy purposes. 
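The conftest.py hunk above ends by re-pointing the legacy modules `numpy.testing.decorators` and `numpy.testing.utils` at their `pytest_tools` replacements by swapping module dictionaries in place, so existing references through the old names see the new contents. A self-contained sketch of that trick, with hypothetical module names:

```python
import importlib
import sys
import types

# Two throwaway modules standing in for the old and new APIs.
old = types.ModuleType('old_api')
old.greet = lambda: 'old'
new = types.ModuleType('new_api')
new.greet = lambda: 'new'
sys.modules['old_api'], sys.modules['new_api'] = old, new

# The same pattern the conftest uses: mutate the old module in place.
module = importlib.import_module('old_api')
replacement = importlib.import_module('new_api')
module.__dict__.clear()
module.__dict__.update(replacement.__dict__)

import old_api
assert old_api.greet() == 'new'  # existing imports now see the new API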
+_test_runner_override = 'nose' _test_runner_guess = False _test_runners = set(["unittest", "unittest2", "nose", "nose2", "pytest"]) _test_runner_aliases = { diff --git a/numpy/testing/nose_tools/utils.py b/numpy/testing/nose_tools/utils.py index 6c77e5e21..2d97b5c1e 100644 --- a/numpy/testing/nose_tools/utils.py +++ b/numpy/testing/nose_tools/utils.py @@ -1849,6 +1849,7 @@ def _gen_alignment_data(dtype=float32, type='binary', max_size=24): class IgnoreException(Exception): "Ignoring this exception due to disabled feature" + pass @contextlib.contextmanager diff --git a/numpy/testing/noseclasses.py b/numpy/testing/noseclasses.py index 563ed14ea..144c4e7e4 100644 --- a/numpy/testing/noseclasses.py +++ b/numpy/testing/noseclasses.py @@ -1,6 +1,5 @@ """ Back compatibility noseclasses module. It will import the appropriate set of tools - """ -from .nose_tools.noseclasses import * +from .nose_tools.noseclasses import *
\ No newline at end of file diff --git a/numpy/testing/nosetester.py b/numpy/testing/nosetester.py index b726684c9..949fae03e 100644 --- a/numpy/testing/nosetester.py +++ b/numpy/testing/nosetester.py @@ -3,8 +3,11 @@ Back compatibility nosetester module. It will import the appropriate set of tools """ +import os + from .nose_tools.nosetester import * + __all__ = ['get_package_name', 'run_module_suite', 'NoseTester', '_numpy_tester', 'get_package_name', 'import_nose', 'suppress_warnings'] diff --git a/numpy/testing/pytest_tools/__init__.py b/numpy/testing/pytest_tools/__init__.py new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/numpy/testing/pytest_tools/__init__.py diff --git a/numpy/testing/pytest_tools/decorators.py b/numpy/testing/pytest_tools/decorators.py new file mode 100644 index 000000000..08a39e0c0 --- /dev/null +++ b/numpy/testing/pytest_tools/decorators.py @@ -0,0 +1,278 @@ +""" +Compatibility shim for pytest compatibility with the nose decorators. + +Decorators for labeling and modifying behavior of test objects. + +Decorators that merely return a modified version of the original +function object are straightforward. + +Decorators that return a new function will not preserve meta-data such as +function name, setup and teardown functions and so on. + +""" +from __future__ import division, absolute_import, print_function + +import collections + +from .utils import SkipTest, assert_warns + +__all__ = ['slow', 'setastest', 'skipif', 'knownfailureif', 'deprecated', + 'parametrize',] + + +def slow(t): + """ + Label a test as 'slow'. + + The exact definition of a slow test is obviously both subjective and + hardware-dependent, but in general any individual test that requires more + than a second or two should be labeled as slow (the whole suite consits of + thousands of tests, so even a second is significant). + + Parameters + ---------- + t : callable + The test to mark as slow. + + Returns + ------- + t : callable + The decorated test `t`. + + Examples + -------- + The `numpy.testing` module includes ``import decorators as dec``. + A test can be decorated as slow like this:: + + from numpy.testing import * + + @dec.slow + def test_big(self): + print('Big, slow test') + + """ + import pytest + + return pytest.mark.slow(t) + + +def setastest(tf=True): + """ + Signals to nose that this function is or is not a test. + + Parameters + ---------- + tf : bool + If True, specifies that the decorated callable is a test. + If False, specifies that the decorated callable is not a test. + Default is True. + + Examples + -------- + `setastest` can be used in the following way:: + + from numpy.testing.decorators import setastest + + @setastest(False) + def func_with_test_in_name(arg1, arg2): + pass + + """ + def set_test(t): + t.__test__ = tf + return t + return set_test + + +def skipif(skip_condition, msg=None): + """ + Make function raise SkipTest exception if a given condition is true. + + If the condition is a callable, it is used at runtime to dynamically + make the decision. This is useful for tests that may require costly + imports, to delay the cost until the test suite is actually executed. + + Parameters + ---------- + skip_condition : bool or callable + Flag to determine whether to skip the decorated test. + msg : str, optional + Message to give on raising a SkipTest exception. Default is None. 
+ + Returns + ------- + decorator : function + Decorator which, when applied to a function, causes SkipTest + to be raised when `skip_condition` is True, and the function + to be called normally otherwise. + + Notes + ----- + Undecorated functions are returned and that may lead to some lost + information. Note that this function differ from the pytest fixture + ``pytest.mark.skipif``. The latter marks test functions on import and the + skip is handled during collection, hence it cannot be used for non-test + functions, nor does it handle callable conditions. + + """ + def skip_decorator(f): + # Local import to avoid a hard pytest dependency and only incur the + # import time overhead at actual test-time. + import inspect + import pytest + + if msg is None: + out = 'Test skipped due to test condition' + else: + out = msg + + # Allow for both boolean or callable skip conditions. + if isinstance(skip_condition, collections.Callable): + skip_val = lambda: skip_condition() + else: + skip_val = lambda: skip_condition + + # We need to define *two* skippers because Python doesn't allow both + # return with value and yield inside the same function. + def get_msg(func,msg=None): + """Skip message with information about function being skipped.""" + if msg is None: + out = 'Test skipped due to test condition' + else: + out = msg + return "Skipping test: %s: %s" % (func.__name__, out) + + def skipper_func(*args, **kwargs): + """Skipper for normal test functions.""" + if skip_val(): + raise SkipTest(get_msg(f, msg)) + else: + return f(*args, **kwargs) + + def skipper_gen(*args, **kwargs): + """Skipper for test generators.""" + if skip_val(): + raise SkipTest(get_msg(f, msg)) + else: + for x in f(*args, **kwargs): + yield x + + # Choose the right skipper to use when building the actual decorator. + if inspect.isgeneratorfunction(f): + skipper = skipper_gen + else: + skipper = skipper_func + return skipper + + return skip_decorator + + +def knownfailureif(fail_condition, msg=None): + """ + Make function raise KnownFailureException exception if given condition is true. + + If the condition is a callable, it is used at runtime to dynamically + make the decision. This is useful for tests that may require costly + imports, to delay the cost until the test suite is actually executed. + + Parameters + ---------- + fail_condition : bool or callable + Flag to determine whether to mark the decorated test as a known + failure (if True) or not (if False). + msg : str, optional + Message to give on raising a KnownFailureException exception. + Default is None. + + Returns + ------- + decorator : function + Decorator, which, when applied to a function, causes + KnownFailureException to be raised when `fail_condition` is True, + and the function to be called normally otherwise. + + Notes + ----- + The decorator itself is not decorated in the pytest case unlike for nose. + + """ + import pytest + from .utils import KnownFailureException + + if msg is None: + msg = 'Test skipped due to known failure' + + # Allow for both boolean or callable known failure conditions. + if isinstance(fail_condition, collections.Callable): + fail_val = lambda: fail_condition() + else: + fail_val = lambda: fail_condition + + def knownfail_decorator(f): + + def knownfailer(*args, **kwargs): + if fail_val(): + raise KnownFailureException(msg) + return f(*args, **kwargs) + + return knownfailer + + return knownfail_decorator + + +def deprecated(conditional=True): + """ + Filter deprecation warnings while running the test suite. 
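Because `skipif` accepts a callable condition, a potentially expensive check is deferred until the test body actually runs rather than evaluated at import time. A usage sketch; the probe function is hypothetical, and the import path is the one this PR introduces:

```python
from numpy.testing.pytest_tools.decorators import skipif

def _missing_feature():
    # Imagine a costly probe here: an optional import, a hardware check, ...
    return True

@skipif(_missing_feature, msg='optional feature unavailable')
def test_needs_feature():
    assert True  # SkipTest is raised before this runs when the probe is True
```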
+ + This decorator can be used to filter DeprecationWarning's, to avoid + printing them during the test suite run, while checking that the test + actually raises a DeprecationWarning. + + Parameters + ---------- + conditional : bool or callable, optional + Flag to determine whether to mark test as deprecated or not. If the + condition is a callable, it is used at runtime to dynamically make the + decision. Default is True. + + Returns + ------- + decorator : function + The `deprecated` decorator itself. + + Notes + ----- + .. versionadded:: 1.4.0 + + """ + def deprecate_decorator(f): + + def _deprecated_imp(*args, **kwargs): + # Poor man's replacement for the with statement + with assert_warns(DeprecationWarning): + f(*args, **kwargs) + + if isinstance(conditional, collections.Callable): + cond = conditional() + else: + cond = conditional + if cond: + return _deprecated_imp + else: + return f + return deprecate_decorator + + +def parametrize(vars, input): + """ + Pytest compatibility class. This implements the simplest level of + pytest.mark.parametrize for use in nose as an aid in making the transition + to pytest. It achieves that by adding a dummy var parameter and ignoring + the doc_func parameter of the base class. It does not support variable + substitution by name, nor does it support nesting or classes. See the + pytest documentation for usage. + + """ + import pytest + + return pytest.mark.parametrize(vars, input) diff --git a/numpy/testing/pytest_tools/noseclasses.py b/numpy/testing/pytest_tools/noseclasses.py new file mode 100644 index 000000000..2486029fe --- /dev/null +++ b/numpy/testing/pytest_tools/noseclasses.py @@ -0,0 +1,342 @@ +# These classes implement a doctest runner plugin for nose, a "known failure" +# error class, and a customized TestProgram for NumPy. + +# Because this module imports nose directly, it should not +# be used except by nosetester.py to avoid a general NumPy +# dependency on nose. 
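The `parametrize` shim above is a thin pass-through to `pytest.mark.parametrize`, so call sites look exactly like native pytest. A minimal sketch using the import path this PR adds:

```python
from numpy.testing.pytest_tools.decorators import parametrize

@parametrize('n,expected', [(1, 2), (2, 4), (3, 6)])
def test_double(n, expected):
    assert 2 * n == expected
```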
+from __future__ import division, absolute_import, print_function + +import os +import doctest +import inspect + +import numpy +import pytest +from .utils import KnownFailureException, SkipTest +import _pytest.runner +import _pytest.skipping + + +class NpyPlugin(object): + + def pytest_runtest_makereport(self, call): + if call.excinfo: + if call.excinfo.errisinstance(KnownFailureException): + #let's substitute the excinfo with a pytest.xfail one + call2 = call.__class__( + lambda: _pytest.runner.skip(str(call.excinfo.value)), + call.when) + print() + print() + print(call.excinfo._getreprcrash()) + print() + print(call.excinfo) + print() + print(call2.excinfo) + print() + call.excinfo = call2.excinfo + if call.excinfo.errisinstance(SkipTest): + #let's substitute the excinfo with a pytest.skip one + call2 = call.__class__( + lambda: _pytest.runner.skip(str(call.excinfo.value)), + call.when) + call.excinfo = call2.excinfo + + +if False: + from nose.plugins import doctests as npd + from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin + from nose.plugins.base import Plugin + from nose.util import src + from .nosetester import get_package_name + # Some of the classes in this module begin with 'Numpy' to clearly distinguish + # them from the plethora of very similar names from nose/unittest/doctest + + #----------------------------------------------------------------------------- + # Modified version of the one in the stdlib, that fixes a python bug (doctests + # not found in extension modules, http://bugs.python.org/issue3158) + class NumpyDocTestFinder(doctest.DocTestFinder): + + def _from_module(self, module, object): + """ + Return true if the given object is defined in the given + module. + """ + if module is None: + return True + elif inspect.isfunction(object): + return module.__dict__ is object.__globals__ + elif inspect.isbuiltin(object): + return module.__name__ == object.__module__ + elif inspect.isclass(object): + return module.__name__ == object.__module__ + elif inspect.ismethod(object): + # This one may be a bug in cython that fails to correctly set the + # __module__ attribute of methods, but since the same error is easy + # to make by extension code writers, having this safety in place + # isn't such a bad idea + return module.__name__ == object.__self__.__class__.__module__ + elif inspect.getmodule(object) is not None: + return module is inspect.getmodule(object) + elif hasattr(object, '__module__'): + return module.__name__ == object.__module__ + elif isinstance(object, property): + return True # [XX] no way not be sure. + else: + raise ValueError("object must be a class or function") + + def _find(self, tests, obj, name, module, source_lines, globs, seen): + """ + Find tests for the given object and any contained objects, and + add them to `tests`. + """ + + doctest.DocTestFinder._find(self, tests, obj, name, module, + source_lines, globs, seen) + + # Below we re-run pieces of the above method with manual modifications, + # because the original code is buggy and fails to correctly identify + # doctests in extension modules. + + # Local shorthands + from inspect import ( + isroutine, isclass, ismodule, isfunction, ismethod + ) + + # Look for tests in a module's contained objects. 
+ if ismodule(obj) and self._recurse: + for valname, val in obj.__dict__.items(): + valname1 = '%s.%s' % (name, valname) + if ( (isroutine(val) or isclass(val)) + and self._from_module(module, val)): + + self._find(tests, val, valname1, module, source_lines, + globs, seen) + + # Look for tests in a class's contained objects. + if isclass(obj) and self._recurse: + for valname, val in obj.__dict__.items(): + # Special handling for staticmethod/classmethod. + if isinstance(val, staticmethod): + val = getattr(obj, valname) + if isinstance(val, classmethod): + val = getattr(obj, valname).__func__ + + # Recurse to methods, properties, and nested classes. + if ((isfunction(val) or isclass(val) or + ismethod(val) or isinstance(val, property)) and + self._from_module(module, val)): + valname = '%s.%s' % (name, valname) + self._find(tests, val, valname, module, source_lines, + globs, seen) + + + # second-chance checker; if the default comparison doesn't + # pass, then see if the expected output string contains flags that + # tell us to ignore the output + class NumpyOutputChecker(doctest.OutputChecker): + def check_output(self, want, got, optionflags): + ret = doctest.OutputChecker.check_output(self, want, got, + optionflags) + if not ret: + if "#random" in want: + return True + + # it would be useful to normalize endianness so that + # bigendian machines don't fail all the tests (and there are + # actually some bigendian examples in the doctests). Let's try + # making them all little endian + got = got.replace("'>", "'<") + want = want.replace("'>", "'<") + + # try to normalize out 32 and 64 bit default int sizes + for sz in [4, 8]: + got = got.replace("'<i%d'" % sz, "int") + want = want.replace("'<i%d'" % sz, "int") + + ret = doctest.OutputChecker.check_output(self, want, + got, optionflags) + + return ret + + + # Subclass nose.plugins.doctests.DocTestCase to work around a bug in + # its constructor that blocks non-default arguments from being passed + # down into doctest.DocTestCase + class NumpyDocTestCase(npd.DocTestCase): + def __init__(self, test, optionflags=0, setUp=None, tearDown=None, + checker=None, obj=None, result_var='_'): + self._result_var = result_var + self._nose_obj = obj + doctest.DocTestCase.__init__(self, test, + optionflags=optionflags, + setUp=setUp, tearDown=tearDown, + checker=checker) + + + print_state = numpy.get_printoptions() + + class NumpyDoctest(npd.Doctest): + name = 'numpydoctest' # call nosetests with --with-numpydoctest + score = 1000 # load late, after doctest builtin + + # always use whitespace and ellipsis options for doctests + doctest_optflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS + + # files that should be ignored for doctests + doctest_ignore = ['generate_numpy_api.py', + 'setup.py'] + + # Custom classes; class variables to allow subclassing + doctest_case_class = NumpyDocTestCase + out_check_class = NumpyOutputChecker + test_finder_class = NumpyDocTestFinder + + # Don't use the standard doctest option handler; hard-code the option values + def options(self, parser, env=os.environ): + Plugin.options(self, parser, env) + # Test doctests in 'test' files / directories. Standard plugin default + # is False + self.doctest_tests = True + # Variable name; if defined, doctest results stored in this variable in + # the top-level namespace. 
None is the standard default + self.doctest_result_var = None + + def configure(self, options, config): + # parent method sets enabled flag from command line --with-numpydoctest + Plugin.configure(self, options, config) + self.finder = self.test_finder_class() + self.parser = doctest.DocTestParser() + if self.enabled: + # Pull standard doctest out of plugin list; there's no reason to run + # both. In practice the Unplugger plugin above would cover us when + # run from a standard numpy.test() call; this is just in case + # someone wants to run our plugin outside the numpy.test() machinery + config.plugins.plugins = [p for p in config.plugins.plugins + if p.name != 'doctest'] + + def set_test_context(self, test): + """ Configure `test` object to set test context + + We set the numpy / scipy standard doctest namespace + + Parameters + ---------- + test : test object + with ``globs`` dictionary defining namespace + + Returns + ------- + None + + Notes + ----- + `test` object modified in place + """ + # set the namespace for tests + pkg_name = get_package_name(os.path.dirname(test.filename)) + + # Each doctest should execute in an environment equivalent to + # starting Python and executing "import numpy as np", and, + # for SciPy packages, an additional import of the local + # package (so that scipy.linalg.basic.py's doctests have an + # implicit "from scipy import linalg" as well. + # + # Note: __file__ allows the doctest in NoseTester to run + # without producing an error + test.globs = {'__builtins__':__builtins__, + '__file__':'__main__', + '__name__':'__main__', + 'np':numpy} + # add appropriate scipy import for SciPy tests + if 'scipy' in pkg_name: + p = pkg_name.split('.') + p2 = p[-1] + test.globs[p2] = __import__(pkg_name, test.globs, {}, [p2]) + + # Override test loading to customize test context (with set_test_context + # method), set standard docstring options, and install our own test output + # checker + def loadTestsFromModule(self, module): + if not self.matches(module.__name__): + npd.log.debug("Doctest doesn't want module %s", module) + return + try: + tests = self.finder.find(module) + except AttributeError: + # nose allows module.__test__ = False; doctest does not and + # throws AttributeError + return + if not tests: + return + tests.sort() + module_file = src(module.__file__) + for test in tests: + if not test.examples: + continue + if not test.filename: + test.filename = module_file + # Set test namespace; test altered in place + self.set_test_context(test) + yield self.doctest_case_class(test, + optionflags=self.doctest_optflags, + checker=self.out_check_class(), + result_var=self.doctest_result_var) + + # Add an afterContext method to nose.plugins.doctests.Doctest in order + # to restore print options to the original state after each doctest + def afterContext(self): + numpy.set_printoptions(**print_state) + + # Ignore NumPy-specific build files that shouldn't be searched for tests + def wantFile(self, file): + bn = os.path.basename(file) + if bn in self.doctest_ignore: + return False + return npd.Doctest.wantFile(self, file) + + + class Unplugger(object): + """ Nose plugin to remove named plugin late in loading + + By default it removes the "doctest" plugin. 
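`NumpyOutputChecker` (earlier in this file) retries a failed doctest comparison after normalizing the output, and honors a `#random` escape hatch in the expected text. The same pattern in isolation, as a sketch rather than the exact NumPy class:

```python
import doctest

class LooseChecker(doctest.OutputChecker):
    def check_output(self, want, got, optionflags):
        if doctest.OutputChecker.check_output(self, want, got, optionflags):
            return True
        # "#random" in the expected output means: accept whatever we got.
        return '#random' in want

checker = LooseChecker()
assert checker.check_output('3  #random\n', '7\n', 0)
assert not checker.check_output('3\n', '7\n', 0)
```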
+ """ + name = 'unplugger' + enabled = True # always enabled + score = 4000 # load late in order to be after builtins + + def __init__(self, to_unplug='doctest'): + self.to_unplug = to_unplug + + def options(self, parser, env): + pass + + def configure(self, options, config): + # Pull named plugin out of plugins list + config.plugins.plugins = [p for p in config.plugins.plugins + if p.name != self.to_unplug] + + + + # Class allows us to save the results of the tests in runTests - see runTests + # method docstring for details + class NumpyTestProgram(nose.core.TestProgram): + def runTests(self): + """Run Tests. Returns true on success, false on failure, and + sets self.success to the same value. + + Because nose currently discards the test result object, but we need + to return it to the user, override TestProgram.runTests to retain + the result + """ + if self.testRunner is None: + self.testRunner = nose.core.TextTestRunner(stream=self.config.stream, + verbosity=self.config.verbosity, + config=self.config) + plug_runner = self.config.plugins.prepareTestRunner(self.testRunner) + if plug_runner is not None: + self.testRunner = plug_runner + self.result = self.testRunner.run(self.test) + self.success = self.result.wasSuccessful() + return self.success + diff --git a/numpy/testing/pytest_tools/nosetester.py b/numpy/testing/pytest_tools/nosetester.py new file mode 100644 index 000000000..46e2b9b8c --- /dev/null +++ b/numpy/testing/pytest_tools/nosetester.py @@ -0,0 +1,566 @@ +""" +Nose test running. + +This module implements ``test()`` and ``bench()`` functions for NumPy modules. + +""" +from __future__ import division, absolute_import, print_function + +import os +import sys +import warnings +from numpy.compat import basestring +import numpy as np + +from .utils import import_nose, suppress_warnings + + +__all__ = ['get_package_name', 'run_module_suite', 'NoseTester', + '_numpy_tester', 'get_package_name', 'import_nose', + 'suppress_warnings'] + + +def get_package_name(filepath): + """ + Given a path where a package is installed, determine its name. + + Parameters + ---------- + filepath : str + Path to a file. If the determination fails, "numpy" is returned. + + Examples + -------- + >>> np.testing.nosetester.get_package_name('nonsense') + 'numpy' + + """ + + fullpath = filepath[:] + pkg_name = [] + while 'site-packages' in filepath or 'dist-packages' in filepath: + filepath, p2 = os.path.split(filepath) + if p2 in ('site-packages', 'dist-packages'): + break + pkg_name.append(p2) + + # if package name determination failed, just default to numpy/scipy + if not pkg_name: + if 'scipy' in fullpath: + return 'scipy' + else: + return 'numpy' + + # otherwise, reverse to get correct order and return + pkg_name.reverse() + + # don't include the outer egg directory + if pkg_name[0].endswith('.egg'): + pkg_name.pop(0) + + return '.'.join(pkg_name) + + +def run_module_suite(file_to_run=None, argv=None): + """ + Run a test module. + + Equivalent to calling ``$ nosetests <argv> <file_to_run>`` from + the command line. This version is for pytest rather than nose. + + Parameters + ---------- + file_to_run : str, optional + Path to test module, or None. + By default, run the module from which this function is called. + argv : list of strings + Arguments to be passed to the pytest runner. ``argv[0]`` is + ignored. All command line arguments accepted by ``pytest`` + will work. If it is the default value None, sys.argv is used. + + .. 
versionadded:: 1.14.0 + + Examples + -------- + Adding the following:: + + if __name__ == "__main__" : + run_module_suite(argv=sys.argv) + + at the end of a test module will run the tests when that module is + called in the python interpreter. + + Alternatively, calling:: + + >>> run_module_suite(file_to_run="numpy/tests/test_matlib.py") + + from an interpreter will run all the test routine in 'test_matlib.py'. + """ + import pytest + if file_to_run is None: + f = sys._getframe(1) + file_to_run = f.f_locals.get('__file__', None) + if file_to_run is None: + raise AssertionError + + if argv is None: + argv = sys.argv[1:] + [file_to_run] + else: + argv = argv + [file_to_run] + + pytest.main(argv) + +if False: + # disable run_module_suite and NoseTester + # until later + class NoseTester(object): + """ + Nose test runner. + + This class is made available as numpy.testing.Tester, and a test function + is typically added to a package's __init__.py like so:: + + from numpy.testing import Tester + test = Tester().test + + Calling this test function finds and runs all tests associated with the + package and all its sub-packages. + + Attributes + ---------- + package_path : str + Full path to the package to test. + package_name : str + Name of the package to test. + + Parameters + ---------- + package : module, str or None, optional + The package to test. If a string, this should be the full path to + the package. If None (default), `package` is set to the module from + which `NoseTester` is initialized. + raise_warnings : None, str or sequence of warnings, optional + This specifies which warnings to configure as 'raise' instead + of being shown once during the test execution. Valid strings are: + + - "develop" : equals ``(Warning,)`` + - "release" : equals ``()``, don't raise on any warnings. + + Default is "release". + depth : int, optional + If `package` is None, then this can be used to initialize from the + module of the caller of (the caller of (...)) the code that + initializes `NoseTester`. Default of 0 means the module of the + immediate caller; higher values are useful for utility routines that + want to initialize `NoseTester` objects on behalf of other code. + + """ + def __init__(self, package=None, raise_warnings="release", depth=0): + # Back-compat: 'None' used to mean either "release" or "develop" + # depending on whether this was a release or develop version of + # numpy. Those semantics were fine for testing numpy, but not so + # helpful for downstream projects like scipy that use + # numpy.testing. (They want to set this based on whether *they* are a + # release or develop version, not whether numpy is.) So we continue to + # accept 'None' for back-compat, but it's now just an alias for the + # default "release". + if raise_warnings is None: + raise_warnings = "release" + + package_name = None + if package is None: + f = sys._getframe(1 + depth) + package_path = f.f_locals.get('__file__', None) + if package_path is None: + raise AssertionError + package_path = os.path.dirname(package_path) + package_name = f.f_locals.get('__name__', None) + elif isinstance(package, type(os)): + package_path = os.path.dirname(package.__file__) + package_name = getattr(package, '__name__', None) + else: + package_path = str(package) + + self.package_path = package_path + + # Find the package name under test; this name is used to limit coverage + # reporting (if enabled). 
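`run_module_suite` above reduces to assembling an argv list and handing it to `pytest.main`. The equivalent direct call, using the target path from the docstring above:

```python
import sys
import pytest

# What run_module_suite(file_to_run='numpy/tests/test_matlib.py') boils
# down to: forward our own CLI arguments, then the file to test.
pytest.main(sys.argv[1:] + ['numpy/tests/test_matlib.py'])
```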
+ if package_name is None: + package_name = get_package_name(package_path) + self.package_name = package_name + + # Set to "release" in constructor in maintenance branches. + self.raise_warnings = raise_warnings + + def _test_argv(self, label, verbose, extra_argv): + ''' Generate argv for nosetests command + + Parameters + ---------- + label : {'fast', 'full', '', attribute identifier}, optional + see ``test`` docstring + verbose : int, optional + Integer in range 1..3, bigger means more verbose. + extra_argv : list, optional + List with any extra arguments to pass to nosetests. + + Returns + ------- + argv : list + command line arguments that will be passed to nose + ''' + argv = [__file__, self.package_path, '-s'] + if label and label != 'full': + if not isinstance(label, basestring): + raise TypeError('Selection label should be a string') + if label == 'fast': + label = 'not slow' + argv += ['-A', label] + + argv += [['-q'], [''], ['-v']][min(verbose - 1, 2)] + + # FIXME is this true of pytest + # When installing with setuptools, and also in some other cases, the + # test_*.py files end up marked +x executable. Nose, by default, does + # not run files marked with +x as they might be scripts. However, in + # our case nose only looks for test_*.py files under the package + # directory, which should be safe. + # argv += ['--exe'] + if extra_argv: + argv += extra_argv + return argv + + def _show_system_info(self): + import pytest + import numpy + + print("NumPy version %s" % numpy.__version__) + relaxed_strides = numpy.ones((10, 1), order="C").flags.f_contiguous + print("NumPy relaxed strides checking option:", relaxed_strides) + npdir = os.path.dirname(numpy.__file__) + print("NumPy is installed in %s" % npdir) + + if 'scipy' in self.package_name: + import scipy + print("SciPy version %s" % scipy.__version__) + spdir = os.path.dirname(scipy.__file__) + print("SciPy is installed in %s" % spdir) + + pyversion = sys.version.replace('\n', '') + print("Python version %s" % pyversion) + print("pytest version %d.%d.%d" % pytest.__versioninfo__) + + def _get_custom_doctester(self): + """ Return instantiated plugin for doctests + + Allows subclassing of this class to override doctester + + A return value of None means use the nose builtin doctest plugin + """ + from .noseclasses import NumpyDoctest + return NumpyDoctest() + + def prepare_test_args(self, label='fast', verbose=1, extra_argv=None, + doctests=False, coverage=False, timer=False): + """ + Run tests for module using nose. + + This method does the heavy lifting for the `test` method. It takes all + the same arguments, for details see `test`. 
+ + See Also + -------- + test + + """ + # fail with nice error message if nose is not present + import_nose() + # compile argv + argv = self._test_argv(label, verbose, extra_argv) + # our way of doing coverage + if coverage: + argv += ['--cover-package=%s' % self.package_name, '--with-coverage', + '--cover-tests', '--cover-erase'] + + if timer: + if timer is True: + argv += ['--with-timer'] + elif isinstance(timer, int): + argv += ['--with-timer', '--timer-top-n', str(timer)] + + # construct list of plugins + import nose.plugins.builtin + from nose.plugins import EntryPointPluginManager + from .noseclasses import KnownFailurePlugin, Unplugger + plugins = [KnownFailurePlugin()] + plugins += [p() for p in nose.plugins.builtin.plugins] + try: + # External plugins (like nose-timer) + entrypoint_manager = EntryPointPluginManager() + entrypoint_manager.loadPlugins() + plugins += [p for p in entrypoint_manager.plugins] + except ImportError: + # Relies on pkg_resources, not a hard dependency + pass + + # add doctesting if required + doctest_argv = '--with-doctest' in argv + if doctests == False and doctest_argv: + doctests = True + plug = self._get_custom_doctester() + if plug is None: + # use standard doctesting + if doctests and not doctest_argv: + argv += ['--with-doctest'] + else: # custom doctesting + if doctest_argv: # in fact the unplugger would take care of this + argv.remove('--with-doctest') + plugins += [Unplugger('doctest'), plug] + if doctests: + argv += ['--with-' + plug.name] + return argv, plugins + + def test(self, label='fast', verbose=1, extra_argv=None, + doctests=False, coverage=False, raise_warnings=None, + timer=False): + """ + Run tests for module using nose. + + Parameters + ---------- + label : {'fast', 'full', '', attribute identifier}, optional + Identifies the tests to run. This can be a string to pass to + the nosetests executable with the '-A' option, or one of several + special values. Special values are: + * 'fast' - the default - which corresponds to the ``nosetests -A`` + option of 'not slow'. + * 'full' - fast (as above) and slow tests as in the + 'no -A' option to nosetests - this is the same as ''. + * None or '' - run all tests. + attribute_identifier - string passed directly to nosetests as '-A'. + verbose : int, optional + Verbosity value for test outputs, in the range 1..3. Default is 1. + extra_argv : list, optional + List with any extra arguments to pass to nosetests. + doctests : bool, optional + If True, run doctests in module. Default is False. + coverage : bool, optional + If True, report coverage of NumPy code. Default is False. + (This requires the `coverage module: + <http://nedbatchelder.com/code/modules/coverage.html>`_). + raise_warnings : None, str or sequence of warnings, optional + This specifies which warnings to configure as 'raise' instead + of being shown once during the test execution. Valid strings are: + + - "develop" : equals ``(Warning,)`` + - "release" : equals ``()``, don't raise on any warnings. + + The default is to use the class initialization value. + timer : bool or int, optional + Timing of individual tests with ``nose-timer`` (which needs to be + installed). If True, time tests and report on all of them. + If an integer (say ``N``), report timing results for ``N`` slowest + tests. + + Returns + ------- + result : object + Returns the result of running the tests as a + ``nose.result.TextTestResult`` object. + + Notes + ----- + Each NumPy module exposes `test` in its namespace to run all tests for it. 
+ For example, to run all tests for numpy.lib: + + >>> np.lib.test() #doctest: +SKIP + + Examples + -------- + >>> result = np.lib.test() #doctest: +SKIP + Running unit tests for numpy.lib + ... + Ran 976 tests in 3.933s + + OK + + >>> result.errors #doctest: +SKIP + [] + >>> result.knownfail #doctest: +SKIP + [] + """ + + # cap verbosity at 3 because nose becomes *very* verbose beyond that + verbose = min(verbose, 3) + + from . import utils + utils.verbose = verbose + + argv, plugins = self.prepare_test_args( + label, verbose, extra_argv, doctests, coverage, timer) + + if doctests: + print("Running unit tests and doctests for %s" % self.package_name) + else: + print("Running unit tests for %s" % self.package_name) + + self._show_system_info() + + # reset doctest state on every run + import doctest + doctest.master = None + + if raise_warnings is None: + raise_warnings = self.raise_warnings + + _warn_opts = dict(develop=(Warning,), + release=()) + if isinstance(raise_warnings, basestring): + raise_warnings = _warn_opts[raise_warnings] + + with suppress_warnings("location") as sup: + # Reset the warning filters to the default state, + # so that running the tests is more repeatable. + warnings.resetwarnings() + # Set all warnings to 'warn', this is because the default 'once' + # has the bad property of possibly shadowing later warnings. + warnings.filterwarnings('always') + # Force the requested warnings to raise + for warningtype in raise_warnings: + warnings.filterwarnings('error', category=warningtype) + # Filter out annoying import messages. + sup.filter(message='Not importing directory') + sup.filter(message="numpy.dtype size changed") + sup.filter(message="numpy.ufunc size changed") + sup.filter(category=np.ModuleDeprecationWarning) + # Filter out boolean '-' deprecation messages. This allows + # older versions of scipy to test without a flood of messages. + sup.filter(message=".*boolean negative.*") + sup.filter(message=".*boolean subtract.*") + # Filter out distutils cpu warnings (could be localized to + # distutils tests). ASV has problems with top level import, + # so fetch module for suppression here. + with warnings.catch_warnings(): + warnings.simplefilter("always") + from ...distutils import cpuinfo + sup.filter(category=UserWarning, module=cpuinfo) + # See #7949: Filter out deprecation warnings due to the -3 flag to + # python 2 + if sys.version_info.major == 2 and sys.py3kwarning: + # This is very specific, so using the fragile module filter + # is fine + import threading + sup.filter(DeprecationWarning, + r"sys\.exc_clear\(\) not supported in 3\.x", + module=threading) + sup.filter(DeprecationWarning, message=r"in 3\.x, __setslice__") + sup.filter(DeprecationWarning, message=r"in 3\.x, __getslice__") + sup.filter(DeprecationWarning, message=r"buffer\(\) not supported in 3\.x") + sup.filter(DeprecationWarning, message=r"CObject type is not supported in 3\.x") + sup.filter(DeprecationWarning, message=r"comparing unequal types not supported in 3\.x") + # Filter out some deprecation warnings inside nose 1.3.7 when run + # on python 3.5b2. See + # https://github.com/nose-devs/nose/issues/929 + # Note: it is hard to filter based on module for sup (lineno could + # be implemented). 
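The filtering above is built on `numpy.testing`'s `suppress_warnings` context manager, which scopes warning filters to a block and can suppress by message or module. A minimal usage sketch:

```python
import warnings
from numpy.testing import suppress_warnings

with suppress_warnings() as sup:
    # Suppress only this one deprecation message inside the block;
    # anything else warns as usual.
    sup.filter(DeprecationWarning, message='old helper is deprecated')
    warnings.warn('old helper is deprecated', DeprecationWarning)
```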
+ warnings.filterwarnings("ignore", message=".*getargspec.*", + category=DeprecationWarning, + module=r"nose\.") + + from .noseclasses import NumpyTestProgram + + t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins) + + return t.result + + def bench(self, label='fast', verbose=1, extra_argv=None): + """ + Run benchmarks for module using nose. + + Parameters + ---------- + label : {'fast', 'full', '', attribute identifier}, optional + Identifies the benchmarks to run. This can be a string to pass to + the nosetests executable with the '-A' option, or one of several + special values. Special values are: + * 'fast' - the default - which corresponds to the ``nosetests -A`` + option of 'not slow'. + * 'full' - fast (as above) and slow benchmarks as in the + 'no -A' option to nosetests - this is the same as ''. + * None or '' - run all tests. + attribute_identifier - string passed directly to nosetests as '-A'. + verbose : int, optional + Integer in range 1..3, bigger means more verbose. + extra_argv : list, optional + List with any extra arguments to pass to nosetests. + + Returns + ------- + success : bool + Returns True if running the benchmarks works, False if an error + occurred. + + Notes + ----- + Benchmarks are like tests, but have names starting with "bench" instead + of "test", and can be found under the "benchmarks" sub-directory of the + module. + + Each NumPy module exposes `bench` in its namespace to run all benchmarks + for it. + + Examples + -------- + >>> success = np.lib.bench() #doctest: +SKIP + Running benchmarks for numpy.lib + ... + using 562341 items: + unique: + 0.11 + unique1d: + 0.11 + ratio: 1.0 + nUnique: 56230 == 56230 + ... + OK + + >>> success #doctest: +SKIP + True + + """ + + print("Running benchmarks for %s" % self.package_name) + self._show_system_info() + + argv = self._test_argv(label, verbose, extra_argv) + argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep] + + # import nose or make informative error + nose = import_nose() + + # get plugin to disable doctests + from .noseclasses import Unplugger + add_plugins = [Unplugger('doctest')] + + return nose.run(argv=argv, addplugins=add_plugins) +else: + + class NoseTester(object): + def __init__(self, package=None, raise_warnings="release", depth=0): + pass + + def test(self, label='fast', verbose=1, extra_argv=None, + doctests=False, coverage=False, raise_warnings=None, + timer=False): + pass + + def bench(self, label='fast', verbose=1, extra_argv=None): + pass + + +def _numpy_tester(): + if hasattr(np, "__version__") and ".dev0" in np.__version__: + mode = "develop" + else: + mode = "release" + return NoseTester(raise_warnings=mode, depth=1) diff --git a/numpy/testing/pytest_tools/utils.py b/numpy/testing/pytest_tools/utils.py new file mode 100644 index 000000000..19982ec54 --- /dev/null +++ b/numpy/testing/pytest_tools/utils.py @@ -0,0 +1,2275 @@ +""" +Utility function to facilitate testing. 
+ +""" +from __future__ import division, absolute_import, print_function + +import os +import sys +import re +import operator +import warnings +from functools import partial, wraps +import shutil +import contextlib +from tempfile import mkdtemp, mkstemp + +from numpy.core import( + float32, empty, arange, array_repr, ndarray, isnat, array) +from numpy.lib.utils import deprecate + +if sys.version_info[0] >= 3: + from io import StringIO +else: + from StringIO import StringIO + +__all__ = [ + 'assert_equal', 'assert_almost_equal', 'assert_approx_equal', + 'assert_array_equal', 'assert_array_less', 'assert_string_equal', + 'assert_array_almost_equal', 'assert_raises', 'build_err_msg', + 'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal', + 'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure', + 'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex', + 'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings', + 'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings', + 'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY', + 'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare', + '_assert_valid_refcount', '_gen_alignment_data', + ] + + +class KnownFailureException(Exception): + """Raise this exception to mark a test as a known failing test. + + """ + def __new__(cls, *args, **kwargs): + # import _pytest here to avoid hard dependency + import _pytest + return _pytest.skipping.xfail(*args, **kwargs) + + +class SkipTest(Exception): + """Raise this exception to mark a skipped test. + + """ + def __new__(cls, *args, **kwargs): + # import _pytest here to avoid hard dependency + import _pytest + return _pytest.runner.Skipped(*args, **kwargs) + + +class IgnoreException(Exception): + """Ignoring this exception due to disabled feature + + This exception seems unused and can be removed. + + """ + pass + + +KnownFailureTest = KnownFailureException # backwards compat + +verbose = 0 + +IS_PYPY = '__pypy__' in sys.modules +HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None + + +def import_nose(): + """ Not wanted for pytest, make it a dummy function + + """ + pass + + +def assert_(val, msg=''): + """ + Assert that works in release mode. + Accepts callable msg to allow deferring evaluation until failure. + + The Python built-in ``assert`` does not work when executing code in + optimized mode (the ``-O`` flag) - no byte-code is generated for it. + + For documentation on usage, refer to the Python documentation. + + """ + __tracebackhide__ = True # Hide traceback for py.test + if not val: + try: + smsg = msg() + except TypeError: + smsg = msg + raise AssertionError(smsg) + + +def gisnan(x): + """like isnan, but always raise an error if type not supported instead of + returning a TypeError object. + + Notes + ----- + isnan and other ufunc sometimes return a NotImplementedType object instead + of raising any exception. This function is a wrapper to make sure an + exception is always raised. + + This should be removed once this problem is solved at the Ufunc level.""" + from numpy.core import isnan + st = isnan(x) + if isinstance(st, type(NotImplemented)): + raise TypeError("isnan not supported for this type") + return st + + +def gisfinite(x): + """like isfinite, but always raise an error if type not supported instead of + returning a TypeError object. + + Notes + ----- + isfinite and other ufunc sometimes return a NotImplementedType object instead + of raising any exception. 
This function is a wrapper to make sure an + exception is always raised. + + This should be removed once this problem is solved at the Ufunc level.""" + from numpy.core import isfinite, errstate + with errstate(invalid='ignore'): + st = isfinite(x) + if isinstance(st, type(NotImplemented)): + raise TypeError("isfinite not supported for this type") + return st + + +def gisinf(x): + """like isinf, but always raise an error if type not supported instead of + returning a TypeError object. + + Notes + ----- + isinf and other ufunc sometimes return a NotImplementedType object instead + of raising any exception. This function is a wrapper to make sure an + exception is always raised. + + This should be removed once this problem is solved at the Ufunc level.""" + from numpy.core import isinf, errstate + with errstate(invalid='ignore'): + st = isinf(x) + if isinstance(st, type(NotImplemented)): + raise TypeError("isinf not supported for this type") + return st + + +@deprecate(message="numpy.testing.rand is deprecated in numpy 1.11. " + "Use numpy.random.rand instead.") +def rand(*args): + """Returns an array of random numbers with the given shape. + + This only uses the standard library, so it is useful for testing purposes. + """ + import random + from numpy.core import zeros, float64 + results = zeros(args, float64) + f = results.flat + for i in range(len(f)): + f[i] = random.random() + return results + + +if os.name == 'nt': + # Code "stolen" from enthought/debug/memusage.py + def GetPerformanceAttributes(object, counter, instance=None, + inum=-1, format=None, machine=None): + # NOTE: Many counters require 2 samples to give accurate results, + # including "% Processor Time" (as by definition, at any instant, a + # thread's CPU usage is either 0 or 100). To read counters like this, + # you should copy this function, but keep the counter open, and call + # CollectQueryData() each time you need to know. + # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp + # My older explanation for this was that the "AddCounter" process forced + # the CPU to 100%, but the above makes more sense :) + import win32pdh + if format is None: + format = win32pdh.PDH_FMT_LONG + path = win32pdh.MakeCounterPath( (machine, object, instance, None, inum, counter)) + hq = win32pdh.OpenQuery() + try: + hc = win32pdh.AddCounter(hq, path) + try: + win32pdh.CollectQueryData(hq) + type, val = win32pdh.GetFormattedCounterValue(hc, format) + return val + finally: + win32pdh.RemoveCounter(hc) + finally: + win32pdh.CloseQuery(hq) + + def memusage(processName="python", instance=0): + # from win32pdhutil, part of the win32all package + import win32pdh + return GetPerformanceAttributes("Process", "Virtual Bytes", + processName, instance, + win32pdh.PDH_FMT_LONG, None) +elif sys.platform[:5] == 'linux': + + def memusage(_proc_pid_stat='/proc/%s/stat' % (os.getpid())): + """ + Return virtual memory size in bytes of the running python. + + """ + try: + f = open(_proc_pid_stat, 'r') + l = f.readline().split(' ') + f.close() + return int(l[22]) + except Exception: + return +else: + def memusage(): + """ + Return memory usage of running python. [Not implemented] + + """ + raise NotImplementedError + + +if sys.platform[:5] == 'linux': + def jiffies(_proc_pid_stat='/proc/%s/stat' % (os.getpid()), + _load_time=[]): + """ + Return number of jiffies elapsed. + + Return number of jiffies (1/100ths of a second) that this + process has been scheduled in user mode. See man 5 proc. 
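`jiffies` keeps its fallback start time in a mutable default argument (`_load_time=[]`), a small idiom for giving a function persistent state without a global. The trick in isolation:

```python
import time

def hundredths_since_first_call(_start=[]):
    # The default list is created once and shared across calls,
    # so the first call's timestamp persists.
    if not _start:
        _start.append(time.time())
    return int(100 * (time.time() - _start[0]))

hundredths_since_first_call()   # ~0 on the first call
time.sleep(0.05)
assert hundredths_since_first_call() >= 4
```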
+ + """ + import time + if not _load_time: + _load_time.append(time.time()) + try: + f = open(_proc_pid_stat, 'r') + l = f.readline().split(' ') + f.close() + return int(l[13]) + except Exception: + return int(100*(time.time()-_load_time[0])) +else: + # os.getpid is not in all platforms available. + # Using time is safe but inaccurate, especially when process + # was suspended or sleeping. + def jiffies(_load_time=[]): + """ + Return number of jiffies elapsed. + + Return number of jiffies (1/100ths of a second) that this + process has been scheduled in user mode. See man 5 proc. + + """ + import time + if not _load_time: + _load_time.append(time.time()) + return int(100*(time.time()-_load_time[0])) + + +def build_err_msg(arrays, err_msg, header='Items are not equal:', + verbose=True, names=('ACTUAL', 'DESIRED'), precision=8): + msg = ['\n' + header] + if err_msg: + if err_msg.find('\n') == -1 and len(err_msg) < 79-len(header): + msg = [msg[0] + ' ' + err_msg] + else: + msg.append(err_msg) + if verbose: + for i, a in enumerate(arrays): + + if isinstance(a, ndarray): + # precision argument is only needed if the objects are ndarrays + r_func = partial(array_repr, precision=precision) + else: + r_func = repr + + try: + r = r_func(a) + except Exception as exc: + r = '[repr failed for <{}>: {}]'.format(type(a).__name__, exc) + if r.count('\n') > 3: + r = '\n'.join(r.splitlines()[:3]) + r += '...' + msg.append(' %s: %s' % (names[i], r)) + return '\n'.join(msg) + + +def assert_equal(actual, desired, err_msg='', verbose=True): + """ + Raises an AssertionError if two objects are not equal. + + Given two objects (scalars, lists, tuples, dictionaries or numpy arrays), + check that all elements of these objects are equal. An exception is raised + at the first conflicting values. + + Parameters + ---------- + actual : array_like + The object to check. + desired : array_like + The expected object. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal. + + Examples + -------- + >>> np.testing.assert_equal([4,5], [4,6]) + ... 
+ <type 'exceptions.AssertionError'>: + Items are not equal: + item=1 + ACTUAL: 5 + DESIRED: 6 + + """ + __tracebackhide__ = True # Hide traceback for py.test + if isinstance(desired, dict): + if not isinstance(actual, dict): + raise AssertionError(repr(type(actual))) + assert_equal(len(actual), len(desired), err_msg, verbose) + for k, i in desired.items(): + if k not in actual: + raise AssertionError(repr(k)) + assert_equal(actual[k], desired[k], 'key=%r\n%s' % (k, err_msg), verbose) + return + if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)): + assert_equal(len(actual), len(desired), err_msg, verbose) + for k in range(len(desired)): + assert_equal(actual[k], desired[k], 'item=%r\n%s' % (k, err_msg), verbose) + return + from numpy.core import ndarray, isscalar, signbit + from numpy.lib import iscomplexobj, real, imag + if isinstance(actual, ndarray) or isinstance(desired, ndarray): + return assert_array_equal(actual, desired, err_msg, verbose) + msg = build_err_msg([actual, desired], err_msg, verbose=verbose) + + # Handle complex numbers: separate into real/imag to handle + # nan/inf/negative zero correctly + # XXX: catch ValueError for subclasses of ndarray where iscomplex fail + try: + usecomplex = iscomplexobj(actual) or iscomplexobj(desired) + except ValueError: + usecomplex = False + + if usecomplex: + if iscomplexobj(actual): + actualr = real(actual) + actuali = imag(actual) + else: + actualr = actual + actuali = 0 + if iscomplexobj(desired): + desiredr = real(desired) + desiredi = imag(desired) + else: + desiredr = desired + desiredi = 0 + try: + assert_equal(actualr, desiredr) + assert_equal(actuali, desiredi) + except AssertionError: + raise AssertionError(msg) + + # isscalar test to check cases such as [np.nan] != np.nan + if isscalar(desired) != isscalar(actual): + raise AssertionError(msg) + + # Inf/nan/negative zero handling + try: + # If one of desired/actual is not finite, handle it specially here: + # check that both are nan if any is a nan, and test for equality + # otherwise + if not (gisfinite(desired) and gisfinite(actual)): + isdesnan = gisnan(desired) + isactnan = gisnan(actual) + if isdesnan or isactnan: + if not (isdesnan and isactnan): + raise AssertionError(msg) + else: + if not desired == actual: + raise AssertionError(msg) + return + elif desired == 0 and actual == 0: + if not signbit(desired) == signbit(actual): + raise AssertionError(msg) + # If TypeError or ValueError raised while using isnan and co, just handle + # as before + except (TypeError, ValueError, NotImplementedError): + pass + + try: + # If both are NaT (and have the same dtype -- datetime or timedelta) + # they are considered equal. + if (isnat(desired) == isnat(actual) and + array(desired).dtype.type == array(actual).dtype.type): + return + else: + raise AssertionError(msg) + + # If TypeError or ValueError raised while using isnan and co, just handle + # as before + except (TypeError, ValueError, NotImplementedError): + pass + + # Explicitly use __eq__ for comparison, ticket #2552 + if not (desired == actual): + raise AssertionError(msg) + + +def print_assert_equal(test_string, actual, desired): + """ + Test if two objects are equal, and print an error message if test fails. + + The test is performed with ``actual == desired``. + + Parameters + ---------- + test_string : str + The message supplied to AssertionError. + actual : object + The object to test for equality against `desired`. + desired : object + The expected result. 
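`assert_equal` above recurses through dicts and sequences key-by-key and item-by-item, delegating to `assert_array_equal` whenever it meets an ndarray. A small usage sketch:

```python
import numpy as np
from numpy.testing import assert_equal

# Nested containers compare structurally; the ndarray leaf is handed
# off to assert_array_equal.
assert_equal({'a': [1, 2], 'b': np.arange(3)},
             {'a': [1, 2], 'b': np.array([0, 1, 2])})
```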
+ + Examples + -------- + >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1]) + >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2]) + Traceback (most recent call last): + ... + AssertionError: Test XYZ of func xyz failed + ACTUAL: + [0, 1] + DESIRED: + [0, 2] + + """ + __tracebackhide__ = True # Hide traceback for py.test + import pprint + + if not (actual == desired): + msg = StringIO() + msg.write(test_string) + msg.write(' failed\nACTUAL: \n') + pprint.pprint(actual, msg) + msg.write('DESIRED: \n') + pprint.pprint(desired, msg) + raise AssertionError(msg.getvalue()) + + +def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True): + """ + Raises an AssertionError if two items are not equal up to desired + precision. + + .. note:: It is recommended to use one of `assert_allclose`, + `assert_array_almost_equal_nulp` or `assert_array_max_ulp` + instead of this function for more consistent floating point + comparisons. + + The test verifies that the elements of ``actual`` and ``desired`` satisfy. + + ``abs(desired-actual) < 1.5 * 10**(-decimal)`` + + That is a looser test than originally documented, but agrees with what the + actual implementation in `assert_array_almost_equal` did up to rounding + vagaries. An exception is raised at conflicting values. For ndarrays this + delegates to assert_array_almost_equal + + Parameters + ---------- + actual : array_like + The object to check. + desired : array_like + The expected object. + decimal : int, optional + Desired precision, default is 7. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. + + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. + assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Examples + -------- + >>> import numpy.testing as npt + >>> npt.assert_almost_equal(2.3333333333333, 2.33333334) + >>> npt.assert_almost_equal(2.3333333333333, 2.33333334, decimal=10) + ... + <type 'exceptions.AssertionError'>: + Items are not equal: + ACTUAL: 2.3333333333333002 + DESIRED: 2.3333333399999998 + + >>> npt.assert_almost_equal(np.array([1.0,2.3333333333333]), + ... np.array([1.0,2.33333334]), decimal=9) + ... + <type 'exceptions.AssertionError'>: + Arrays are not almost equal + <BLANKLINE> + (mismatch 50.0%) + x: array([ 1. , 2.33333333]) + y: array([ 1. 
, 2.33333334]) + + """ + __tracebackhide__ = True # Hide traceback for py.test + from numpy.core import ndarray + from numpy.lib import iscomplexobj, real, imag + + # Handle complex numbers: separate into real/imag to handle + # nan/inf/negative zero correctly + # XXX: catch ValueError for subclasses of ndarray where iscomplex fail + try: + usecomplex = iscomplexobj(actual) or iscomplexobj(desired) + except ValueError: + usecomplex = False + + def _build_err_msg(): + header = ('Arrays are not almost equal to %d decimals' % decimal) + return build_err_msg([actual, desired], err_msg, verbose=verbose, + header=header) + + if usecomplex: + if iscomplexobj(actual): + actualr = real(actual) + actuali = imag(actual) + else: + actualr = actual + actuali = 0 + if iscomplexobj(desired): + desiredr = real(desired) + desiredi = imag(desired) + else: + desiredr = desired + desiredi = 0 + try: + assert_almost_equal(actualr, desiredr, decimal=decimal) + assert_almost_equal(actuali, desiredi, decimal=decimal) + except AssertionError: + raise AssertionError(_build_err_msg()) + + if isinstance(actual, (ndarray, tuple, list)) \ + or isinstance(desired, (ndarray, tuple, list)): + return assert_array_almost_equal(actual, desired, decimal, err_msg) + try: + # If one of desired/actual is not finite, handle it specially here: + # check that both are nan if any is a nan, and test for equality + # otherwise + if not (gisfinite(desired) and gisfinite(actual)): + if gisnan(desired) or gisnan(actual): + if not (gisnan(desired) and gisnan(actual)): + raise AssertionError(_build_err_msg()) + else: + if not desired == actual: + raise AssertionError(_build_err_msg()) + return + except (NotImplementedError, TypeError): + pass + if abs(desired - actual) >= 1.5 * 10.0**(-decimal): + raise AssertionError(_build_err_msg()) + + +def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True): + """ + Raises an AssertionError if two items are not equal up to significant + digits. + + .. note:: It is recommended to use one of `assert_allclose`, + `assert_array_almost_equal_nulp` or `assert_array_max_ulp` + instead of this function for more consistent floating point + comparisons. + + Given two numbers, check that they are approximately equal. + Approximately equal is defined as the number of significant digits + that agree. + + Parameters + ---------- + actual : scalar + The object to check. + desired : scalar + The expected object. + significant : int, optional + Desired precision, default is 7. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. + + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. + assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Examples + -------- + >>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20) + >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20, + significant=8) + >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20, + significant=8) + ... 
+ <type 'exceptions.AssertionError'>: + Items are not equal to 8 significant digits: + ACTUAL: 1.234567e-021 + DESIRED: 1.2345672000000001e-021 + + the evaluated condition that raises the exception is + + >>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1) + True + + """ + __tracebackhide__ = True # Hide traceback for py.test + import numpy as np + + (actual, desired) = map(float, (actual, desired)) + if desired == actual: + return + # Normalized the numbers to be in range (-10.0,10.0) + # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual)))))) + with np.errstate(invalid='ignore'): + scale = 0.5*(np.abs(desired) + np.abs(actual)) + scale = np.power(10, np.floor(np.log10(scale))) + try: + sc_desired = desired/scale + except ZeroDivisionError: + sc_desired = 0.0 + try: + sc_actual = actual/scale + except ZeroDivisionError: + sc_actual = 0.0 + msg = build_err_msg([actual, desired], err_msg, + header='Items are not equal to %d significant digits:' % + significant, + verbose=verbose) + try: + # If one of desired/actual is not finite, handle it specially here: + # check that both are nan if any is a nan, and test for equality + # otherwise + if not (gisfinite(desired) and gisfinite(actual)): + if gisnan(desired) or gisnan(actual): + if not (gisnan(desired) and gisnan(actual)): + raise AssertionError(msg) + else: + if not desired == actual: + raise AssertionError(msg) + return + except (TypeError, NotImplementedError): + pass + if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant-1)): + raise AssertionError(msg) + + +def assert_array_compare(comparison, x, y, err_msg='', verbose=True, + header='', precision=6, equal_nan=True, + equal_inf=True): + __tracebackhide__ = True # Hide traceback for py.test + from numpy.core import array, isnan, isinf, any, inf + x = array(x, copy=False, subok=True) + y = array(y, copy=False, subok=True) + + def isnumber(x): + return x.dtype.char in '?bhilqpBHILQPefdgFDG' + + def istime(x): + return x.dtype.char in "Mm" + + def chk_same_position(x_id, y_id, hasval='nan'): + """Handling nan/inf: check that x and y have the nan/inf at the same + locations.""" + try: + assert_array_equal(x_id, y_id) + except AssertionError: + msg = build_err_msg([x, y], + err_msg + '\nx and y %s location mismatch:' + % (hasval), verbose=verbose, header=header, + names=('x', 'y'), precision=precision) + raise AssertionError(msg) + + try: + cond = (x.shape == () or y.shape == ()) or x.shape == y.shape + if not cond: + msg = build_err_msg([x, y], + err_msg + + '\n(shapes %s, %s mismatch)' % (x.shape, + y.shape), + verbose=verbose, header=header, + names=('x', 'y'), precision=precision) + raise AssertionError(msg) + + if isnumber(x) and isnumber(y): + has_nan = has_inf = False + if equal_nan: + x_isnan, y_isnan = isnan(x), isnan(y) + # Validate that NaNs are in the same place + has_nan = any(x_isnan) or any(y_isnan) + if has_nan: + chk_same_position(x_isnan, y_isnan, hasval='nan') + + if equal_inf: + x_isinf, y_isinf = isinf(x), isinf(y) + # Validate that infinite values are in the same place + has_inf = any(x_isinf) or any(y_isinf) + if has_inf: + # Check +inf and -inf separately, since they are different + chk_same_position(x == +inf, y == +inf, hasval='+inf') + chk_same_position(x == -inf, y == -inf, hasval='-inf') + + if has_nan and has_inf: + x = x[~(x_isnan | x_isinf)] + y = y[~(y_isnan | y_isinf)] + elif has_nan: + x = x[~x_isnan] + y = y[~y_isnan] + elif has_inf: + x = x[~x_isinf] + y = y[~y_isinf] + + # Only do the comparison if actual 
values are left + if x.size == 0: + return + + elif istime(x) and istime(y): + # If one is datetime64 and the other timedelta64 there is no point + if equal_nan and x.dtype.type == y.dtype.type: + x_isnat, y_isnat = isnat(x), isnat(y) + + if any(x_isnat) or any(y_isnat): + chk_same_position(x_isnat, y_isnat, hasval="NaT") + + if any(x_isnat) or any(y_isnat): + x = x[~x_isnat] + y = y[~y_isnat] + + val = comparison(x, y) + + if isinstance(val, bool): + cond = val + reduced = [0] + else: + reduced = val.ravel() + cond = reduced.all() + reduced = reduced.tolist() + if not cond: + match = 100-100.0*reduced.count(1)/len(reduced) + msg = build_err_msg([x, y], + err_msg + + '\n(mismatch %s%%)' % (match,), + verbose=verbose, header=header, + names=('x', 'y'), precision=precision) + if not cond: + raise AssertionError(msg) + except ValueError: + import traceback + efmt = traceback.format_exc() + header = 'error during assertion:\n\n%s\n\n%s' % (efmt, header) + + msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header, + names=('x', 'y'), precision=precision) + raise ValueError(msg) + + +def assert_array_equal(x, y, err_msg='', verbose=True): + """ + Raises an AssertionError if two array_like objects are not equal. + + Given two array_like objects, check that the shape is equal and all + elements of these objects are equal. An exception is raised at + shape mismatch or conflicting values. In contrast to the standard usage + in numpy, NaNs are compared like numbers, no assertion is raised if + both objects have NaNs in the same positions. + + The usual caution for verifying equality with floating point numbers is + advised. + + Parameters + ---------- + x : array_like + The actual object to check. + y : array_like + The desired, expected object. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired objects are not equal. + + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. + assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Examples + -------- + The first assert does not raise an exception: + + >>> np.testing.assert_array_equal([1.0,2.33333,np.nan], + ... [np.exp(0),2.33333, np.nan]) + + Assert fails with numerical inprecision with floats: + + >>> np.testing.assert_array_equal([1.0,np.pi,np.nan], + ... [1, np.sqrt(np.pi)**2, np.nan]) + ... + <type 'exceptions.ValueError'>: + AssertionError: + Arrays are not equal + <BLANKLINE> + (mismatch 50.0%) + x: array([ 1. , 3.14159265, NaN]) + y: array([ 1. , 3.14159265, NaN]) + + Use `assert_allclose` or one of the nulp (number of floating point values) + functions for these cases instead: + + >>> np.testing.assert_allclose([1.0,np.pi,np.nan], + ... [1, np.sqrt(np.pi)**2, np.nan], + ... rtol=1e-10, atol=0) + + """ + __tracebackhide__ = True # Hide traceback for py.test + assert_array_compare(operator.__eq__, x, y, err_msg=err_msg, + verbose=verbose, header='Arrays are not equal') + + +def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True): + """ + Raises an AssertionError if two objects are not equal up to desired + precision. + + .. note:: It is recommended to use one of `assert_allclose`, + `assert_array_almost_equal_nulp` or `assert_array_max_ulp` + instead of this function for more consistent floating point + comparisons. 
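As a usage sketch of the NaN/inf position bookkeeping above (behaviour taken straight from the `assert_array_equal` docstring; the arrays are illustrative):

    import numpy as np
    from numpy.testing import assert_array_equal

    # NaNs and infs are compared by position, so co-located ones pass.
    assert_array_equal([1.0, np.nan, np.inf], [1.0, np.nan, np.inf])

    try:
        # A NaN present in only one operand trips the location-mismatch check.
        assert_array_equal([1.0, np.nan], [np.nan, 1.0])
    except AssertionError:
        pass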
+ + The test verifies identical shapes and that the elements of ``actual`` and + ``desired`` satisfy. + + ``abs(desired-actual) < 1.5 * 10**(-decimal)`` + + That is a looser test than originally documented, but agrees with what the + actual implementation did up to rounding vagaries. An exception is raised + at shape mismatch or conflicting values. In contrast to the standard usage + in numpy, NaNs are compared like numbers, no assertion is raised if both + objects have NaNs in the same positions. + + Parameters + ---------- + x : array_like + The actual object to check. + y : array_like + The desired, expected object. + decimal : int, optional + Desired precision, default is 6. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. + + See Also + -------- + assert_allclose: Compare two array_like objects for equality with desired + relative and/or absolute precision. + assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal + + Examples + -------- + the first assert does not raise an exception + + >>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan], + [1.0,2.333,np.nan]) + + >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan], + ... [1.0,2.33339,np.nan], decimal=5) + ... + <type 'exceptions.AssertionError'>: + AssertionError: + Arrays are not almost equal + <BLANKLINE> + (mismatch 50.0%) + x: array([ 1. , 2.33333, NaN]) + y: array([ 1. , 2.33339, NaN]) + + >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan], + ... [1.0,2.33333, 5], decimal=5) + <type 'exceptions.ValueError'>: + ValueError: + Arrays are not almost equal + x: array([ 1. , 2.33333, NaN]) + y: array([ 1. , 2.33333, 5. ]) + + """ + __tracebackhide__ = True # Hide traceback for py.test + from numpy.core import around, number, float_, result_type, array + from numpy.core.numerictypes import issubdtype + from numpy.core.fromnumeric import any as npany + + def compare(x, y): + try: + if npany(gisinf(x)) or npany( gisinf(y)): + xinfid = gisinf(x) + yinfid = gisinf(y) + if not (xinfid == yinfid).all(): + return False + # if one item, x and y is +- inf + if x.size == y.size == 1: + return x == y + x = x[~xinfid] + y = y[~yinfid] + except (TypeError, NotImplementedError): + pass + + # make sure y is an inexact type to avoid abs(MIN_INT); will cause + # casting of x later. + dtype = result_type(y, 1.) + y = array(y, dtype=dtype, copy=False, subok=True) + z = abs(x - y) + + if not issubdtype(z.dtype, number): + z = z.astype(float_) # handle object arrays + + return z < 1.5 * 10.0**(-decimal) + + assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose, + header=('Arrays are not almost equal to %d decimals' % decimal), + precision=decimal) + + +def assert_array_less(x, y, err_msg='', verbose=True): + """ + Raises an AssertionError if two array_like objects are not ordered by less + than. + + Given two array_like objects, check that the shape is equal and all + elements of the first object are strictly smaller than those of the + second object. An exception is raised at shape mismatch or incorrectly + ordered values. Shape mismatch does not raise if an object has zero + dimension. In contrast to the standard usage in numpy, NaNs are + compared, no assertion is raised if both objects have NaNs in the same + positions. 
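To make the ``abs(desired-actual) < 1.5 * 10**(-decimal)`` threshold implemented above concrete, a small sketch (values chosen only so the inequality is easy to check by hand):

    from numpy.testing import assert_array_almost_equal

    # decimal=6 tolerates differences strictly below 1.5e-6.
    assert_array_almost_equal([1.0], [1.0 + 1.0e-7], decimal=6)      # 1e-7 < 1.5e-6

    try:
        assert_array_almost_equal([1.0], [1.0 + 1.0e-5], decimal=6)  # 1e-5 >= 1.5e-6
    except AssertionError:
        pass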
+ + + + Parameters + ---------- + x : array_like + The smaller object to check. + y : array_like + The larger object to compare. + err_msg : string + The error message to be printed in case of failure. + verbose : bool + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired objects are not equal. + + See Also + -------- + assert_array_equal: tests objects for equality + assert_array_almost_equal: test objects for equality up to precision + + + + Examples + -------- + >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan]) + >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan]) + ... + <type 'exceptions.ValueError'>: + Arrays are not less-ordered + (mismatch 50.0%) + x: array([ 1., 1., NaN]) + y: array([ 1., 2., NaN]) + + >>> np.testing.assert_array_less([1.0, 4.0], 3) + ... + <type 'exceptions.ValueError'>: + Arrays are not less-ordered + (mismatch 50.0%) + x: array([ 1., 4.]) + y: array(3) + + >>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4]) + ... + <type 'exceptions.ValueError'>: + Arrays are not less-ordered + (shapes (3,), (1,) mismatch) + x: array([ 1., 2., 3.]) + y: array([4]) + + """ + __tracebackhide__ = True # Hide traceback for py.test + assert_array_compare(operator.__lt__, x, y, err_msg=err_msg, + verbose=verbose, + header='Arrays are not less-ordered', + equal_inf=False) + + +def runstring(astr, dict): + exec(astr, dict) + + +def assert_string_equal(actual, desired): + """ + Test if two strings are equal. + + If the given strings are equal, `assert_string_equal` does nothing. + If they are not equal, an AssertionError is raised, and the diff + between the strings is shown. + + Parameters + ---------- + actual : str + The string to test for equality against the expected string. + desired : str + The expected string. + + Examples + -------- + >>> np.testing.assert_string_equal('abc', 'abc') + >>> np.testing.assert_string_equal('abc', 'abcd') + Traceback (most recent call last): + File "<stdin>", line 1, in <module> + ... + AssertionError: Differences in strings: + - abc+ abcd? + + + """ + # delay import of difflib to reduce startup time + __tracebackhide__ = True # Hide traceback for py.test + import difflib + + if not isinstance(actual, str): + raise AssertionError(repr(type(actual))) + if not isinstance(desired, str): + raise AssertionError(repr(type(desired))) + if re.match(r'\A'+desired+r'\Z', actual, re.M): + return + + diff = list(difflib.Differ().compare(actual.splitlines(1), desired.splitlines(1))) + diff_list = [] + while diff: + d1 = diff.pop(0) + if d1.startswith(' '): + continue + if d1.startswith('- '): + l = [d1] + d2 = diff.pop(0) + if d2.startswith('? '): + l.append(d2) + d2 = diff.pop(0) + if not d2.startswith('+ '): + raise AssertionError(repr(d2)) + l.append(d2) + if diff: + d3 = diff.pop(0) + if d3.startswith('? '): + l.append(d3) + else: + diff.insert(0, d3) + if re.match(r'\A'+d2[2:]+r'\Z', d1[2:]): + continue + diff_list.extend(l) + continue + raise AssertionError(repr(d1)) + if not diff_list: + return + msg = 'Differences in strings:\n%s' % (''.join(diff_list)).rstrip() + if actual != desired: + raise AssertionError(msg) + + +def rundocs(filename=None, raise_on_error=True): + """ + Run doctests found in the given file. + + By default `rundocs` raises an AssertionError on failure. + + Parameters + ---------- + filename : str + The path to the file for which the doctests are run. 
+ raise_on_error : bool + Whether to raise an AssertionError when a doctest fails. Default is + True. + + Notes + ----- + The doctests can be run by the user/developer by adding the ``doctests`` + argument to the ``test()`` call. For example, to run all tests (including + doctests) for `numpy.lib`: + + >>> np.lib.test(doctests=True) #doctest: +SKIP + """ + from numpy.compat import npy_load_module + import doctest + if filename is None: + f = sys._getframe(1) + filename = f.f_globals['__file__'] + name = os.path.splitext(os.path.basename(filename))[0] + m = npy_load_module(name, filename) + + tests = doctest.DocTestFinder().find(m) + runner = doctest.DocTestRunner(verbose=False) + + msg = [] + if raise_on_error: + out = lambda s: msg.append(s) + else: + out = None + + for test in tests: + runner.run(test, out=out) + + if runner.failures > 0 and raise_on_error: + raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg)) + + +def raises(*exceptions): + """ + This is actually a decorator and belongs in decorators.py. + + """ + import pytest + + def raises_decorator(f): + + def raiser(*args, **kwargs): + try: + f(*args, **kwargs) + except exceptions: + return + raise AssertionError() + + return raiser + + + return raises_decorator + + +def assert_raises(exception_class, fn=None, *args, **kwargs): + """ + assert_raises(exception_class, callable, *args, **kwargs) + assert_raises(exception_class) + + Fail unless an exception of class exception_class is thrown + by callable when invoked with arguments args and keyword + arguments kwargs. If a different type of exception is + thrown, it will not be caught, and the test case will be + deemed to have suffered an error, exactly as for an + unexpected exception. + + Alternatively, `assert_raises` can be used as a context manager: + + >>> from numpy.testing import assert_raises + >>> with assert_raises(ZeroDivisionError): + ... 1 / 0 + + is equivalent to + + >>> def div(x, y): + ... return x / y + >>> assert_raises(ZeroDivisionError, div, 1, 0) + + """ + import pytest + + __tracebackhide__ = True # Hide traceback for py.test + + if fn is not None: + pytest.raises(exception_class, fn, *args,**kwargs) + else: + @contextlib.contextmanager + def assert_raises_context(): + try: + yield + except BaseException as raised_exception: + assert isinstance(raised_exception, exception_class) + else: + raise ValueError('Function did not raise an exception') + + return assert_raises_context() + + +def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs): + """ + assert_raises_regex(exception_class, expected_regexp, callable, *args, + **kwargs) + assert_raises_regex(exception_class, expected_regexp) + + Fail unless an exception of class exception_class and with message that + matches expected_regexp is thrown by callable when invoked with arguments + args and keyword arguments kwargs. + + Alternatively, can be used as a context manager like `assert_raises`. + + Name of this function adheres to Python 3.2+ reference, but should work in + all versions down to 2.6. + + Notes + ----- + .. 
versionadded:: 1.9.0 + + """ + import pytest + import unittest + + class Dummy(unittest.TestCase): + def do_nothing(self): + pass + + tmp = Dummy('do_nothing') + + __tracebackhide__ = True # Hide traceback for py.test + res = pytest.raises(exception_class, *args, **kwargs) + + if sys.version_info.major >= 3: + funcname = tmp.assertRaisesRegex + else: + # Only present in Python 2.7, missing from unittest in 2.6 + funcname = tmp.assertRaisesRegexp + + return funcname(exception_class, expected_regexp, *args, **kwargs) + + +def decorate_methods(cls, decorator, testmatch=None): + """ + Apply a decorator to all methods in a class matching a regular expression. + + The given decorator is applied to all public methods of `cls` that are + matched by the regular expression `testmatch` + (``testmatch.search(methodname)``). Methods that are private, i.e. start + with an underscore, are ignored. + + Parameters + ---------- + cls : class + Class whose methods to decorate. + decorator : function + Decorator to apply to methods + testmatch : compiled regexp or str, optional + The regular expression. Default value is None, in which case the + nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``) + is used. + If `testmatch` is a string, it is compiled to a regular expression + first. + + """ + if testmatch is None: + testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep) + else: + testmatch = re.compile(testmatch) + cls_attr = cls.__dict__ + + # delayed import to reduce startup time + from inspect import isfunction + + methods = [_m for _m in cls_attr.values() if isfunction(_m)] + for function in methods: + try: + if hasattr(function, 'compat_func_name'): + funcname = function.compat_func_name + else: + funcname = function.__name__ + except AttributeError: + # not a function + continue + if testmatch.search(funcname) and not funcname.startswith('_'): + setattr(cls, funcname, decorator(function)) + return + + +def measure(code_str,times=1,label=None): + """ + Return elapsed time for executing code in the namespace of the caller. + + The supplied code string is compiled with the Python builtin ``compile``. + The precision of the timing is 10 milli-seconds. If the code will execute + fast on this timescale, it can be executed many times to get reasonable + timing accuracy. + + Parameters + ---------- + code_str : str + The code to be timed. + times : int, optional + The number of times the code is executed. Default is 1. The code is + only compiled once. + label : str, optional + A label to identify `code_str` with. This is passed into ``compile`` + as the second argument (for run-time error messages). + + Returns + ------- + elapsed : float + Total elapsed time in seconds for executing `code_str` `times` times. + + Examples + -------- + >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)', + ... times=times) + >>> print("Time for a single execution : ", etime / times, "s") + Time for a single execution : 0.005 s + + """ + frame = sys._getframe(1) + locs, globs = frame.f_locals, frame.f_globals + + code = compile(code_str, + 'Test name: %s ' % label, + 'exec') + i = 0 + elapsed = jiffies() + while i < times: + i += 1 + exec(code, globs, locs) + elapsed = jiffies() - elapsed + return 0.01*elapsed + + +def _assert_valid_refcount(op): + """ + Check that ufuncs don't mishandle refcount of object `1`. + Used in a few regression tests. 
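A short, hedged sketch of the ``measure`` helper defined above; the timed snippet and label are arbitrary, and ``np`` must exist in the caller's namespace because ``measure`` executes the string there.

    import numpy as np
    from numpy.testing import measure

    times = 100
    etime = measure('np.sqrt(np.arange(1000.0))', times=times, label='sqrt-bench')
    print('per call:', etime / times, 's')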
+ """ + if not HAS_REFCOUNT: + return True + import numpy as np + + b = np.arange(100*100).reshape(100, 100) + c = b + i = 1 + + rc = sys.getrefcount(i) + for j in range(15): + d = op(b, c) + assert_(sys.getrefcount(i) >= rc) + del d # for pyflakes + + +def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True, + err_msg='', verbose=True): + """ + Raises an AssertionError if two objects are not equal up to desired + tolerance. + + The test is equivalent to ``allclose(actual, desired, rtol, atol)``. + It compares the difference between `actual` and `desired` to + ``atol + rtol * abs(desired)``. + + .. versionadded:: 1.5.0 + + Parameters + ---------- + actual : array_like + Array obtained. + desired : array_like + Array desired. + rtol : float, optional + Relative tolerance. + atol : float, optional + Absolute tolerance. + equal_nan : bool, optional. + If True, NaNs will compare equal. + err_msg : str, optional + The error message to be printed in case of failure. + verbose : bool, optional + If True, the conflicting values are appended to the error message. + + Raises + ------ + AssertionError + If actual and desired are not equal up to specified precision. + + See Also + -------- + assert_array_almost_equal_nulp, assert_array_max_ulp + + Examples + -------- + >>> x = [1e-5, 1e-3, 1e-1] + >>> y = np.arccos(np.cos(x)) + >>> assert_allclose(x, y, rtol=1e-5, atol=0) + + """ + __tracebackhide__ = True # Hide traceback for py.test + import numpy as np + + def compare(x, y): + return np.core.numeric.isclose(x, y, rtol=rtol, atol=atol, + equal_nan=equal_nan) + + actual, desired = np.asanyarray(actual), np.asanyarray(desired) + header = 'Not equal to tolerance rtol=%g, atol=%g' % (rtol, atol) + assert_array_compare(compare, actual, desired, err_msg=str(err_msg), + verbose=verbose, header=header, equal_nan=equal_nan) + + +def assert_array_almost_equal_nulp(x, y, nulp=1): + """ + Compare two arrays relatively to their spacing. + + This is a relatively robust method to compare two arrays whose amplitude + is variable. + + Parameters + ---------- + x, y : array_like + Input arrays. + nulp : int, optional + The maximum number of unit in the last place for tolerance (see Notes). + Default is 1. + + Returns + ------- + None + + Raises + ------ + AssertionError + If the spacing between `x` and `y` for one or more elements is larger + than `nulp`. + + See Also + -------- + assert_array_max_ulp : Check that all items of arrays differ in at most + N Units in the Last Place. + spacing : Return the distance between x and the nearest adjacent number. + + Notes + ----- + An assertion is raised if the following condition is not met:: + + abs(x - y) <= nulps * spacing(maximum(abs(x), abs(y))) + + Examples + -------- + >>> x = np.array([1., 1e-10, 1e-20]) + >>> eps = np.finfo(x.dtype).eps + >>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x) + + >>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x) + Traceback (most recent call last): + ... 
+ AssertionError: X and Y are not equal to 1 ULP (max is 2) + + """ + __tracebackhide__ = True # Hide traceback for py.test + import numpy as np + ax = np.abs(x) + ay = np.abs(y) + ref = nulp * np.spacing(np.where(ax > ay, ax, ay)) + if not np.all(np.abs(x-y) <= ref): + if np.iscomplexobj(x) or np.iscomplexobj(y): + msg = "X and Y are not equal to %d ULP" % nulp + else: + max_nulp = np.max(nulp_diff(x, y)) + msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp) + raise AssertionError(msg) + + +def assert_array_max_ulp(a, b, maxulp=1, dtype=None): + """ + Check that all items of arrays differ in at most N Units in the Last Place. + + Parameters + ---------- + a, b : array_like + Input arrays to be compared. + maxulp : int, optional + The maximum number of units in the last place that elements of `a` and + `b` can differ. Default is 1. + dtype : dtype, optional + Data-type to convert `a` and `b` to if given. Default is None. + + Returns + ------- + ret : ndarray + Array containing number of representable floating point numbers between + items in `a` and `b`. + + Raises + ------ + AssertionError + If one or more elements differ by more than `maxulp`. + + See Also + -------- + assert_array_almost_equal_nulp : Compare two arrays relatively to their + spacing. + + Examples + -------- + >>> a = np.linspace(0., 1., 100) + >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a))) + + """ + __tracebackhide__ = True # Hide traceback for py.test + import numpy as np + ret = nulp_diff(a, b, dtype) + if not np.all(ret <= maxulp): + raise AssertionError("Arrays are not almost equal up to %g ULP" % + maxulp) + return ret + + +def nulp_diff(x, y, dtype=None): + """For each item in x and y, return the number of representable floating + points between them. + + Parameters + ---------- + x : array_like + first input array + y : array_like + second input array + dtype : dtype, optional + Data-type to convert `x` and `y` to if given. Default is None. + + Returns + ------- + nulp : array_like + number of representable floating point numbers between each item in x + and y. 
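A sketch of the two ULP helpers above, using ``np.spacing``/``np.nextafter`` so the expected distances are exactly one unit in the last place:

    import numpy as np
    from numpy.testing import (assert_array_almost_equal_nulp,
                               assert_array_max_ulp)

    x = np.array([1.0, 1e-10, 1e10])
    # x and its immediate float neighbour are within one ULP.
    assert_array_almost_equal_nulp(x, x + np.spacing(x), nulp=1)

    # assert_array_max_ulp also returns the per-element ULP distance.
    dist = assert_array_max_ulp(x, np.nextafter(x, np.inf), maxulp=1)
    print(dist)  # [1. 1. 1.]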
+ + Examples + -------- + # By definition, epsilon is the smallest number such as 1 + eps != 1, so + # there should be exactly one ULP between 1 and 1 + eps + >>> nulp_diff(1, 1 + np.finfo(x.dtype).eps) + 1.0 + """ + import numpy as np + if dtype: + x = np.array(x, dtype=dtype) + y = np.array(y, dtype=dtype) + else: + x = np.array(x) + y = np.array(y) + + t = np.common_type(x, y) + if np.iscomplexobj(x) or np.iscomplexobj(y): + raise NotImplementedError("_nulp not implemented for complex array") + + x = np.array(x, dtype=t) + y = np.array(y, dtype=t) + + if not x.shape == y.shape: + raise ValueError("x and y do not have the same shape: %s - %s" % + (x.shape, y.shape)) + + def _diff(rx, ry, vdt): + diff = np.array(rx-ry, dtype=vdt) + return np.abs(diff) + + rx = integer_repr(x) + ry = integer_repr(y) + return _diff(rx, ry, t) + + +def _integer_repr(x, vdt, comp): + # Reinterpret binary representation of the float as sign-magnitude: + # take into account two-complement representation + # See also + # http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm + rx = x.view(vdt) + if not (rx.size == 1): + rx[rx < 0] = comp - rx[rx < 0] + else: + if rx < 0: + rx = comp - rx + + return rx + + +def integer_repr(x): + """Return the signed-magnitude interpretation of the binary representation of + x.""" + import numpy as np + if x.dtype == np.float32: + return _integer_repr(x, np.int32, np.int32(-2**31)) + elif x.dtype == np.float64: + return _integer_repr(x, np.int64, np.int64(-2**63)) + else: + raise ValueError("Unsupported dtype %s" % x.dtype) + + +# The following two classes are copied from python 2.6 warnings module (context +# manager) +class WarningMessage(object): + + """ + Holds the result of a single showwarning() call. + + Deprecated in 1.8.0 + + Notes + ----- + `WarningMessage` is copied from the Python 2.6 warnings module, + so it can be used in NumPy with older Python versions. + + """ + + _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file", + "line") + + def __init__(self, message, category, filename, lineno, file=None, + line=None): + local_values = locals() + for attr in self._WARNING_DETAILS: + setattr(self, attr, local_values[attr]) + if category: + self._category_name = category.__name__ + else: + self._category_name = None + + def __str__(self): + return ("{message : %r, category : %r, filename : %r, lineno : %s, " + "line : %r}" % (self.message, self._category_name, + self.filename, self.lineno, self.line)) + + +class WarningManager(object): + """ + A context manager that copies and restores the warnings filter upon + exiting the context. + + The 'record' argument specifies whether warnings should be captured by a + custom implementation of ``warnings.showwarning()`` and be appended to a + list returned by the context manager. Otherwise None is returned by the + context manager. The objects appended to the list are arguments whose + attributes mirror the arguments to ``showwarning()``. + + The 'module' argument is to specify an alternative module to the module + named 'warnings' and imported under that name. This argument is only useful + when testing the warnings module itself. + + Deprecated in 1.8.0 + + Notes + ----- + `WarningManager` is a copy of the ``catch_warnings`` context manager + from the Python 2.6 warnings module, with slight modifications. + It is copied so it can be used in NumPy with older Python versions. 
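The sign-magnitude trick in ``_integer_repr`` above is easiest to see on adjacent doubles; a sketch using only plain NumPy (the view mirrors what ``integer_repr`` computes for non-negative input):

    import numpy as np

    a = np.array(1.0)
    b = np.nextafter(a, 2.0)   # the next representable double above 1.0
    # Consecutive positive floats map to consecutive integers under this view,
    # which is why the ULP distance reduces to an integer subtraction.
    print(int(b.view(np.int64) - a.view(np.int64)))   # 1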
+ + """ + + def __init__(self, record=False, module=None): + self._record = record + if module is None: + self._module = sys.modules['warnings'] + else: + self._module = module + self._entered = False + + def __enter__(self): + if self._entered: + raise RuntimeError("Cannot enter %r twice" % self) + self._entered = True + self._filters = self._module.filters + self._module.filters = self._filters[:] + self._showwarning = self._module.showwarning + if self._record: + log = [] + + def showwarning(*args, **kwargs): + log.append(WarningMessage(*args, **kwargs)) + self._module.showwarning = showwarning + return log + else: + return None + + def __exit__(self): + if not self._entered: + raise RuntimeError("Cannot exit %r without entering first" % self) + self._module.filters = self._filters + self._module.showwarning = self._showwarning + + +@contextlib.contextmanager +def _assert_warns_context(warning_class, name=None): + __tracebackhide__ = True # Hide traceback for py.test + with suppress_warnings() as sup: + l = sup.record(warning_class) + yield + if not len(l) > 0: + name_str = " when calling %s" % name if name is not None else "" + raise AssertionError("No warning raised" + name_str) + + +def assert_warns(warning_class, *args, **kwargs): + """ + Fail unless the given callable throws the specified warning. + + A warning of class warning_class should be thrown by the callable when + invoked with arguments args and keyword arguments kwargs. + If a different type of warning is thrown, it will not be caught. + + If called with all arguments other than the warning class omitted, may be + used as a context manager: + + with assert_warns(SomeWarning): + do_something() + + The ability to be used as a context manager is new in NumPy v1.11.0. + + .. versionadded:: 1.4.0 + + Parameters + ---------- + warning_class : class + The class defining the warning that `func` is expected to throw. + func : callable + The callable to test. + \\*args : Arguments + Arguments passed to `func`. + \\*\\*kwargs : Kwargs + Keyword arguments passed to `func`. + + Returns + ------- + The value returned by `func`. + + """ + if not args: + return _assert_warns_context(warning_class) + + func = args[0] + args = args[1:] + with _assert_warns_context(warning_class, name=func.__name__): + return func(*args, **kwargs) + + +@contextlib.contextmanager +def _assert_no_warnings_context(name=None): + __tracebackhide__ = True # Hide traceback for py.test + with warnings.catch_warnings(record=True) as l: + warnings.simplefilter('always') + yield + if len(l) > 0: + name_str = " when calling %s" % name if name is not None else "" + raise AssertionError("Got warnings%s: %s" % (name_str, l)) + + +def assert_no_warnings(*args, **kwargs): + """ + Fail if the given callable produces any warnings. + + If called with all arguments omitted, may be used as a context manager: + + with assert_no_warnings(): + do_something() + + The ability to be used as a context manager is new in NumPy v1.11.0. + + .. versionadded:: 1.7.0 + + Parameters + ---------- + func : callable + The callable to test. + \\*args : Arguments + Arguments passed to `func`. + \\*\\*kwargs : Kwargs + Keyword arguments passed to `func`. + + Returns + ------- + The value returned by `func`. 
+ + """ + if not args: + return _assert_no_warnings_context() + + func = args[0] + args = args[1:] + with _assert_no_warnings_context(name=func.__name__): + return func(*args, **kwargs) + + +def _gen_alignment_data(dtype=float32, type='binary', max_size=24): + """ + generator producing data with different alignment and offsets + to test simd vectorization + + Parameters + ---------- + dtype : dtype + data type to produce + type : string + 'unary': create data for unary operations, creates one input + and output array + 'binary': create data for unary operations, creates two input + and output array + max_size : integer + maximum size of data to produce + + Returns + ------- + if type is 'unary' yields one output, one input array and a message + containing information on the data + if type is 'binary' yields one output array, two input array and a message + containing information on the data + + """ + ufmt = 'unary offset=(%d, %d), size=%d, dtype=%r, %s' + bfmt = 'binary offset=(%d, %d, %d), size=%d, dtype=%r, %s' + for o in range(3): + for s in range(o + 2, max(o + 3, max_size)): + if type == 'unary': + inp = lambda: arange(s, dtype=dtype)[o:] + out = empty((s,), dtype=dtype)[o:] + yield out, inp(), ufmt % (o, o, s, dtype, 'out of place') + d = inp() + yield d, d, ufmt % (o, o, s, dtype, 'in place') + yield out[1:], inp()[:-1], ufmt % \ + (o + 1, o, s - 1, dtype, 'out of place') + yield out[:-1], inp()[1:], ufmt % \ + (o, o + 1, s - 1, dtype, 'out of place') + yield inp()[:-1], inp()[1:], ufmt % \ + (o, o + 1, s - 1, dtype, 'aliased') + yield inp()[1:], inp()[:-1], ufmt % \ + (o + 1, o, s - 1, dtype, 'aliased') + if type == 'binary': + inp1 = lambda: arange(s, dtype=dtype)[o:] + inp2 = lambda: arange(s, dtype=dtype)[o:] + out = empty((s,), dtype=dtype)[o:] + yield out, inp1(), inp2(), bfmt % \ + (o, o, o, s, dtype, 'out of place') + d = inp1() + yield d, d, inp2(), bfmt % \ + (o, o, o, s, dtype, 'in place1') + d = inp2() + yield d, inp1(), d, bfmt % \ + (o, o, o, s, dtype, 'in place2') + yield out[1:], inp1()[:-1], inp2()[:-1], bfmt % \ + (o + 1, o, o, s - 1, dtype, 'out of place') + yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % \ + (o, o + 1, o, s - 1, dtype, 'out of place') + yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % \ + (o, o, o + 1, s - 1, dtype, 'out of place') + yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % \ + (o + 1, o, o, s - 1, dtype, 'aliased') + yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % \ + (o, o + 1, o, s - 1, dtype, 'aliased') + yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % \ + (o, o, o + 1, s - 1, dtype, 'aliased') + + + +@contextlib.contextmanager +def tempdir(*args, **kwargs): + """Context manager to provide a temporary test folder. + + All arguments are passed as this to the underlying tempfile.mkdtemp + function. + + """ + tmpdir = mkdtemp(*args, **kwargs) + try: + yield tmpdir + finally: + shutil.rmtree(tmpdir) + + +@contextlib.contextmanager +def temppath(*args, **kwargs): + """Context manager for temporary files. + + Context manager that returns the path to a closed temporary file. Its + parameters are the same as for tempfile.mkstemp and are passed directly + to that function. The underlying file is removed when the context is + exited, so it should be closed at that time. + + Windows does not allow a temporary file to be opened if it is already + open, so the underlying file must be closed after opening before it + can be opened again. 
+ + """ + fd, path = mkstemp(*args, **kwargs) + os.close(fd) + try: + yield path + finally: + os.remove(path) + + +class clear_and_catch_warnings(warnings.catch_warnings): + """ Context manager that resets warning registry for catching warnings + + Warnings can be slippery, because, whenever a warning is triggered, Python + adds a ``__warningregistry__`` member to the *calling* module. This makes + it impossible to retrigger the warning in this module, whatever you put in + the warnings filters. This context manager accepts a sequence of `modules` + as a keyword argument to its constructor and: + + * stores and removes any ``__warningregistry__`` entries in given `modules` + on entry; + * resets ``__warningregistry__`` to its previous state on exit. + + This makes it possible to trigger any warning afresh inside the context + manager without disturbing the state of warnings outside. + + For compatibility with Python 3.0, please consider all arguments to be + keyword-only. + + Parameters + ---------- + record : bool, optional + Specifies whether warnings should be captured by a custom + implementation of ``warnings.showwarning()`` and be appended to a list + returned by the context manager. Otherwise None is returned by the + context manager. The objects appended to the list are arguments whose + attributes mirror the arguments to ``showwarning()``. + modules : sequence, optional + Sequence of modules for which to reset warnings registry on entry and + restore on exit. To work correctly, all 'ignore' filters should + filter by one of these modules. + + Examples + -------- + >>> import warnings + >>> with clear_and_catch_warnings(modules=[np.core.fromnumeric]): + ... warnings.simplefilter('always') + ... warnings.filterwarnings('ignore', module='np.core.fromnumeric') + ... # do something that raises a warning but ignore those in + ... # np.core.fromnumeric + """ + class_modules = () + + def __init__(self, record=False, modules=()): + self.modules = set(modules).union(self.class_modules) + self._warnreg_copies = {} + super(clear_and_catch_warnings, self).__init__(record=record) + + def __enter__(self): + for mod in self.modules: + if hasattr(mod, '__warningregistry__'): + mod_reg = mod.__warningregistry__ + self._warnreg_copies[mod] = mod_reg.copy() + mod_reg.clear() + return super(clear_and_catch_warnings, self).__enter__() + + def __exit__(self, *exc_info): + super(clear_and_catch_warnings, self).__exit__(*exc_info) + for mod in self.modules: + if hasattr(mod, '__warningregistry__'): + mod.__warningregistry__.clear() + if mod in self._warnreg_copies: + mod.__warningregistry__.update(self._warnreg_copies[mod]) + + +class suppress_warnings(object): + """ + Context manager and decorator doing much the same as + ``warnings.catch_warnings``. + + However, it also provides a filter mechanism to work around + http://bugs.python.org/issue4180. + + This bug causes Python before 3.4 to not reliably show warnings again + after they have been ignored once (even within catch_warnings). It + means that no "ignore" filter can be used easily, since following + tests might need to see the warning. Additionally it allows easier + specificity for testing warnings and can be nested. + + Parameters + ---------- + forwarding_rule : str, optional + One of "always", "once", "module", or "location". Analogous to + the usual warnings module filter mode, it is useful to reduce + noise mostly on the outmost level. Unsuppressed and unrecorded + warnings will be forwarded based on this rule. Defaults to "always". 
+ "location" is equivalent to the warnings "default", match by exact + location the warning warning originated from. + + Notes + ----- + Filters added inside the context manager will be discarded again + when leaving it. Upon entering all filters defined outside a + context will be applied automatically. + + When a recording filter is added, matching warnings are stored in the + ``log`` attribute as well as in the list returned by ``record``. + + If filters are added and the ``module`` keyword is given, the + warning registry of this module will additionally be cleared when + applying it, entering the context, or exiting it. This could cause + warnings to appear a second time after leaving the context if they + were configured to be printed once (default) and were already + printed before the context was entered. + + Nesting this context manager will work as expected when the + forwarding rule is "always" (default). Unfiltered and unrecorded + warnings will be passed out and be matched by the outer level. + On the outmost level they will be printed (or caught by another + warnings context). The forwarding rule argument can modify this + behaviour. + + Like ``catch_warnings`` this context manager is not threadsafe. + + Examples + -------- + >>> with suppress_warnings() as sup: + ... sup.filter(DeprecationWarning, "Some text") + ... sup.filter(module=np.ma.core) + ... log = sup.record(FutureWarning, "Does this occur?") + ... command_giving_warnings() + ... # The FutureWarning was given once, the filtered warnings were + ... # ignored. All other warnings abide outside settings (may be + ... # printed/error) + ... assert_(len(log) == 1) + ... assert_(len(sup.log) == 1) # also stored in log attribute + + Or as a decorator: + + >>> sup = suppress_warnings() + >>> sup.filter(module=np.ma.core) # module must match exact + >>> @sup + >>> def some_function(): + ... # do something which causes a warning in np.ma.core + ... pass + """ + def __init__(self, forwarding_rule="always"): + self._entered = False + + # Suppressions are either instance or defined inside one with block: + self._suppressions = [] + + if forwarding_rule not in {"always", "module", "once", "location"}: + raise ValueError("unsupported forwarding rule.") + self._forwarding_rule = forwarding_rule + + def _clear_registries(self): + if hasattr(warnings, "_filters_mutated"): + # clearing the registry should not be necessary on new pythons, + # instead the filters should be mutated. + warnings._filters_mutated() + return + # Simply clear the registry, this should normally be harmless, + # note that on new pythons it would be invalidated anyway. 
+ for module in self._tmp_modules: + if hasattr(module, "__warningregistry__"): + module.__warningregistry__.clear() + + def _filter(self, category=Warning, message="", module=None, record=False): + if record: + record = [] # The log where to store warnings + else: + record = None + if self._entered: + if module is None: + warnings.filterwarnings( + "always", category=category, message=message) + else: + module_regex = module.__name__.replace('.', r'\.') + '$' + warnings.filterwarnings( + "always", category=category, message=message, + module=module_regex) + self._tmp_modules.add(module) + self._clear_registries() + + self._tmp_suppressions.append( + (category, message, re.compile(message, re.I), module, record)) + else: + self._suppressions.append( + (category, message, re.compile(message, re.I), module, record)) + + return record + + def filter(self, category=Warning, message="", module=None): + """ + Add a new suppressing filter or apply it if the state is entered. + + Parameters + ---------- + category : class, optional + Warning class to filter + message : string, optional + Regular expression matching the warning message. + module : module, optional + Module to filter for. Note that the module (and its file) + must match exactly and cannot be a submodule. This may make + it unreliable for external modules. + + Notes + ----- + When added within a context, filters are only added inside + the context and will be forgotten when the context is exited. + """ + self._filter(category=category, message=message, module=module, + record=False) + + def record(self, category=Warning, message="", module=None): + """ + Append a new recording filter or apply it if the state is entered. + + All warnings matching will be appended to the ``log`` attribute. + + Parameters + ---------- + category : class, optional + Warning class to filter + message : string, optional + Regular expression matching the warning message. + module : module, optional + Module to filter for. Note that the module (and its file) + must match exactly and cannot be a submodule. This may make + it unreliable for external modules. + + Returns + ------- + log : list + A list which will be filled with all matched warnings. + + Notes + ----- + When added within a context, filters are only added inside + the context and will be forgotten when the context is exited. 
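A concrete sketch of the recording filter described above (category and message are illustrative; the regex is matched against the start of the warning text):

    import warnings
    from numpy.testing import suppress_warnings

    with suppress_warnings() as sup:
        log = sup.record(UserWarning, 'ignore me')   # matching warnings land here
        warnings.warn('ignore me please', UserWarning)
        warnings.warn('ignore me too', UserWarning)
    assert len(log) == 2
    assert len(sup.log) == 2   # mirrored on the instance, as documented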
+ """ + return self._filter(category=category, message=message, module=module, + record=True) + + def __enter__(self): + if self._entered: + raise RuntimeError("cannot enter suppress_warnings twice.") + + self._orig_show = warnings.showwarning + self._filters = warnings.filters + warnings.filters = self._filters[:] + + self._entered = True + self._tmp_suppressions = [] + self._tmp_modules = set() + self._forwarded = set() + + self.log = [] # reset global log (no need to keep same list) + + for cat, mess, _, mod, log in self._suppressions: + if log is not None: + del log[:] # clear the log + if mod is None: + warnings.filterwarnings( + "always", category=cat, message=mess) + else: + module_regex = mod.__name__.replace('.', r'\.') + '$' + warnings.filterwarnings( + "always", category=cat, message=mess, + module=module_regex) + self._tmp_modules.add(mod) + warnings.showwarning = self._showwarning + self._clear_registries() + + return self + + def __exit__(self, *exc_info): + warnings.showwarning = self._orig_show + warnings.filters = self._filters + self._clear_registries() + self._entered = False + del self._orig_show + del self._filters + + def _showwarning(self, message, category, filename, lineno, + *args, **kwargs): + use_warnmsg = kwargs.pop("use_warnmsg", None) + for cat, _, pattern, mod, rec in ( + self._suppressions + self._tmp_suppressions)[::-1]: + if (issubclass(category, cat) and + pattern.match(message.args[0]) is not None): + if mod is None: + # Message and category match, either recorded or ignored + if rec is not None: + msg = WarningMessage(message, category, filename, + lineno, **kwargs) + self.log.append(msg) + rec.append(msg) + return + # Use startswith, because warnings strips the c or o from + # .pyc/.pyo files. + elif mod.__file__.startswith(filename): + # The message and module (filename) match + if rec is not None: + msg = WarningMessage(message, category, filename, + lineno, **kwargs) + self.log.append(msg) + rec.append(msg) + return + + # There is no filter in place, so pass to the outside handler + # unless we should only pass it once + if self._forwarding_rule == "always": + if use_warnmsg is None: + self._orig_show(message, category, filename, lineno, + *args, **kwargs) + else: + self._orig_showmsg(use_warnmsg) + return + + if self._forwarding_rule == "once": + signature = (message.args, category) + elif self._forwarding_rule == "module": + signature = (message.args, category, filename) + elif self._forwarding_rule == "location": + signature = (message.args, category, filename, lineno) + + if signature in self._forwarded: + return + self._forwarded.add(signature) + if use_warnmsg is None: + self._orig_show(message, category, filename, lineno, *args, + **kwargs) + else: + self._orig_showmsg(use_warnmsg) + + def __call__(self, func): + """ + Function decorator to apply certain suppressions to a whole + function. 
+ """ + @wraps(func) + def new_func(*args, **kwargs): + with self: + return func(*args, **kwargs) + + return new_func diff --git a/numpy/testing/setup.py b/numpy/testing/setup.py index a5e9656a3..5a0f977d9 100755 --- a/numpy/testing/setup.py +++ b/numpy/testing/setup.py @@ -7,6 +7,7 @@ def configuration(parent_package='',top_path=None): config = Configuration('testing', parent_package, top_path) config.add_subpackage('nose_tools') + config.add_subpackage('pytest_tools') config.add_data_dir('tests') return config diff --git a/numpy/testing/tests/test_decorators.py b/numpy/testing/tests/test_decorators.py index 1258a9296..62329ab7d 100644 --- a/numpy/testing/tests/test_decorators.py +++ b/numpy/testing/tests/test_decorators.py @@ -48,7 +48,7 @@ def test_skip_functions_hardcoded(): f1('a') except DidntSkipException: raise Exception('Failed to skip') - except SkipTest: + except SkipTest().__class__: pass @dec.skipif(False) @@ -59,7 +59,7 @@ def test_skip_functions_hardcoded(): f2('a') except DidntSkipException: pass - except SkipTest: + except SkipTest().__class__: raise Exception('Skipped when not expected to') @@ -76,7 +76,7 @@ def test_skip_functions_callable(): f1('a') except DidntSkipException: raise Exception('Failed to skip') - except SkipTest: + except SkipTest().__class__: pass @dec.skipif(skip_tester) @@ -88,7 +88,7 @@ def test_skip_functions_callable(): f2('a') except DidntSkipException: pass - except SkipTest: + except SkipTest().__class__: raise Exception('Skipped when not expected to') @@ -101,7 +101,7 @@ def test_skip_generators_hardcoded(): try: for j in g1(10): pass - except KnownFailureException: + except KnownFailureException().__class__: pass else: raise Exception('Failed to mark as known failure') @@ -115,7 +115,7 @@ def test_skip_generators_hardcoded(): try: for j in g2(10): pass - except KnownFailureException: + except KnownFailureException().__class__: raise Exception('Marked incorrectly as known failure') except DidntSkipException: pass @@ -134,7 +134,7 @@ def test_skip_generators_callable(): skip_flag = 'skip me!' for j in g1(10): pass - except KnownFailureException: + except KnownFailureException().__class__: pass else: raise Exception('Failed to mark as known failure') @@ -149,7 +149,7 @@ def test_skip_generators_callable(): skip_flag = 'do not skip' for j in g2(10): pass - except KnownFailureException: + except KnownFailureException().__class__: raise Exception('Marked incorrectly as known failure') except DidntSkipException: pass diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py index 7ecb68f47..a0218c4e6 100644 --- a/numpy/testing/utils.py +++ b/numpy/testing/utils.py @@ -3,6 +3,10 @@ Back compatibility utils module. It will import the appropriate set of tools """ +import os + +from .nose_tools.utils import * + __all__ = [ 'assert_equal', 'assert_almost_equal', 'assert_approx_equal', 'assert_array_equal', 'assert_array_less', 'assert_string_equal', @@ -16,5 +20,3 @@ __all__ = [ 'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare', '_assert_valid_refcount', '_gen_alignment_data', ] - -from .nose_tools.utils import * |