author | Markus Lehtonen <markus.lehtonen@linux.intel.com> | 2013-08-29 12:32:40 +0300 |
---|---|---|
committer | Markus Lehtonen <markus.lehtonen@linux.intel.com> | 2013-08-29 12:32:40 +0300 |
commit | c07e4756def1c264799e682b10a052d8791d30cc (patch) | |
tree | 7936cea8ff6502caca0756e29af5068a0a35ff4e /unit_tests | |
Imported Upstream version 0.11.4 (upstream/0.11.4)
Diffstat (limited to 'unit_tests')
73 files changed, 4976 insertions, 0 deletions
diff --git a/unit_tests/helpers.py b/unit_tests/helpers.py new file mode 100644 index 0000000..0a5d68d --- /dev/null +++ b/unit_tests/helpers.py @@ -0,0 +1,6 @@ +def iter_compat(suite): + try: + suite.__iter__ + return suite + except AttributeError: + return suite._tests diff --git a/unit_tests/mock.py b/unit_tests/mock.py new file mode 100644 index 0000000..98e7d43 --- /dev/null +++ b/unit_tests/mock.py @@ -0,0 +1,107 @@ +import imp +import sys +from nose.config import Config +from nose import proxy +from nose.plugins.manager import NoPlugins +from nose.util import odict + + +def mod(name): + m = imp.new_module(name) + sys.modules[name] = m + return m + +class ResultProxyFactory: + def __call__(self, result, test): + return ResultProxy(result, test) + + +class ResultProxy(proxy.ResultProxy): + called = [] + def __init__(self, result, test): + self.result = result + self.test = test + def afterTest(self, test): + self.assertMyTest(test) + self.called.append(('afterTest', test)) + def beforeTest(self, test): + self.assertMyTest(test) + self.called.append(('beforeTest', test)) + def startTest(self, test): + print "proxy startTest" + self.assertMyTest(test) + self.called.append(('startTest', test)) + def stopTest(self, test): + print "proxy stopTest" + self.assertMyTest(test) + self.called.append(('stopTest', test)) + def addDeprecated(self, test, err): + print "proxy addDeprecated" + self.assertMyTest(test) + self.called.append(('addDeprecated', test, err)) + def addError(self, test, err): + print "proxy addError" + self.assertMyTest(test) + self.called.append(('addError', test, err)) + def addFailure(self, test, err): + print "proxy addFailure" + self.assertMyTest(test) + self.called.append(('addFailure', test, err)) + def addSkip(self, test, err): + print "proxy addSkip" + self.assertMyTest(test) + self.called.append(('addSkip', test, err)) + def addSuccess(self, test): + self.assertMyTest(test) + self.called.append(('addSuccess', test)) + + +class RecordingPluginManager(object): + + def __init__(self): + self.reset() + + def __getattr__(self, call): + return RecordingPluginProxy(self, call) + + def null_call(self, call, *arg, **kw): + return getattr(self._nullPluginManager, call)(*arg, **kw) + + def reset(self): + self._nullPluginManager = NoPlugins() + self.called = odict() + + def calls(self): + return self.called.keys() + + +class RecordingPluginProxy(object): + + def __init__(self, manager, call): + self.man = manager + self.call = call + + def __call__(self, *arg, **kw): + self.man.called.setdefault(self.call, []).append((arg, kw)) + return self.man.null_call(self.call, *arg, **kw) + + +class Bucket(object): + def __init__(self, **kw): + self.__dict__['d'] = {} + self.__dict__['d'].update(kw) + + def __getattr__(self, attr): + if not self.__dict__.has_key('d'): + return None + return self.__dict__['d'].get(attr) + + def __setattr__(self, attr, val): + self.d[attr] = val + + +class MockOptParser(object): + def __init__(self): + self.opts = [] + def add_option(self, *args, **kw): + self.opts.append((args, kw)) diff --git a/unit_tests/support/bug101/tests.py b/unit_tests/support/bug101/tests.py new file mode 100644 index 0000000..5d7c3e3 --- /dev/null +++ b/unit_tests/support/bug101/tests.py @@ -0,0 +1,9 @@ +def my_decor(func): + return lambda: func() + +def test_decor(): + pass + +def test_decor1(): + pass +test_decor1 = my_decor(test_decor1) diff --git a/unit_tests/support/bug105/tests.py b/unit_tests/support/bug105/tests.py new file mode 100644 index 0000000..63a368b --- /dev/null 
+++ b/unit_tests/support/bug105/tests.py @@ -0,0 +1,49 @@ +from nose import tools + +def test_z(): + """(1) test z""" + pass + +def test_a(): + """(2) test a""" + pass + +def test_rz(): + """(3) Test with raises decorator""" + raise TypeError("err") +test_rz = tools.raises(TypeError)(test_rz) + +def decorate(func): + func.attr = 1 + return func + +def dec_replace(func): + def newfunc(): + func() + pass + return newfunc + +def dec_makedecorator(func): + def newfunc(): + pass + newfunc = tools.make_decorator(func)(newfunc) + return newfunc + +def test_dz(): + """(4) Test with non-replacing decorator""" + pass +test_dz = decorate(test_dz) + +def test_rz(): + """(5) Test with replacing decorator""" + pass +test_rz = dec_replace(test_rz) + +def test_mdz(): + """(6) Test with make_decorator decorator""" + pass +test_mdz = dec_makedecorator(test_mdz) + +def test_b(): + """(7) test b""" + pass diff --git a/unit_tests/support/config_defaults/a.cfg b/unit_tests/support/config_defaults/a.cfg new file mode 100644 index 0000000..4bc5e22 --- /dev/null +++ b/unit_tests/support/config_defaults/a.cfg @@ -0,0 +1,2 @@ +[nosetests] +verbosity = 3 diff --git a/unit_tests/support/config_defaults/b.cfg b/unit_tests/support/config_defaults/b.cfg new file mode 100644 index 0000000..e329464 --- /dev/null +++ b/unit_tests/support/config_defaults/b.cfg @@ -0,0 +1,2 @@ +[nosetests] +verbosity = 5 diff --git a/unit_tests/support/config_defaults/invalid.cfg b/unit_tests/support/config_defaults/invalid.cfg new file mode 100644 index 0000000..34b6a0c --- /dev/null +++ b/unit_tests/support/config_defaults/invalid.cfg @@ -0,0 +1 @@ +spam diff --git a/unit_tests/support/config_defaults/invalid_value.cfg b/unit_tests/support/config_defaults/invalid_value.cfg new file mode 100644 index 0000000..bc05d74 --- /dev/null +++ b/unit_tests/support/config_defaults/invalid_value.cfg @@ -0,0 +1,2 @@ +[nosetests] +verbosity = spam diff --git a/unit_tests/support/doctest/err_doctests.py b/unit_tests/support/doctest/err_doctests.py new file mode 100644 index 0000000..6d60696 --- /dev/null +++ b/unit_tests/support/doctest/err_doctests.py @@ -0,0 +1,12 @@ +""" +Module with errors in doctest formatting. + + >>> 1 + 'this is\n an error' +""" +def foo(): + pass + +if __name__ == '__main__': + import doctest + doctest.testmod() diff --git a/unit_tests/support/doctest/no_doctests.py b/unit_tests/support/doctest/no_doctests.py new file mode 100644 index 0000000..7e3750e --- /dev/null +++ b/unit_tests/support/doctest/no_doctests.py @@ -0,0 +1,9 @@ +""" +Module without doctests. +""" +def foo(): + pass + +if __name__ == '__main__': + import doctest + doctest.testmod() diff --git a/unit_tests/support/foo/__init__.py b/unit_tests/support/foo/__init__.py new file mode 100644 index 0000000..66e0a5e --- /dev/null +++ b/unit_tests/support/foo/__init__.py @@ -0,0 +1,7 @@ +boodle = True + +def somefunc(): + """This is a doctest in somefunc. 
+ >>> 'a' + 'a' + """ diff --git a/unit_tests/support/foo/bar/__init__.py b/unit_tests/support/foo/bar/__init__.py new file mode 100644 index 0000000..2ae2839 --- /dev/null +++ b/unit_tests/support/foo/bar/__init__.py @@ -0,0 +1 @@ +pass diff --git a/unit_tests/support/foo/bar/buz.py b/unit_tests/support/foo/bar/buz.py new file mode 100644 index 0000000..48c886d --- /dev/null +++ b/unit_tests/support/foo/bar/buz.py @@ -0,0 +1,8 @@ +from foo import boodle + +def afunc(): + """This is a doctest + >>> 2 + 3 + 5 + """ + pass diff --git a/unit_tests/support/foo/doctests.txt b/unit_tests/support/foo/doctests.txt new file mode 100644 index 0000000..e4b8d5b --- /dev/null +++ b/unit_tests/support/foo/doctests.txt @@ -0,0 +1,7 @@ +Doctests in a text file. + + >>> 1 + 2 + 3 + + >>> ['a', 'b'] + ['c'] + ['a', 'b', 'c'] diff --git a/unit_tests/support/foo/test_foo.py b/unit_tests/support/foo/test_foo.py new file mode 100644 index 0000000..2ae2839 --- /dev/null +++ b/unit_tests/support/foo/test_foo.py @@ -0,0 +1 @@ +pass diff --git a/unit_tests/support/foo/tests/dir_test_file.py b/unit_tests/support/foo/tests/dir_test_file.py new file mode 100644 index 0000000..79b86ec --- /dev/null +++ b/unit_tests/support/foo/tests/dir_test_file.py @@ -0,0 +1,3 @@ +# test file in test dir in a package +def test_foo(): + pass diff --git a/unit_tests/support/issue006/tests.py b/unit_tests/support/issue006/tests.py new file mode 100644 index 0000000..5c8ee60 --- /dev/null +++ b/unit_tests/support/issue006/tests.py @@ -0,0 +1,19 @@ +class Test1(object): + def test_nested_generator(self): + def func(): + pass + yield func, + + def test_nested_generator_mult(self): + def f2(a): + pass + for b in range(1, 4): + yield f2, b + + def try_something(self, a): + pass + + def test_normal_generator(self): + yield self.try_something, 1 + yield 'try_something', 2 + diff --git a/unit_tests/support/issue065/tests.py b/unit_tests/support/issue065/tests.py new file mode 100644 index 0000000..d246458 --- /dev/null +++ b/unit_tests/support/issue065/tests.py @@ -0,0 +1,5 @@ +class D(dict): + def __getattr__(self, k): + return dict.__getitem__(self, k) + +test = D() diff --git a/unit_tests/support/issue270/__init__.py b/unit_tests/support/issue270/__init__.py new file mode 100644 index 0000000..264b0f9 --- /dev/null +++ b/unit_tests/support/issue270/__init__.py @@ -0,0 +1,2 @@ +def setup(): + pass diff --git a/unit_tests/support/issue270/foo_test.py b/unit_tests/support/issue270/foo_test.py new file mode 100644 index 0000000..5a629d3 --- /dev/null +++ b/unit_tests/support/issue270/foo_test.py @@ -0,0 +1,7 @@ +class Foo_Test: + + def test_foo(self): + pass + + def test_bar(self): + pass diff --git a/unit_tests/support/other/file.txt b/unit_tests/support/other/file.txt new file mode 100644 index 0000000..792d600 --- /dev/null +++ b/unit_tests/support/other/file.txt @@ -0,0 +1 @@ +# diff --git a/unit_tests/support/pkgorg/lib/modernity.py b/unit_tests/support/pkgorg/lib/modernity.py new file mode 100644 index 0000000..2ae2839 --- /dev/null +++ b/unit_tests/support/pkgorg/lib/modernity.py @@ -0,0 +1 @@ +pass diff --git a/unit_tests/support/pkgorg/tests/test_mod.py b/unit_tests/support/pkgorg/tests/test_mod.py new file mode 100644 index 0000000..2516258 --- /dev/null +++ b/unit_tests/support/pkgorg/tests/test_mod.py @@ -0,0 +1,4 @@ +import modernity + +def test(): + pass diff --git a/unit_tests/support/script.py b/unit_tests/support/script.py new file mode 100755 index 0000000..9e33d77 --- /dev/null +++ b/unit_tests/support/script.py @@ -0,0 +1,3 
@@ +#!/usr/bin/env python + +print "FAIL" diff --git a/unit_tests/support/test-dir/test.py b/unit_tests/support/test-dir/test.py new file mode 100644 index 0000000..2ae2839 --- /dev/null +++ b/unit_tests/support/test-dir/test.py @@ -0,0 +1 @@ +pass diff --git a/unit_tests/support/test.py b/unit_tests/support/test.py new file mode 100644 index 0000000..9ad04e0 --- /dev/null +++ b/unit_tests/support/test.py @@ -0,0 +1,13 @@ +import unittest + +class Something(unittest.TestCase): + def test_something(self): + pass + +class TestTwo: + + def __repr__(self): + return 'TestTwo' + + def test_whatever(self): + pass diff --git a/unit_tests/test_attribute_plugin.py b/unit_tests/test_attribute_plugin.py new file mode 100644 index 0000000..94cdddb --- /dev/null +++ b/unit_tests/test_attribute_plugin.py @@ -0,0 +1,32 @@ + +from nose.tools import eq_ +from nose.plugins.attrib import attr + +def test_flags(): + # @attr('one','two') + def test(): + pass + test = attr('one','two')(test) + + eq_(test.one, 1) + eq_(test.two, 1) + +def test_values(): + # @attr(mood="hohum", colors=['red','blue']) + def test(): + pass + test = attr(mood="hohum", colors=['red','blue'])(test) + + eq_(test.mood, "hohum") + eq_(test.colors, ['red','blue']) + +def test_mixed(): + # @attr('slow', 'net', role='integration') + def test(): + pass + test = attr('slow', 'net', role='integration')(test) + + eq_(test.slow, 1) + eq_(test.net, 1) + eq_(test.role, 'integration') +
\ No newline at end of file diff --git a/unit_tests/test_bug105.py b/unit_tests/test_bug105.py new file mode 100644 index 0000000..e0362a8 --- /dev/null +++ b/unit_tests/test_bug105.py @@ -0,0 +1,32 @@ +import os +import unittest + +class TestBug105(unittest.TestCase): + + def test_load_in_def_order(self): + from nose.loader import TestLoader + + where = os.path.abspath(os.path.join(os.path.dirname(__file__), + 'support', 'bug105')) + + l = TestLoader() + testmod = l.loadTestsFromDir(where).next() + print testmod + testmod.setUp() + + def fix(t): + s = str(t) + if ': ' in s: + return s[s.index(': ')+2:] + return s + + tests = map(fix, testmod) + print tests + self.assertEqual(tests, ['tests.test_z', 'tests.test_a', + 'tests.test_dz', 'tests.test_mdz', + 'tests.test_b']) + + +if __name__ == '__main__': + unittest.main() + diff --git a/unit_tests/test_capture_plugin.py b/unit_tests/test_capture_plugin.py new file mode 100644 index 0000000..8988665 --- /dev/null +++ b/unit_tests/test_capture_plugin.py @@ -0,0 +1,79 @@ +import sys +import unittest +from optparse import OptionParser +from nose.config import Config +from nose.plugins.capture import Capture + +class TestCapturePlugin(unittest.TestCase): + + def setUp(self): + self._stdout = sys.stdout + + def tearDown(self): + sys.stdout = self._stdout + + def test_enabled_by_default(self): + c = Capture() + assert c.enabled + + def test_can_be_disabled(self): + c = Capture() + parser = OptionParser() + c.addOptions(parser) + options, args = parser.parse_args(['test_can_be_disabled', + '-s']) + c.configure(options, Config()) + assert not c.enabled + + c = Capture() + options, args = parser.parse_args(['test_can_be_disabled_long', + '--nocapture']) + c.configure(options, Config()) + assert not c.enabled + + env = {'NOSE_NOCAPTURE': 1} + c = Capture() + parser = OptionParser() + c.addOptions(parser, env) + options, args = parser.parse_args(['test_can_be_disabled']) + c.configure(options, Config()) + assert not c.enabled + + c = Capture() + parser = OptionParser() + c.addOptions(parser) + + options, args = parser.parse_args(['test_can_be_disabled']) + c.configure(options, Config()) + assert c.enabled + + def test_captures_stdout(self): + c = Capture() + c.start() + print "Hello" + c.end() + self.assertEqual(c.buffer, "Hello\n") + + def test_format_error(self): + class Dummy: + pass + d = Dummy() + c = Capture() + c.start() + try: + print "Oh my!" + raise Exception("boom") + except: + err = sys.exc_info() + formatted = c.formatError(d, err) + ec, ev, tb = err + (fec, fev, ftb) = formatted + # print fec, fev, ftb + + self.assertEqual(ec, fec) + self.assertEqual(tb, ftb) + assert 'Oh my!' in fev, "Output not found in error message" + assert 'Oh my!' 
in d.capturedOutput, "Output not attached to test" + +if __name__ == '__main__': + unittest.main() diff --git a/unit_tests/test_cases.py b/unit_tests/test_cases.py new file mode 100644 index 0000000..2e1ca2a --- /dev/null +++ b/unit_tests/test_cases.py @@ -0,0 +1,252 @@ +import unittest +import pdb +import sys +import nose.case +import nose.failure +from nose.config import Config +from mock import ResultProxyFactory, ResultProxy + +class TestNoseCases(unittest.TestCase): + + def test_function_test_case(self): + res = unittest.TestResult() + + a = [] + def func(a=a): + a.append(1) + + case = nose.case.FunctionTestCase(func) + case(res) + assert a[0] == 1 + + def test_method_test_case(self): + res = unittest.TestResult() + + a = [] + class TestClass(object): + def test_func(self, a=a): + a.append(1) + + case = nose.case.MethodTestCase(TestClass.test_func) + case(res) + assert a[0] == 1 + + def test_method_test_case_with_metaclass(self): + res = unittest.TestResult() + + class TestType(type): + def __new__(cls, name, bases, dct): + return type.__new__(cls, name, bases, dct) + a = [] + class TestClass(object): + __metaclass__ = TestType + def test_func(self, a=a): + a.append(1) + + case = nose.case.MethodTestCase(TestClass.test_func) + case(res) + assert a[0] == 1 + + def test_method_test_case_fixtures(self): + res = unittest.TestResult() + called = [] + class TestClass(object): + def setup(self): + called.append('setup') + def teardown(self): + called.append('teardown') + def test_func(self): + called.append('test') + + case = nose.case.MethodTestCase(TestClass.test_func) + case(res) + self.assertEqual(called, ['setup', 'test', 'teardown']) + + class TestClassFailingSetup(TestClass): + def setup(self): + called.append('setup') + raise Exception("failed") + called[:] = [] + case = nose.case.MethodTestCase(TestClassFailingSetup.test_func) + case(res) + self.assertEqual(called, ['setup']) + + class TestClassFailingTest(TestClass): + def test_func(self): + called.append('test') + raise Exception("failed") + + called[:] = [] + case = nose.case.MethodTestCase(TestClassFailingTest.test_func) + case(res) + self.assertEqual(called, ['setup', 'test', 'teardown']) + + def test_function_test_case_fixtures(self): + from nose.tools import with_setup + res = unittest.TestResult() + + called = {} + + def st(): + called['st'] = True + def td(): + called['td'] = True + + def func_exc(): + called['func'] = True + raise TypeError("An exception") + + func_exc = with_setup(st, td)(func_exc) + case = nose.case.FunctionTestCase(func_exc) + case(res) + assert 'st' in called + assert 'func' in called + assert 'td' in called + + def test_failure_case(self): + res = unittest.TestResult() + f = nose.failure.Failure(ValueError, "No such test spam") + f(res) + assert res.errors + + +class TestNoseTestWrapper(unittest.TestCase): + def test_case_fixtures_called(self): + """Instance fixtures are properly called for wrapped tests""" + res = unittest.TestResult() + called = [] + + class TC(unittest.TestCase): + def setUp(self): + print "TC setUp %s" % self + called.append('setUp') + def runTest(self): + print "TC runTest %s" % self + called.append('runTest') + def tearDown(self): + print "TC tearDown %s" % self + called.append('tearDown') + + case = nose.case.Test(TC()) + case(res) + assert not res.errors, res.errors + assert not res.failures, res.failures + self.assertEqual(called, ['setUp', 'runTest', 'tearDown']) + + def test_result_proxy_used(self): + """A result proxy is used to wrap the result for all tests""" + class 
TC(unittest.TestCase): + def runTest(self): + raise Exception("error") + + ResultProxy.called[:] = [] + res = unittest.TestResult() + config = Config() + case = nose.case.Test(TC(), config=config, + resultProxy=ResultProxyFactory()) + + case(res) + assert not res.errors, res.errors + assert not res.failures, res.failures + + calls = [ c[0] for c in ResultProxy.called ] + self.assertEqual(calls, ['beforeTest', 'startTest', 'addError', + 'stopTest', 'afterTest']) + + def test_address(self): + from nose.util import absfile, src + class TC(unittest.TestCase): + def runTest(self): + raise Exception("error") + + def dummy(i): + pass + + def test(): + pass + + class Test: + def test(self): + pass + + def test_gen(self): + def tryit(i): + pass + for i in range (0, 2): + yield tryit, i + + def try_something(self, a, b): + pass + + fl = src(absfile(__file__)) + case = nose.case.Test(TC()) + self.assertEqual(case.address(), (fl, __name__, 'TC.runTest')) + + case = nose.case.Test(nose.case.FunctionTestCase(test)) + self.assertEqual(case.address(), (fl, __name__, 'test')) + + case = nose.case.Test(nose.case.FunctionTestCase( + dummy, arg=(1,), descriptor=test)) + self.assertEqual(case.address(), (fl, __name__, 'test')) + + case = nose.case.Test(nose.case.MethodTestCase(Test.test)) + self.assertEqual(case.address(), (fl, __name__, 'Test.test')) + + case = nose.case.Test( + nose.case.MethodTestCase(Test.try_something, arg=(1,2,), + descriptor=Test.test_gen)) + self.assertEqual(case.address(), + (fl, __name__, 'Test.test_gen')) + + case = nose.case.Test( + nose.case.MethodTestCase(Test.test_gen, test=dummy, arg=(1,))) + self.assertEqual(case.address(), + (fl, __name__, 'Test.test_gen')) + + def test_context(self): + class TC(unittest.TestCase): + def runTest(self): + pass + def test(): + pass + + class Test: + def test(self): + pass + + case = nose.case.Test(TC()) + self.assertEqual(case.context, TC) + + case = nose.case.Test(nose.case.FunctionTestCase(test)) + self.assertEqual(case.context, sys.modules[__name__]) + + case = nose.case.Test(nose.case.MethodTestCase(Test.test)) + self.assertEqual(case.context, Test) + + def test_short_description(self): + class TC(unittest.TestCase): + def test_a(self): + """ + This is the description + """ + pass + + def test_b(self): + """This is the description + """ + pass + + def test_c(self): + pass + + case_a = nose.case.Test(TC('test_a')) + case_b = nose.case.Test(TC('test_b')) + case_c = nose.case.Test(TC('test_c')) + + assert case_a.shortDescription().endswith("This is the description") + assert case_b.shortDescription().endswith("This is the description") + assert case_c.shortDescription() in (None, # pre 2.7 + 'test_c (test_cases.TC)') # 2.7 + +if __name__ == '__main__': + unittest.main() diff --git a/unit_tests/test_config.py b/unit_tests/test_config.py new file mode 100644 index 0000000..8887ddd --- /dev/null +++ b/unit_tests/test_config.py @@ -0,0 +1,106 @@ +import re +import os +import tempfile +import unittest +import warnings +import pickle +import sys + +import nose.config +from nose.plugins.manager import DefaultPluginManager +from nose.plugins.skip import SkipTest + + +class TestNoseConfig(unittest.TestCase): + + def test_defaults(self): + c = nose.config.Config() + assert c.addPaths == True + # FIXME etc + + def test_reset(self): + c = nose.config.Config() + c.include = 'include' + assert c.include == 'include' + c.reset() + assert c.include is None + + def test_update(self): + c = nose.config.Config() + c.update({'exclude':'x'}) + assert c.exclude == 
'x' + + def test_multiple_include(self): + c = nose.config.Config() + c.configure(['program', '--include=a', '--include=b']) + self.assertEqual(len(c.include), 2) + a, b = c.include + assert a.match('a') + assert not a.match('b') + assert b.match('b') + assert not b.match('a') + + def test_single_include(self): + c = nose.config.Config() + c.configure(['program', '--include=b']) + self.assertEqual(len(c.include), 1) + b = c.include[0] + assert b.match('b') + assert not b.match('a') + + def test_plugins(self): + c = nose.config.Config() + assert c.plugins + c.plugins.begin() + + def test_testnames(self): + c = nose.config.Config() + c.configure(['program', 'foo', 'bar', 'baz.buz.biz']) + self.assertEqual(c.testNames, ['foo', 'bar', 'baz.buz.biz']) + + c = nose.config.Config(testNames=['foo']) + c.configure([]) + self.assertEqual(c.testNames, ['foo']) + + def test_where(self): + # we don't need to see our own warnings + warnings.filterwarnings(action='ignore', + category=DeprecationWarning, + module='nose.config') + + here = os.path.dirname(__file__) + support = os.path.join(here, 'support') + foo = os.path.abspath(os.path.join(support, 'foo')) + c = nose.config.Config() + c.configure(['program', '-w', foo, '-w', 'bar']) + self.assertEqual(c.workingDir, foo) + self.assertEqual(c.testNames, ['bar']) + + def test_progname_looks_like_option(self): + # issue #184 + c = nose.config.Config() + # the -v here is the program name, not an option + # this matters eg. with python -c "import nose; nose.main()" + c.configure(['-v', 'mytests']) + self.assertEqual(c.verbosity, 1) + + def test_pickle_empty(self): + c = nose.config.Config() + cp = pickle.dumps(c) + cc = pickle.loads(cp) + + def test_pickle_configured(self): + if 'java' in sys.version.lower(): + raise SkipTest("jython has no profiler plugin") + c = nose.config.Config(plugins=DefaultPluginManager()) + c.configure(['--with-doctest', '--with-coverage', '--with-profile', + '--with-id', '--attr=A', '--collect', '--all', + '--with-isolation', '-d', '--with-xunit', '--processes=2', + '--pdb']) + cp = pickle.dumps(c) + cc = pickle.loads(cp) + assert cc.plugins._plugins + + +if __name__ == '__main__': + unittest.main() diff --git a/unit_tests/test_config_defaults.rst b/unit_tests/test_config_defaults.rst new file mode 100644 index 0000000..944d370 --- /dev/null +++ b/unit_tests/test_config_defaults.rst @@ -0,0 +1,146 @@ + >>> from optparse import OptionParser + >>> import os + >>> from cStringIO import StringIO + + >>> import nose.config + +All commandline options to fall back to values configured in +configuration files. The configuration lives in a single section +("nosetests") in each configuration file. + + >>> support = os.path.join(os.path.dirname(__file__), "support", + ... "config_defaults") + + >>> def error(msg): + ... print "error: %s" % msg + + >>> def get_parser(): + ... parser = OptionParser() + ... parser.add_option( + ... "-v", "--verbose", + ... action="count", dest="verbosity", + ... default=1) + ... parser.add_option( + ... "--verbosity", action="store", dest="verbosity", + ... type="int") + ... return nose.config.ConfiguredDefaultsOptionParser(parser, + ... "nosetests", + ... error) + + >>> def parse(args, config_files): + ... argv = ["nosetests"] + list(args) + ... return get_parser().parseArgsAndConfigFiles(argv, config_files) + + +Options on the command line combine with the defaults from the config +files and the options' own defaults (here, -v adds 1 to verbosity of 3 +from a.cfg). 
Config file defaults take precedence over options' +defaults. + + >>> options, args = parse([], []) + >>> options.verbosity + 1 + >>> options, args = parse([], os.path.join(support, "a.cfg")) + >>> options.verbosity + 3 + >>> options, args = parse(["-v"], os.path.join(support, "a.cfg")) + >>> options.verbosity + 4 + +Command line arguments take precedence + + >>> options, args = parse(["--verbosity=7"], os.path.join(support, "a.cfg")) + >>> options.verbosity + 7 + +Where options appear in several config files, the last config file wins + + >>> files = [os.path.join(support, "b.cfg"), os.path.join(support, "a.cfg")] + >>> options, args = parse([], files) + >>> options.verbosity + 3 + + +Invalid values should cause an error specifically about configuration +files (not about a commandline option) + + >>> options, arguments = parse([], StringIO("""\ + ... [nosetests] + ... verbosity = spam + ... """)) + error: Error reading config file '<???>': option 'verbosity': invalid integer value: 'spam' + +Unrecognised option in nosetests config section + + >>> options, args = parse([], StringIO("[nosetests]\nspam=eggs\n")) + error: Error reading config file '<???>': no such option 'spam' + +If there were multiple config files, the error message tells us which +file contains the bad option name or value + + >>> options, args = parse([], [os.path.join(support, "a.cfg"), + ... os.path.join(support, "invalid_value.cfg"), + ... os.path.join(support, "b.cfg")]) + ... # doctest: +ELLIPSIS + error: Error reading config file '...invalid_value.cfg': option 'verbosity': invalid integer value: 'spam' + + +Invalid config files + +(file-like object) + + >>> options, args = parse([], StringIO("spam")) + error: Error reading config file '<???>': File contains no section headers. + file: <???>, line: 1 + 'spam' + +(filename) + + >>> options, args = parse([], os.path.join(support, "invalid.cfg")) + ... # doctest: +ELLIPSIS + error: Error reading config file '...invalid.cfg': File contains no section headers. + file: ...invalid.cfg, line: 1 + 'spam\n' + +(filenames, length == 1) + + >>> options, args = parse([], [os.path.join(support, "invalid.cfg")]) + ... # doctest: +ELLIPSIS + error: Error reading config file '...invalid.cfg': File contains no section headers. + file: ...invalid.cfg, line: 1 + 'spam\n' + +(filenames, length > 1) + +If there were multiple config files, the error message tells us which +file is bad + + >>> options, args = parse([], [os.path.join(support, "a.cfg"), + ... os.path.join(support, "invalid.cfg"), + ... os.path.join(support, "b.cfg")]) + ... # doctest: +ELLIPSIS + error: Error reading config file '...invalid.cfg': File contains no section headers. 
+ file: ...invalid.cfg, line: 1 + 'spam\n' + + +Missing config files don't deserve an error or warning + +(filename) + + >>> options, args = parse([], os.path.join(support, "nonexistent.cfg")) + >>> print options.__dict__ + {'verbosity': 1} + +(filenames) + + >>> options, args = parse([], [os.path.join(support, "nonexistent.cfg")]) + >>> print options.__dict__ + {'verbosity': 1} + + +The same goes for missing config file section ("nosetests") + + >>> options, args = parse([], StringIO("[spam]\nfoo=bar\n")) + >>> print options.__dict__ + {'verbosity': 1} diff --git a/unit_tests/test_core.py b/unit_tests/test_core.py new file mode 100644 index 0000000..6fb6cef --- /dev/null +++ b/unit_tests/test_core.py @@ -0,0 +1,96 @@ +import os +import sys +import unittest +from cStringIO import StringIO +from optparse import OptionParser +import nose.core +from nose.config import Config +from nose.tools import set_trace +from mock import Bucket, MockOptParser + + +class NullLoader: + def loadTestsFromNames(self, names): + return unittest.TestSuite() + +class TestAPI_run(unittest.TestCase): + + def test_restore_stdout(self): + print "AHOY" + s = StringIO() + print s + stdout = sys.stdout + conf = Config(stream=s) + # set_trace() + print "About to run" + res = nose.core.run( + testLoader=NullLoader(), argv=['test_run'], env={}, config=conf) + print "Done running" + stdout_after = sys.stdout + self.assertEqual(stdout, stdout_after) + +class Undefined(object): + pass + +class TestUsage(unittest.TestCase): + + def test_from_directory(self): + usage_txt = nose.core.TestProgram.usage() + assert usage_txt.startswith('nose collects tests automatically'), ( + "Unexpected usage: '%s...'" % usage_txt[0:50].replace("\n", '\n')) + + def test_from_zip(self): + requested_data = [] + + # simulates importing nose from a zip archive + # with a zipimport.zipimporter instance + class fake_zipimporter(object): + + prefix = '' + zipfile = '<fake zipfile>' + + def get_data(self, path): + requested_data.append(path) + return "<usage>" + + existing_loader = getattr(nose, '__loader__', Undefined) + try: + nose.__loader__ = fake_zipimporter() + usage_txt = nose.core.TestProgram.usage() + self.assertEqual(usage_txt, '<usage>') + self.assertEqual(requested_data, ['nose%susage.txt' % os.sep]) + finally: + if existing_loader is not Undefined: + nose.__loader__ = existing_loader + else: + del nose.__loader__ + + def test_from_zip_with_prefix(self): + requested_data = [] + + # simulates importing nose from a zip archive + # with a zipimport.zipimporter instance + class fake_zipimporter(object): + + prefix = 'PREFIX' + zipfile = '<fake zipfile>' + + def get_data(self, path): + requested_data.append(path) + return "<usage>" + + existing_loader = getattr(nose, '__loader__', Undefined) + try: + nose.__loader__ = fake_zipimporter() + usage_txt = nose.core.TestProgram.usage() + self.assertEqual(usage_txt, '<usage>') + self.assertEqual(requested_data, + ['PREFIX%snose%susage.txt' % (os.sep, os.sep)]) + finally: + if existing_loader is not Undefined: + nose.__loader__ = existing_loader + else: + del nose.__loader__ + +if __name__ == '__main__': + unittest.main() diff --git a/unit_tests/test_deprecated_plugin.py b/unit_tests/test_deprecated_plugin.py new file mode 100644 index 0000000..6c62481 --- /dev/null +++ b/unit_tests/test_deprecated_plugin.py @@ -0,0 +1,131 @@ +import unittest +from nose.config import Config +from nose.plugins.deprecated import Deprecated, DeprecatedTest +from nose.result import TextTestResult, _TextTestResult +from 
StringIO import StringIO +from optparse import OptionParser +try: + # 2.7+ + from unittest.runner import _WritelnDecorator +except ImportError: + from unittest import _WritelnDecorator + + +class TestDeprecatedPlugin(unittest.TestCase): + + def test_api_present(self): + sk = Deprecated() + sk.addOptions + sk.configure + sk.prepareTestResult + + def test_prepare_patches_result(self): + stream = _WritelnDecorator(StringIO()) + res = _TextTestResult(stream, 0, 1) + sk = Deprecated() + sk.prepareTestResult(res) + res._orig_addError + res._orig_printErrors + res._orig_wasSuccessful + res.deprecated + self.assertEqual( + res.errorClasses, + {DeprecatedTest: (res.deprecated, 'DEPRECATED', False)}) + + # result w/out print works too + res = unittest.TestResult() + sk = Deprecated() + sk.prepareTestResult(res) + res._orig_addError + res.deprecated + self.assertEqual( + res.errorClasses, + {DeprecatedTest: (res.deprecated, 'DEPRECATED', False)}) + + def test_patched_result_handles_deprecated(self): + res = unittest.TestResult() + sk = Deprecated() + sk.prepareTestResult(res) + + class TC(unittest.TestCase): + def test(self): + raise DeprecatedTest('deprecated me') + + test = TC('test') + test(res) + assert not res.errors, "Deprecated was not caught: %s" % res.errors + assert res.deprecated + assert res.deprecated[0][0] is test + + def test_patches_only_when_needed(self): + class NoPatch(unittest.TestResult): + def __init__(self): + self.errorClasses = {} + + res = NoPatch() + sk = Deprecated() + sk.prepareTestResult(res) + assert not hasattr(res, '_orig_addError'), \ + "Deprecated patched a result class it didn't need to patch" + + + def test_deprecated_output(self): + class TC(unittest.TestCase): + def test(self): + raise DeprecatedTest('deprecated me') + + stream = _WritelnDecorator(StringIO()) + res = _TextTestResult(stream, 0, 1) + sk = Deprecated() + sk.prepareTestResult(res) + + test = TC('test') + test(res) + assert not res.errors, "Deprecated was not caught: %s" % res.errors + assert res.deprecated + + res.printErrors() + out = stream.getvalue() + assert out + assert out.strip() == "D" + assert res.wasSuccessful() + + def test_deprecated_output_verbose(self): + + class TC(unittest.TestCase): + def test(self): + raise DeprecatedTest('deprecated me too') + + stream = _WritelnDecorator(StringIO()) + res = _TextTestResult(stream, 0, verbosity=2) + sk = Deprecated() + sk.prepareTestResult(res) + test = TC('test') + test(res) + assert not res.errors, "Deprecated was not caught: %s" % res.errors + assert res.deprecated + + res.printErrors() + out = stream.getvalue() + print out + assert out + + assert ' ... 
DEPRECATED' in out + assert 'deprecated me too' in out + + def test_enabled_by_default(self): + sk = Deprecated() + assert sk.enabled, "Deprecated was not enabled by default" + + def test_can_be_disabled(self): + parser = OptionParser() + sk = Deprecated() + sk.addOptions(parser) + options, args = parser.parse_args(['--no-deprecated']) + sk.configure(options, Config()) + assert not sk.enabled, \ + "Deprecated was not disabled by noDeprecated option" + + +if __name__ == '__main__': + unittest.main() diff --git a/unit_tests/test_doctest_error_handling.py b/unit_tests/test_doctest_error_handling.py new file mode 100644 index 0000000..fcdf388 --- /dev/null +++ b/unit_tests/test_doctest_error_handling.py @@ -0,0 +1,40 @@ +import os +import sys +import unittest +from nose.config import Config +from nose.plugins import doctests +from mock import Bucket + +class TestDoctestErrorHandling(unittest.TestCase): + + def setUp(self): + self._path = sys.path[:] + here = os.path.dirname(__file__) + testdir = os.path.join(here, 'support', 'doctest') + sys.path.insert(0, testdir) + p = doctests.Doctest() + p.can_configure = True + p.configure(Bucket(), Config()) + self.p = p + + def tearDown(self): + sys.path = self._path[:] + + def test_no_doctests_in_file(self): + p = self.p + mod = __import__('no_doctests') + loaded = [ t for t in p.loadTestsFromModule(mod) ] + assert not loaded, "Loaded %s from empty module" % loaded + + def test_err_doctests_raises_exception(self): + p = self.p + mod = __import__('err_doctests') + try: + loaded = [ t for t in p.loadTestsFromModule(mod) ] + except ValueError: + pass + else: + self.fail("Error doctests file did not raise ValueError") + +if __name__ == '__main__': + unittest.main() diff --git a/unit_tests/test_doctest_munging.rst b/unit_tests/test_doctest_munging.rst new file mode 100644 index 0000000..fdbce64 --- /dev/null +++ b/unit_tests/test_doctest_munging.rst @@ -0,0 +1,105 @@ +doctest output normalization for plugin testing support +======================================================= + +nose.plugins.plugintest.run() is used for testing nose plugins in +doctests, so it needs to normalise nose output to remove information +that is not of interest to most plugin tests. + +We strip stack trace from formatted exceptions, using a regexp copied +from ``doctest.py``. That regexp always matches to the end of a +string, so we split on blank lines before running the regexp on each +resulting block. + + >>> from nose.plugins.plugintest import blankline_separated_blocks + >>> list(blankline_separated_blocks("spam\neggs\n\nfoo\nbar\n\n")) + ['spam\neggs\n\n', 'foo\nbar\n\n'] + >>> list(blankline_separated_blocks("spam\neggs\n\nfoo\nbar\n")) + ['spam\neggs\n\n', 'foo\nbar\n'] + >>> list(blankline_separated_blocks("spam\neggs\n\nfoo\nbar")) + ['spam\neggs\n\n', 'foo\nbar'] + >>> list(blankline_separated_blocks("")) + [] + >>> list(blankline_separated_blocks("spam")) + ['spam'] + +``remove_stack_traces`` removes the stack traces, replacing them with +an ellipsis. Note the first line here is chosen not to be "Traceback +(most recent...", since doctest would interpret that as meaning that +the example should raise an exception! + + >>> from nose.plugins.plugintest import remove_stack_traces + >>> print remove_stack_traces("""\ + ... Ceci n'est pas une traceback. + ... Traceback (most recent call last): + ... File "/some/dir/foomodule.py", line 15, in runTest + ... File "/some/dir/spam.py", line 293, in who_knows_what + ... AssertionError: something bad happened + ... 
""") + Ceci n'est pas une traceback. + Traceback (most recent call last): + ... + AssertionError: something bad happened + <BLANKLINE> + +Multiple tracebacks in an example are all replaced, as long as they're +separated by blank lines. + + >>> print remove_stack_traces("""\ + ... Ceci n'est pas une traceback. + ... Traceback (most recent call last): + ... File spam + ... AttributeError: eggs + ... + ... Traceback (most recent call last): + ... File eggs + ... AttributeError: spam + ... """) + Ceci n'est pas une traceback. + Traceback (most recent call last): + ... + AttributeError: eggs + <BLANKLINE> + Traceback (most recent call last): + ... + AttributeError: spam + <BLANKLINE> + + +Putting it together, ``munge_nose_output_for_doctest()`` removes stack +traces, removes test timings from "Ran n test(s)" output, and strips +trailing blank lines. + + >>> from nose.plugins.plugintest import munge_nose_output_for_doctest + >>> print munge_nose_output_for_doctest("""\ + ... runTest (foomodule.PassingTest) ... ok + ... runTest (foomodule.FailingTest) ... FAIL + ... + ... ====================================================================== + ... FAIL: runTest (foomodule.FailingTest) + ... ---------------------------------------------------------------------- + ... Traceback (most recent call last): + ... File "/some/dir/foomodule.py", line 15, in runTest + ... File "/some/dir/spam.py", line 293, in who_knows_what + ... AssertionError: something bad happened + ... + ... ---------------------------------------------------------------------- + ... Ran 1 test in 0.082s + ... + ... FAILED (failures=1) + ... + ... + ... """) + runTest (foomodule.PassingTest) ... ok + runTest (foomodule.FailingTest) ... FAIL + <BLANKLINE> + ====================================================================== + FAIL: runTest (foomodule.FailingTest) + ---------------------------------------------------------------------- + Traceback (most recent call last): + ... 
+ AssertionError: something bad happened + <BLANKLINE> + ---------------------------------------------------------------------- + Ran 1 test in ...s + <BLANKLINE> + FAILED (failures=1) diff --git a/unit_tests/test_id_plugin.py b/unit_tests/test_id_plugin.py new file mode 100644 index 0000000..d70fc07 --- /dev/null +++ b/unit_tests/test_id_plugin.py @@ -0,0 +1,20 @@ +import unittest +from nose.config import Config +from nose.plugins.builtin import TestId +import mock + +class TestTestIdPlugin(unittest.TestCase): + + def test_default_id_file_is_in_working_dir(self): + tid = TestId() + c = Config() + opt = mock.Bucket() + opt.testIdFile = '.noseids' + tid.configure(opt, c) + print tid.idfile + assert tid.idfile.startswith(c.workingDir), \ + "%s is not under %s" % (tid.idfile, c.workingDir) + + +if __name__ == '__main__': + unittest.main() diff --git a/unit_tests/test_importer.py b/unit_tests/test_importer.py new file mode 100644 index 0000000..91de8a9 --- /dev/null +++ b/unit_tests/test_importer.py @@ -0,0 +1,55 @@ +import os +import sys +import unittest +import nose.config +import nose.importer + +class TestImporter(unittest.TestCase): + + def setUp(self): + self.p = sys.path[:] + + def tearDown(self): + sys.path = self.p[:] + + def test_add_paths(self): + where = os.path.abspath(os.path.join(os.path.dirname(__file__), + 'support')) + foo = os.path.join(where, 'foo') + foobar = os.path.join(foo, 'bar') + nose.importer.add_path(foobar) + + assert not foobar in sys.path + assert not foo in sys.path + assert where in sys.path + assert sys.path[0] == where, "%s first should be %s" % (sys.path, where) + + def test_import(self): + where = os.path.abspath(os.path.join(os.path.dirname(__file__), + 'support')) + foo = os.path.join(where, 'foo') + foobar = os.path.join(foo, 'bar') + + imp = nose.importer.Importer() + mod = imp.importFromDir(foobar, 'buz') + assert where in sys.path + # buz has an intra-package import that sets boodle + assert mod.boodle + + def test_module_no_file(self): + where = os.path.abspath(os.path.join(os.path.dirname(__file__), + 'support')) + foo = os.path.join(where, 'foo') + foobar = os.path.join(foo, 'bar') + + # something that's not a real module and has no __file__ + sys.modules['buz'] = 'Whatever' + + imp = nose.importer.Importer() + mod = imp.importFromDir(foobar, 'buz') + assert where in sys.path + # buz has an intra-package import that sets boodle + assert mod.boodle + +if __name__ == '__main__': + unittest.main() diff --git a/unit_tests/test_inspector.py b/unit_tests/test_inspector.py new file mode 100644 index 0000000..be18014 --- /dev/null +++ b/unit_tests/test_inspector.py @@ -0,0 +1,140 @@ +import inspect +import sys +import textwrap +import tokenize +import traceback +import unittest + +try: + from cStringIO import StringIO +except ImportError: + from StringIO import StringIO + +from nose.inspector import inspect_traceback, Expander, tbsource + +class TestExpander(unittest.TestCase): + + def test_simple_inspect_frame(self): + src = StringIO('a > 2') + lc = { 'a': 2} + gb = {} + exp = Expander(lc, gb) + + tokenize.tokenize(src.readline, exp) + # print "'%s'" % exp.expanded_source + self.assertEqual(exp.expanded_source.strip(), '2 > 2') + + def test_inspect_traceback_continued(self): + a = 6 + out = '' + try: + assert a < 1, \ + "This is a multline expression" + except AssertionError: + et, ev, tb = sys.exc_info() + out = inspect_traceback(tb) + # print "'%s'" % out.strip() + self.assertEqual(out.strip(), + '>> assert 6 < 1, \\\n ' + '"This is a multline 
expression"') + + def test_get_tb_source_simple(self): + # no func frame + try: + assert False + except AssertionError: + et, ev, tb = sys.exc_info() + lines, lineno = tbsource(tb, 1) + self.assertEqual(''.join(lines).strip(), 'assert False') + self.assertEqual(lineno, 0) + + def test_get_tb_source_func(self): + # func frame + def check_even(n): + print n + assert n % 2 == 0 + try: + check_even(1) + except AssertionError: + et, ev, tb = sys.exc_info() + lines, lineno = tbsource(tb) + out = textwrap.dedent(''.join(lines)) + self.assertEqual(out, + ' print n\n' + ' assert n % 2 == 0\n' + 'try:\n' + ' check_even(1)\n' + 'except AssertionError:\n' + ' et, ev, tb = sys.exc_info()\n' + ) + self.assertEqual(lineno, 3) + + # FIXME 2 func frames + + def test_pick_tb_lines(self): + try: + val = "fred" + def defred(n): + return n.replace('fred','') + assert defred(val) == 'barney', "Fred - fred != barney?" + except AssertionError: + et, ev, tb = sys.exc_info() + out = inspect_traceback(tb) + # print "'%s'" % out.strip() + self.assertEqual(out.strip(), + ">> assert defred('fred') == 'barney', " + '"Fred - fred != barney?"') + try: + val = "fred" + def defred(n): + return n.replace('fred','') + assert defred(val) == 'barney', \ + "Fred - fred != barney?" + def refred(n): + return n + 'fred' + except AssertionError: + et, ev, tb = sys.exc_info() + out = inspect_traceback(tb) + #print "'%s'" % out.strip() + self.assertEqual(out.strip(), + ">> assert defred('fred') == 'barney', " + '\\\n "Fred - fred != barney?"') + + S = {'setup':1} + def check_even(n, nn): + assert S['setup'] + print n, nn + assert n % 2 == 0 or nn % 2 == 0 + try: + check_even(1, 3) + except AssertionError: + et, ev, tb = sys.exc_info() + out = inspect_traceback(tb) + print "'%s'" % out.strip() + self.assertEqual(out.strip(), + "assert {'setup': 1}['setup']\n" + " print 1, 3\n" + ">> assert 1 % 2 == 0 or 3 % 2 == 0") + + def test_bug_95(self): + """Test that inspector can handle multi-line docstrings""" + try: + """docstring line 1 + docstring line 2 + """ + a = 2 + assert a == 4 + except AssertionError: + et, ev, tb = sys.exc_info() + out = inspect_traceback(tb) + print "'%s'" % out.strip() + self.assertEqual(out.strip(), + "2 = 2\n" + ">> assert 2 == 4") + +if __name__ == '__main__': + #import logging + #logging.basicConfig() + #logging.getLogger('').setLevel(10) + unittest.main() + diff --git a/unit_tests/test_isolation_plugin.py b/unit_tests/test_isolation_plugin.py new file mode 100644 index 0000000..497fe6d --- /dev/null +++ b/unit_tests/test_isolation_plugin.py @@ -0,0 +1,2 @@ +def test_lint(): + import nose.plugins.isolate diff --git a/unit_tests/test_issue155.rst b/unit_tests/test_issue155.rst new file mode 100644 index 0000000..450866a --- /dev/null +++ b/unit_tests/test_issue155.rst @@ -0,0 +1,46 @@ +AttributeError from a method call should not be hidden by exception +handling intended to ignore the case where the method is not present. + + >>> import sys + >>> import unittest + + >>> import nose.case + >>> import nose.proxy + >>> import nose.result + >>> import nose.util + >>> import nose.plugins.doctests + + >>> class Result(nose.result.TextTestResult): + ... + ... def afterTest(self, test): + ... raise AttributeError("bug in Result") + ... + ... def beforeTest(self, test): + ... raise AttributeError("bug in Result") + + >>> class TestCase(unittest.TestCase): + ... + ... def address(self): + ... raise AttributeError("bug in TestCase") + ... + ... def runTest(self): + ... 
pass + + + >>> test = nose.case.Test(TestCase()) + >>> result = Result(sys.stdout, True, 1) + >>> proxy = nose.proxy.ResultProxy(result, test) + >>> proxy.beforeTest(test) + Traceback (most recent call last): + AttributeError: bug in Result + >>> proxy.afterTest(test) + Traceback (most recent call last): + AttributeError: bug in Result + + >>> test.address() + Traceback (most recent call last): + AttributeError: bug in TestCase + + >>> nose.util.test_address(test) + Traceback (most recent call last): + AttributeError: bug in TestCase diff --git a/unit_tests/test_issue270.rst b/unit_tests/test_issue270.rst new file mode 100644 index 0000000..b509bfe --- /dev/null +++ b/unit_tests/test_issue270.rst @@ -0,0 +1,24 @@ +Multiprocess test collection from packages +------------------------------------------ + +Tests that the multiprocess plugin correctly collects tests from packages + + >>> import os + >>> from nose.plugins.plugintest import run_buffered as run + >>> from nose.plugins.multiprocess import MultiProcess + >>> support = os.path.join(os.path.dirname(__file__), 'support') + >>> issue270 = os.path.join(support, 'issue270') + +The test package has a package-level fixture, which causes the entire package +to be dispatched to a multiprocess worker. Tests are still collected and run +properly. + + >>> argv = [__file__, '-v', '--processes=2', issue270] + >>> run(argv=argv, plugins=[MultiProcess()]) + issue270.foo_test.Foo_Test.test_bar ... ok + issue270.foo_test.Foo_Test.test_foo ... ok + <BLANKLINE> + ---------------------------------------------------------------------- + Ran 2 tests in ...s + <BLANKLINE> + OK diff --git a/unit_tests/test_issue270_fixtures.py b/unit_tests/test_issue270_fixtures.py new file mode 100644 index 0000000..d18abba --- /dev/null +++ b/unit_tests/test_issue270_fixtures.py @@ -0,0 +1,11 @@ +from nose.plugins.skip import SkipTest +from nose.plugins.multiprocess import MultiProcess + +def setup_module(): + try: + import multiprocessing + if 'active' in MultiProcess.status: + raise SkipTest("Multiprocess plugin is active. 
Skipping tests of " + "plugin itself.") + except ImportError: + raise SkipTest("multiprocessing module not available") diff --git a/unit_tests/test_issue_006.py b/unit_tests/test_issue_006.py new file mode 100644 index 0000000..d04c174 --- /dev/null +++ b/unit_tests/test_issue_006.py @@ -0,0 +1,31 @@ +import os +import unittest + +class TestIssue006(unittest.TestCase): + def test_load_nested_generator(self): + from nose.config import Config + from nose.loader import TestLoader + + where = os.path.abspath(os.path.join(os.path.dirname(__file__), + 'support', 'issue006')) + l = TestLoader() + testmod = iter(l.loadTestsFromName(where)).next() + print testmod + testmod.setUp() + + testcase = iter(testmod).next() + expect = [ + ['tests.Test1.test_nested_generator'], + ['tests.Test1.test_nested_generator_mult(1,)', + 'tests.Test1.test_nested_generator_mult(2,)', + 'tests.Test1.test_nested_generator_mult(3,)'], + ['tests.Test1.test_normal_generator(1,)', + 'tests.Test1.test_normal_generator(2,)'] + ] + for test in testcase: + tests = map(str, test) + print tests + self.assertEqual(tests, expect.pop(0)) + +if __name__ == '__main__': + unittest.main() diff --git a/unit_tests/test_issue_064.py b/unit_tests/test_issue_064.py new file mode 100644 index 0000000..5bf1ca8 --- /dev/null +++ b/unit_tests/test_issue_064.py @@ -0,0 +1,2 @@ +def test_is_generator_alias(): + from nose.util import is_generator, isgenerator diff --git a/unit_tests/test_issue_065.py b/unit_tests/test_issue_065.py new file mode 100644 index 0000000..425f197 --- /dev/null +++ b/unit_tests/test_issue_065.py @@ -0,0 +1,20 @@ +import os +from nose import loader +import unittest + +support = os.path.join(os.path.dirname(__file__), 'support') + +class TestIssue065(unittest.TestCase): + def test_dict_wrapper_instance_not_loaded(self): + wd = os.path.join(support, 'issue065') + l = loader.TestLoader() #workingDir=wd) + tests = l.loadTestsFromDir(wd) + tests = list(tests) + self.assertEqual(len(tests), 1) + tests = list(tests[0]) + assert not tests, "Tests were loaded from module with no tests" + + + +if __name__ == '__main__': + unittest.main() diff --git a/unit_tests/test_issue_100.rst b/unit_tests/test_issue_100.rst new file mode 100644 index 0000000..ea5d9d1 --- /dev/null +++ b/unit_tests/test_issue_100.rst @@ -0,0 +1,12 @@ +This is a test of the bug reported in issue 100: test.address() fails +for a case defined in a doctest. + + >>> import unittest + >>> import nose.case + >>> class SimpleTest(unittest.TestCase): + ... + ... def runTest(self): + ... pass + >>> test = nose.case.Test(SimpleTest()) + >>> test.address() + (None, '__builtin__', 'SimpleTest.runTest') diff --git a/unit_tests/test_issue_101.py b/unit_tests/test_issue_101.py new file mode 100644 index 0000000..cccfc42 --- /dev/null +++ b/unit_tests/test_issue_101.py @@ -0,0 +1,24 @@ +import sys +import unittest +import warnings +from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin + +class TestErrorClassWithStringException(unittest.TestCase): + + def test_string_exception_not_masked(self): + + class X(Exception): + pass + + class EP(ErrorClassPlugin): + xes = ErrorClass(X, label='XXX', isfailure=True) + + warnings.filterwarnings(action='ignore', category=DeprecationWarning) + try: + + raise "oh no!" 
+ except: + exc = sys.exc_info() + + ep = EP() + self.assertEqual(ep.addError(None, exc), None) diff --git a/unit_tests/test_issue_159.rst b/unit_tests/test_issue_159.rst new file mode 100644 index 0000000..5ab7964 --- /dev/null +++ b/unit_tests/test_issue_159.rst @@ -0,0 +1,6 @@ + >>> from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin + >>> class X(Exception): + ... pass + >>> xes = ErrorClass(X, label='XXX') + Traceback (most recent call last): + TypeError: 'isfailure' is a required named argument for ErrorClass diff --git a/unit_tests/test_issue_227.py b/unit_tests/test_issue_227.py new file mode 100644 index 0000000..140862e --- /dev/null +++ b/unit_tests/test_issue_227.py @@ -0,0 +1,12 @@ +# -*- encoding: utf-8 -*- +from nose.plugins.skip import SkipTest +import sys + + +def setup(): + if 'java' in sys.version.lower(): + raise SkipTest("StringIO() in jython can't handle unicode") + + +def test_unicode(): + print u'b\u00f6y' diff --git a/unit_tests/test_issue_230.py b/unit_tests/test_issue_230.py new file mode 100644 index 0000000..41a717b --- /dev/null +++ b/unit_tests/test_issue_230.py @@ -0,0 +1,21 @@ +import os +import unittest + +class TestIssue230(unittest.TestCase): + + def test_generator_yield_value(self): + from nose.loader import TestLoader + + def test(): + pass + def gen(): + yield test + + loader = TestLoader() + suite = loader.loadTestsFromGenerator(gen, module=None) + testcase = iter(suite).next() + self.assertEqual(testcase.test.test, test) + + +if __name__ == '__main__': + unittest.main() diff --git a/unit_tests/test_lazy_suite.py b/unit_tests/test_lazy_suite.py new file mode 100644 index 0000000..79cdcf0 --- /dev/null +++ b/unit_tests/test_lazy_suite.py @@ -0,0 +1,21 @@ +import unittest +from nose.suite import LazySuite +from helpers import iter_compat + +def gen(): + for x in range(0, 10): + yield TestLazySuite.TC('test') + +class TestLazySuite(unittest.TestCase): + + class TC(unittest.TestCase): + def test(self): + pass + + def test_basic_iteration(self): + ls = LazySuite(gen) + for t in iter_compat(ls): + assert isinstance(t, unittest.TestCase) + +if __name__ == '__main__': + unittest.main() diff --git a/unit_tests/test_loader.py b/unit_tests/test_loader.py new file mode 100644 index 0000000..08f0686 --- /dev/null +++ b/unit_tests/test_loader.py @@ -0,0 +1,501 @@ +import imp +import os +import sys +import unittest +from nose.loader import TestLoader as Loader + +from nose import util, loader, selector # so we can set mocks +import nose.case + + +def safepath(p): + """Helper function to make cross-platform safe paths + """ + return p.replace('/', os.sep) + + +def mods(): + # + # Setting up the fake modules that we'll use for testing + # test loading + # + M = {} + M['test_module'] = imp.new_module('test_module') + M['module'] = imp.new_module('module') + M['package'] = imp.new_module('package') + M['package'].__path__ = [safepath('/package')] + M['package'].__file__ = safepath('/package/__init__.py') + M['package.subpackage'] = imp.new_module('package.subpackage') + M['package'].subpackage = M['package.subpackage'] + M['package.subpackage'].__path__ = [safepath('/package/subpackage')] + M['package.subpackage'].__file__ = safepath( + '/package/subpackage/__init__.py') + M['test_module_with_generators'] = imp.new_module( + 'test_module_with_generators') + M['test_module_with_metaclass_tests'] = imp.new_module( + 'test_module_with_metaclass_tests') + + # a unittest testcase subclass + class TC(unittest.TestCase): + def runTest(self): + pass + + class 
TC2(unittest.TestCase): + def runTest(self): + pass + + # test class that uses a metaclass + class TCType(type): + def __new__(cls, name, bases, dct): + return type.__new__(cls, name, bases, dct) + class TestMetaclassed(object): + __metaclass__ = TCType + def test_one(self): + pass + def test_two(self): + pass + + # test function + def test_func(): + pass + + # non-testcase-subclass test class + class TestClass: + + def test_func(self): + pass + + def test_generator_inline(self): + """docstring for test generator inline + """ + def test_odd(v): + assert v % 2 + for i in range(0, 4): + yield test_odd, i + + def test_generator_method(self): + """docstring for test generator method + """ + for i in range(0, 4): + yield self.try_odd, i + + def test_generator_method_name(self): + """docstring for test generator method name + """ + for i in range(0, 4): + yield 'try_odd', i + + def try_odd(self, v): + assert v % 2 + + # test function that is generator + def test_func_generator(): + """docstring for test func generator + """ + def test_odd(v): + assert v % 2 + for i in range(0, 4): + yield test_odd, i + + def test_func_generator_name(): + """docstring for test func generator name + """ + for i in range(0, 4): + yield 'try_odd', i + + def try_odd(v): + assert v % 2 + + M['nose'] = nose + M['__main__'] = sys.modules['__main__'] + M['test_module'].TC = TC + TC.__module__ = 'test_module' + M['test_module'].test_func = test_func + test_func.__module__ = 'test_module' + M['module'].TC2 = TC2 + TC2.__module__ = 'module' + M['test_module_with_generators'].TestClass = TestClass + TestClass.__module__ = 'test_module_with_generators' + M['test_module_with_generators'].test_func_generator = test_func_generator + M['test_module_with_generators'].test_func_generator_name = \ + test_func_generator_name + M['test_module_with_generators'].try_odd = try_odd + test_func_generator_name.__module__ = 'test_module_with_generators' + test_func_generator.__module__ = 'test_module_with_generators' + try_odd.__module__ = 'test_module_with_generators' + M['test_module_with_metaclass_tests'].TestMetaclassed = TestMetaclassed + TestMetaclassed.__module__ = 'test_module_with_metaclass_tests' + del TC + del TC2 + del TestMetaclassed + # del TCType + del test_func + del TestClass + del test_func_generator + return M + +M = mods() + +# Mock the filesystem access so we don't have to maintain +# a support dir with real files +_listdir = os.listdir +_isdir = os.path.isdir +_isfile = os.path.isfile +_exists = os.path.exists +_import = __import__ + + +# +# Mock functions +# +def mock_listdir(path): + if path.endswith(safepath('/package')): + return ['.', '..', 'subpackage', '__init__.py'] + elif path.endswith(safepath('/subpackage')): + return ['.', '..', '__init__.py'] + elif path.endswith(safepath('/sort')): + return ['.', '..', 'lib', 'src', 'test', 'test_module.py', 'a_test'] + return ['.', '..', 'test_module.py', 'module.py'] + + +def mock_isdir(path): + print "is dir '%s'?" % path + paths = map(safepath, [ + '/a/dir/path', '/package', + '/package/subpackage', '/sort/lib', + '/sort/src', '/sort/a_test', + '/sort/test', '/sort']) + paths = paths + map(os.path.abspath, paths) + if path in paths: + return True + return False + + +def mock_isfile(path): + if path in ('.', '..'): + return False + return '.' in path + + +def mock_exists(path): + print "exists '%s'?" 
% path + paths = map(safepath, [ + '/package', '/package/__init__.py', '/package/subpackage', + '/package/subpackage/__init__.py' + ]) + paths = paths + map(os.path.abspath, paths) + return path in paths + + +def mock_import(modname, gl=None, lc=None, fr=None): + if gl is None: + gl = M + if lc is None: + lc = locals() + try: + mod = sys.modules[modname] + except KeyError: + pass + try: + pname = [] + for part in modname.split('.'): + pname.append(part) + mname = '.'.join(pname) + mod = gl[mname] + sys.modules[mname] = mod + return mod + except KeyError: + raise ImportError("No '%s' in fake module list" % modname) + + +class MockImporter: + def importFromPath(self, path, fqname): + try: + m = M[fqname] + except KeyError: + raise ImportError(fqname) + sys.modules[fqname] = m + return m + +# +# Tests +# +class TestTestLoader(unittest.TestCase): + + def setUp(self): + os.listdir = mock_listdir + loader.op_isdir = selector.op_isdir = os.path.isdir = mock_isdir + loader.op_isfile = selector.op_isfile = os.path.isfile = mock_isfile + selector.op_exists = os.path.exists = mock_exists + util.__import__ = mock_import + self.l = Loader(importer=MockImporter())#, context=MockContext) + + def tearDown(self): + os.listdir = _listdir + loader.op_isdir = selector.op_isdir = os.path.isdir = _isdir + loader.op_isfile = selector.op_isfile = os.path.isfile = _isfile + selector.op_exists = os.path.exists = _exists + util.__import__ = _import + + def test_lint(self): + """Test that main API functions exist + """ + l = self.l + l.loadTestsFromTestCase + l.loadTestsFromModule + l.loadTestsFromName + l.loadTestsFromNames + + def test_load_from_name_dir_abs(self): + print "load from name dir" + l = self.l + suite = l.loadTestsFromName(safepath('/a/dir/path')) + tests = [t for t in suite] + self.assertEqual(len(tests), 1) + + def test_load_from_name_module_filename(self): + print "load from name module filename" + l = self.l + suite = l.loadTestsFromName('test_module.py') + tests = [t for t in suite] + assert tests + + def test_load_from_name_module(self): + print "load from name module" + l = self.l + suite = l.loadTestsFromName('test_module') + tests = [t for t in suite] + assert tests + + def test_load_from_name_nontest_module(self): + print "load from name nontest module" + l = self.l + suite = l.loadTestsFromName('module') + tests = [t for t in suite] + assert tests + + def test_load_from_name_method(self): + print "load from name method" + res = unittest.TestResult() + l = self.l + suite = l.loadTestsFromName(':TC.runTest') + tests = [t for t in suite] + assert tests + for test in tests: + test(res) + assert res.errors, \ + "Expected a ValueError for unresolvable test name, got none" + + def test_load_from_name_module_class(self): + print "load from name module class" + l = self.l + suite = l.loadTestsFromName('test_module:TC') + tests = [t for t in suite] + print tests + assert tests + assert len(tests) == 1, \ + "Should have loaded 1 test, but got %s" % tests + + # the item in tests is a suite, we want to check that all of + # the members of the suite are wrapped -- though this is really + # a suite test and doesn't belong here.. 
+ assert filter(lambda t: isinstance(t, nose.case.Test), tests[0]) + + def test_load_from_name_module_func(self): + print "load from name module func" + l = self.l + suite = l.loadTestsFromName('test_module:test_func') + tests = [t for t in suite] + assert tests + assert len(tests) == 1, \ + "Should have loaded 1 test, but got %s" % tests + assert isinstance(tests[0].test, nose.case.FunctionTestCase), \ + "Expected FunctionTestCase not %s" % tests[0].test + + def test_load_from_name_module_method(self): + print "load from name module method" + l = self.l + suite = l.loadTestsFromName('test_module:TC.runTest') + tests = [t for t in suite] + assert tests + assert len(tests) == 1, \ + "Should have loaded 1 test, but got %s" % tests + + def test_load_from_name_module_missing_class(self): + print "load from name module missing class" + res = unittest.TestResult() + l = self.l + suite = l.loadTestsFromName('test_module:TC2') + tests = [t for t in suite] + assert len(tests) == 1, \ + "Should have loaded 1 test, but got %s" % tests + tests[0](res) + assert res.errors, "Expected missing class test to raise exception" + + def test_load_from_name_module_missing_func(self): + print "load from name module missing func" + res = unittest.TestResult() + l = self.l + suite = l.loadTestsFromName('test_module:test_func2') + tests = [t for t in suite] + assert len(tests) == 1, \ + "Should have loaded 0 test, but got %s" % tests + tests[0](res) + assert res.errors, "Expected missing func test to raise exception" + + def test_load_from_name_module_missing_method(self): + print "load from name module missing method" + res = unittest.TestResult() + l = self.l + suite = l.loadTestsFromName('test_module:TC.testThat') + tests = [t for t in suite] + assert len(tests) == 1, \ + "Should have loaded 1 test, but got %s" % tests + tests[0](res) + assert res.errors, "Expected missing method test to raise exception" + + def test_load_from_name_missing_module(self): + print "load from name missing module" + res = unittest.TestResult() + l = self.l + suite = l.loadTestsFromName('other_test_module') + tests = [t for t in suite] + assert len(tests) == 1, \ + "Should have loaded 1 test, but got %s" % tests + tests[0](res) + assert res.errors, "Expected missing module test to raise exception" + + def test_cases_from_testcase_are_wrapped(self): + print "cases from testcase are wrapped" + test_module = M['test_module'] + l = self.l + suite = l.loadTestsFromTestCase(test_module.TC) + print suite + tests = [t for t in suite] + for test in tests: + assert isinstance(test, nose.case.Test), \ + "Test %r is not a test wrapper" % test + + def test_load_test_func(self): + print "load test func" + l = self.l + suite = l.loadTestsFromName('test_module') + tests = [t for t in suite] + self.assertEqual(len(tests), 2, "Wanted 2 tests, got %s" % tests) + assert filter(lambda t: isinstance(t, nose.case.Test), tests) + print tests + class_tests = tests[0] + for t in class_tests: + print "class test: ", t + func_tests = tests[1:] + assert class_tests, \ + "Expected class suite got %s" % class_tests + assert len(func_tests) == 1, \ + "Expected 1 func test got %s" % func_tests + for test in class_tests: + assert isinstance(test.test, unittest.TestCase), \ + "Expected TestCase npt %s" % tests[0].test + for test in func_tests: + assert isinstance(test.test, nose.case.FunctionTestCase), \ + "Expected FunctionTestCase not %s" % tests[1].test + + def test_load_from_name_package_root_path(self): + print "load from name package root path" + l = self.l + 
suite = l.loadTestsFromName(safepath('/package')) + print suite + tests = [t for t in suite] + assert len(tests) == 1, "Expected one test, got %s" % tests + tests = list(tests[0]) + assert not tests, "The full test list %s was not empty" % tests + + def test_load_from_name_subpackage_safepath(self): + print "load from name subpackage path" + l = self.l + suite = l.loadTestsFromName(safepath('/package/subpackage')) + print suite + tests = [t for t in suite] + assert len(tests) == 0, "Expected no tests, got %s" % tests + + def test_load_metaclass_customized_classes(self): + print "load metaclass-customized classes" + test_module_with_generators = M['test_module_with_metaclass_tests'] + l = self.l + suite = l.loadTestsFromModule(test_module_with_generators) + tc = [t for t in suite][0] + tc_methods = [m for m in tc] + self.assertEqual(len(tc_methods), 2) + + def test_load_generators(self): + print "load generators" + test_module_with_generators = M['test_module_with_generators'] + l = self.l + suite = l.loadTestsFromModule(test_module_with_generators) + tests = [t for t in suite] + + for t in tests: + print "test", t + assert isinstance(t, unittest.TestSuite), \ + "Test %s is not a suite" % t + + # the first item is a class, with both normal and generator methods + count = 0 + cl_tests = [t for t in tests[0]] + print "class tests", cl_tests + normal, gens = cl_tests[0], cl_tests[1:] + assert isinstance(normal, nose.case.Test), \ + "Expected a test case but got %s" % normal + for gen in gens: + assert isinstance(gen, unittest.TestSuite), \ + "Expected a generator test suite, but got %s" % gen + count = 0 + for t in gen: + print "generated test %s" % t + print t.shortDescription() + assert isinstance(t, nose.case.Test), \ + "Test %s is not a test?" % t + count += 1 + self.assertEqual(count, 4, "Expected to generate 4 tests, but " + "got %s from %s" % (count, gen)) + + # 2nd item is generated from test_func_generator + count = 0 + for t in tests[1]: + print "generated test %s" % t + print t.shortDescription() + assert isinstance(t, nose.case.Test), \ + "Test %s is not a Test?" % t + assert isinstance(t.test, nose.case.FunctionTestCase), \ + "Test %s is not a FunctionTestCase" % t.test + assert 'test_func_generator' in str(t), \ + "Bad str val '%s' for test" % str(t) + assert 'docstring for test func generator' \ + in t.shortDescription(), \ + "Bad shortDescription '%s' for test %s" % \ + (t.shortDescription(), t) + count += 1 + assert count == 4, \ + "Expected to generate 4 tests, but got %s" % count + + count = 0 + for t in tests[2]: + print "generated test %s" % t + print t.shortDescription() + assert isinstance(t, nose.case.Test), \ + "Test %s is not a Test?" 
% t + assert isinstance(t.test, nose.case.FunctionTestCase), \ + "Test %s is not a FunctionTestCase" % t.test + assert 'test_func_generator_name' in str(t), \ + "Bad str val '%s' for test" % str(t) + assert 'docstring for test func generator name' \ + in t.shortDescription(), \ + "Bad shortDescription '%s' for test %s" % \ + (t.shortDescription(), t) + count += 1 + assert count == 4, \ + "Expected to generate 4 tests, but got %s" % count + +if __name__ == '__main__': + #import logging + #logging.basicConfig(level=logging.DEBUG) + unittest.main() diff --git a/unit_tests/test_logcapture_plugin.py b/unit_tests/test_logcapture_plugin.py new file mode 100644 index 0000000..19363c8 --- /dev/null +++ b/unit_tests/test_logcapture_plugin.py @@ -0,0 +1,201 @@ +import sys +from optparse import OptionParser +from nose.config import Config +from nose.plugins.logcapture import LogCapture +from nose.tools import eq_ +import logging +from logging import StreamHandler +import unittest + +if sys.version_info >= (2, 7): + py27 = True +else: + py27 = False + +class TestLogCapturePlugin(object): + + def test_enabled_by_default(self): + c = LogCapture() + assert c.enabled + + def test_default_options(self): + c = LogCapture() + parser = OptionParser() + c.addOptions(parser) + + options, args = parser.parse_args(['default_options']) + c.configure(options, Config()) + assert c.enabled + eq_(LogCapture.logformat, c.logformat) + eq_(LogCapture.clear, c.clear) + eq_(LogCapture.filters, c.filters) + + def test_disable_option(self): + parser = OptionParser() + c = LogCapture() + c.addOptions(parser) + options, args = parser.parse_args(['test_can_be_disabled_long', + '--nologcapture']) + c.configure(options, Config()) + assert not c.enabled + + env = {'NOSE_NOLOGCAPTURE': 1} + c = LogCapture() + parser = OptionParser() + c.addOptions(parser, env) + options, args = parser.parse_args(['test_can_be_disabled']) + c.configure(options, Config()) + assert not c.enabled + + def test_logging_format_option(self): + env = {'NOSE_LOGFORMAT': '++%(message)s++'} + c = LogCapture() + parser = OptionParser() + c.addOptions(parser, env) + options, args = parser.parse_args(['logging_format']) + c.configure(options, Config()) + eq_('++%(message)s++', c.logformat) + + def test_logging_datefmt_option(self): + env = {'NOSE_LOGDATEFMT': '%H:%M:%S'} + c = LogCapture() + parser = OptionParser() + c.addOptions(parser, env) + options, args = parser.parse_args(['logging_datefmt']) + c.configure(options, Config()) + eq_('%H:%M:%S', c.logdatefmt) + + def test_captures_logging(self): + c = LogCapture() + parser = OptionParser() + c.addOptions(parser, {}) + options, args = parser.parse_args([]) + c.configure(options, Config()) + c.start() + log = logging.getLogger("foobar.something") + log.debug("Hello") + c.end() + eq_(1, len(c.handler.buffer)) + eq_("Hello", c.handler.buffer[0].msg) + + def test_clears_all_existing_log_handlers(self): + c = LogCapture() + parser = OptionParser() + c.addOptions(parser, {}) + options, args = parser.parse_args(['--logging-clear-handlers']) + c.configure(options, Config()) + eq_(c.clear, True) + + def mktest(): + class TC(unittest.TestCase): + def runTest(self): + pass + test = TC() + return test + + logging.getLogger().addHandler(StreamHandler(sys.stdout)) + log = logging.getLogger("dummy") + log.addHandler(StreamHandler(sys.stdout)) + + c.start() + c.beforeTest(mktest()) + c.end() + + + if py27: + expect = ["<class 'nose.plugins.logcapture.MyMemoryHandler'>"] + else: + expect = 
['nose.plugins.logcapture.MyMemoryHandler'] + eq_([str(c.__class__) for c in logging.getLogger().handlers], + expect) + eq_([str(c.__class__) for c in logging.getLogger("dummy").handlers], + []) + + def test_custom_formatter(self): + c = LogCapture() + c.logformat = '++%(message)s++' + c.start() + log = logging.getLogger("foobar.something") + log.debug("Hello") + c.end() + records = c.formatLogRecords() + eq_(1, len(records)) + eq_("++Hello++", records[0]) + + def test_logging_filter(self): + env = {'NOSE_LOGFILTER': 'foo,bar'} + c = LogCapture() + parser = OptionParser() + c.addOptions(parser, env) + options, args = parser.parse_args(['foo']) + print options, args + c.configure(options, Config()) + c.start() + for name in ['foobar.something', 'foo', 'foo.x', 'abara', 'bar.quux']: + log = logging.getLogger(name) + log.info("Hello %s" % name) + c.end() + records = c.formatLogRecords() + eq_(3, len(records)) + assert records[0].startswith('foo:'), records[0] + assert records[1].startswith('foo.x:'), records[1] + assert records[2].startswith('bar.quux:'), records[2] + + def test_logging_filter_exclude(self): + env = {'NOSE_LOGFILTER': '-foo,-bar'} + c = LogCapture() + parser = OptionParser() + c.addOptions(parser, env) + options, args = parser.parse_args(['foo']) + print options, args + c.configure(options, Config()) + c.start() + for name in ['foobar.something', 'foo', 'foo.x', 'abara', 'bar.quux']: + log = logging.getLogger(name) + log.info("Hello %s" % name) + c.end() + records = c.formatLogRecords() + eq_(2, len(records)) + assert records[0].startswith('foobar.something:'), records[0] + assert records[1].startswith('abara:'), records[1] + + def test_logging_filter_exclude_and_include(self): + env = {'NOSE_LOGFILTER': 'foo,-foo.bar'} + c = LogCapture() + parser = OptionParser() + c.addOptions(parser, env) + options, args = parser.parse_args(['foo']) + print options, args + c.configure(options, Config()) + c.start() + for name in ['foo.yes', 'foo.bar', 'foo.bar.no', 'blah']: + log = logging.getLogger(name) + log.info("Hello %s" % name) + c.end() + records = c.formatLogRecords() + eq_(1, len(records)) + assert records[0].startswith('foo.yes:'), records[0] + + def test_unicode_messages_handled(self): + msg = u'Ivan Krsti\u0107' + c = LogCapture() + parser = OptionParser() + c.addOptions(parser, {}) + options, args = parser.parse_args([]) + c.configure(options, Config()) + c.start() + log = logging.getLogger("foobar.something") + log.debug(msg) + log.debug("ordinary string log") + c.end() + + class Dummy: + pass + test = Dummy() + try: + raise Exception(msg) + except: + err = sys.exc_info() + (ec, ev, tb) = c.formatError(test, err) + print ev + assert msg.encode('utf-8') in ev diff --git a/unit_tests/test_logging.py b/unit_tests/test_logging.py new file mode 100644 index 0000000..82386bf --- /dev/null +++ b/unit_tests/test_logging.py @@ -0,0 +1,40 @@ +import logging +import unittest +from nose.config import Config +#from nose.core import configure_logging +from mock import * + + +class TestLoggingConfig(unittest.TestCase): + + def setUp(self): + # install mock root logger so that these tests don't stomp on + # the real logging config of the test runner + class MockLogger(logging.Logger): + root = logging.RootLogger(logging.WARNING) + manager = logging.Manager(root) + + self.real_logger = logging.Logger + self.real_root = logging.root + logging.Logger = MockLogger + logging.root = MockLogger.root + + def tearDown(self): + # reset real root logger + logging.Logger = self.real_logger + 
logging.root = self.real_root + + def test_isolation(self): + """root logger settings ignored""" + + root = logging.getLogger('') + nose = logging.getLogger('nose') + + config = Config() + config.configureLogging() + + root.setLevel(logging.DEBUG) + self.assertEqual(nose.level, logging.WARN) + +if __name__ == '__main__': + unittest.main() diff --git a/unit_tests/test_ls_tree.rst b/unit_tests/test_ls_tree.rst new file mode 100644 index 0000000..260c641 --- /dev/null +++ b/unit_tests/test_ls_tree.rst @@ -0,0 +1,50 @@ + >>> import os + >>> import tempfile + >>> import shutil + + >>> from nose.util import ls_tree + + >>> dir_path = tempfile.mkdtemp() + + >>> def create_file(filename): + ... fd = os.open(filename, os.O_WRONLY|os.O_CREAT, 0666) + ... os.close(fd) + + >>> os.mkdir(os.path.join(dir_path, "top")) + >>> os.mkdir(os.path.join(dir_path, "top/dir")) + >>> os.mkdir(os.path.join(dir_path, "top/dir2")) + >>> os.mkdir(os.path.join(dir_path, "top/dir3")) + >>> os.mkdir(os.path.join(dir_path, "top/dir/dir")) + >>> os.mkdir(os.path.join(dir_path, "top/dir/dir2")) + >>> os.mkdir(os.path.join(dir_path, "top/.svn")) + >>> os.mkdir(os.path.join(dir_path, "top/.notsvn")) + >>> os.mkdir(os.path.join(dir_path, "top/dir/.svn")) + >>> os.mkdir(os.path.join(dir_path, "top/dir/.notsvn")) + >>> create_file(os.path.join(dir_path, "top/file")) + >>> create_file(os.path.join(dir_path, "top/backup_file~")) + >>> create_file(os.path.join(dir_path, "top/file2")) + >>> create_file(os.path.join(dir_path, "top/dir/file")) + >>> create_file(os.path.join(dir_path, "top/dir/dir/file")) + >>> create_file(os.path.join(dir_path, "top/dir/dir/file2")) + >>> create_file(os.path.join(dir_path, "top/dir/backup_file~")) + >>> create_file(os.path.join(dir_path, "top/dir2/file")) + + Note that files matching skip_pattern (by default SVN files, + backup files and compiled Python files) are ignored + + >>> print ls_tree(os.path.join(dir_path, "top")) + |-- file + |-- file2 + |-- .notsvn + |-- dir + | |-- file + | |-- .notsvn + | |-- dir + | | |-- file + | | `-- file2 + | `-- dir2 + |-- dir2 + | `-- file + `-- dir3 + + >>> shutil.rmtree(dir_path) diff --git a/unit_tests/test_multiprocess.py b/unit_tests/test_multiprocess.py new file mode 100644 index 0000000..ec06ec9 --- /dev/null +++ b/unit_tests/test_multiprocess.py @@ -0,0 +1,55 @@ +import pickle +import sys +import unittest + +from nose import case +from nose.plugins import multiprocess +from nose.plugins.skip import SkipTest +from nose.config import Config +from nose.loader import TestLoader +try: + # 2.7+ + from unittest.runner import _WritelnDecorator +except ImportError: + from unittest import _WritelnDecorator + + +class ArgChecker: + def __init__(self, target, args): + self.target = target + self.args = args + # skip the id and queues + pargs = args[4:] + self.pickled = pickle.dumps(pargs) + def start(self): + pass + def is_alive(self): + return False + + +def setup(mod): + multiprocess._import_mp() + if not multiprocess.Process: + raise SkipTest("multiprocessing not available") + mod.Process = multiprocess.Process + multiprocess.Process = ArgChecker + + +class T(unittest.TestCase): + __test__ = False + def runTest(self): + pass + + +def test_mp_process_args_pickleable(): + test = case.Test(T('runTest')) + config = Config() + config.multiprocess_workers = 2 + config.multiprocess_timeout = 0.1 + runner = multiprocess.MultiProcessTestRunner( + stream=_WritelnDecorator(sys.stdout), + verbosity=2, + loaderClass=TestLoader, + config=config) + runner.run(test) + diff --git 
a/unit_tests/test_multiprocess_runner.py b/unit_tests/test_multiprocess_runner.py new file mode 100644 index 0000000..71ee398 --- /dev/null +++ b/unit_tests/test_multiprocess_runner.py @@ -0,0 +1,120 @@ +import unittest +import imp +import sys +from nose.loader import TestLoader +from nose.plugins import multiprocess +from nose.suite import ContextSuite + +class T_fixt: + def setupClass(cls): + pass + setupClass = classmethod(setupClass) + + def test_a(self): + pass + def test_b(self): + pass + +class T: + def test_a(self): + pass + def test_b(self): + pass + + + +class TestMultiProcessTestRunner(unittest.TestCase): + + def test_next_batch_with_classes(self): + r = multiprocess.MultiProcessTestRunner() + l = TestLoader() + tests = list(r.nextBatch(ContextSuite( + tests=[l.makeTest(T_fixt), l.makeTest(T)]))) + print tests + self.assertEqual(len(tests), 3) + + def test_next_batch_with_module_fixt(self): + mod_with_fixt = imp.new_module('mod_with_fixt') + sys.modules['mod_with_fixt'] = mod_with_fixt + + def teardown(): + pass + + class Test(T): + pass + + mod_with_fixt.Test = Test + mod_with_fixt.teardown = teardown + Test.__module__ = 'mod_with_fixt' + + r = multiprocess.MultiProcessTestRunner() + l = TestLoader() + tests = list(r.nextBatch(l.loadTestsFromModule(mod_with_fixt))) + print tests + self.assertEqual(len(tests), 1) + + def test_next_batch_with_module(self): + mod_no_fixt = imp.new_module('mod_no_fixt') + sys.modules['mod_no_fixt'] = mod_no_fixt + + class Test2(T): + pass + + class Test_fixt(T_fixt): + pass + + mod_no_fixt.Test = Test2 + Test2.__module__ = 'mod_no_fixt' + mod_no_fixt.Test_fixt = Test_fixt + Test_fixt.__module__ = 'mod_no_fixt' + + r = multiprocess.MultiProcessTestRunner() + l = TestLoader() + tests = list(r.nextBatch(l.loadTestsFromModule(mod_no_fixt))) + print tests + self.assertEqual(len(tests), 3) + + def test_next_batch_with_generator_method(self): + class Tg: + def test_gen(self): + for i in range(0, 3): + yield self.check, i + def check(self, val): + pass + r = multiprocess.MultiProcessTestRunner() + l = TestLoader() + tests = list(r.nextBatch(l.makeTest(Tg))) + print tests + print [r.address(t) for t in tests] + self.assertEqual(len(tests), 1) + + def test_next_batch_can_split_set(self): + + mod_with_fixt2 = imp.new_module('mod_with_fixt2') + sys.modules['mod_with_fixt2'] = mod_with_fixt2 + + def setup(): + pass + + class Test(T): + pass + + class Test_fixt(T_fixt): + pass + + mod_with_fixt2.Test = Test + mod_with_fixt2.Test_fixt = Test_fixt + mod_with_fixt2.setup = setup + mod_with_fixt2._multiprocess_can_split_ = True + Test.__module__ = 'mod_with_fixt2' + Test_fixt.__module__ = 'mod_with_fixt2' + + r = multiprocess.MultiProcessTestRunner() + l = TestLoader() + tests = list(r.nextBatch(l.loadTestsFromModule(mod_with_fixt2))) + print tests + self.assertEqual(len(tests), 3) + + +if __name__ == '__main__': + unittest.main() diff --git a/unit_tests/test_pdb_plugin.py b/unit_tests/test_pdb_plugin.py new file mode 100644 index 0000000..cdd43f2 --- /dev/null +++ b/unit_tests/test_pdb_plugin.py @@ -0,0 +1,117 @@ +import sys +import unittest +from nose.config import Config +from nose.plugins import debug +from optparse import OptionParser +from StringIO import StringIO + +class StubPdb: + called = False + def post_mortem(self, tb): + self.called = True + +class TestPdbPlugin(unittest.TestCase): + + def setUp(self): + self._pdb = debug.pdb + self._so = sys.stdout + debug.pdb = StubPdb() + + def tearDown(self): + debug.pdb = self._pdb + sys.stdout = self._so + + def 
test_plugin_api(self): + p = debug.Pdb() + p.addOptions + p.configure + p.addError + p.addFailure + + def test_plugin_calls_pdb(self): + p = debug.Pdb() + + try: + raise Exception("oops") + except: + err = sys.exc_info() + + p.enabled = True + p.enabled_for_errors = True + p.enabled_for_failures = True + + p.addError(None, err) + assert debug.pdb.called, "Did not call pdb.post_mortem on error" + + debug.pdb.called = False + p.addFailure(None, err) + assert debug.pdb.called, "Did not call pdb.post_mortem on failure" + + def test_command_line_options_enable(self): + parser = OptionParser() + + p = debug.Pdb() + p.addOptions(parser) + options, args = parser.parse_args(['test_configuration', + '--pdb', + '--pdb-failures']) + p.configure(options, Config()) + assert p.enabled + assert p.enabled_for_errors + assert p.enabled_for_failures + + def test_disabled_by_default(self): + p = debug.Pdb() + assert not p.enabled + assert not p.enabled_for_failures + + parser = OptionParser() + p.addOptions(parser) + options, args = parser.parse_args(['test_configuration']) + p.configure(options, Config()) + assert not p.enabled + assert not p.enabled_for_errors + assert not p.enabled_for_failures + + def test_env_settings_enable(self): + p = debug.Pdb() + assert not p.enabled + assert not p.enabled_for_failures + + env = {'NOSE_PDB': '1', + 'NOSE_PDB_FAILURES': '1'} + + parser = OptionParser() + p.addOptions(parser, env) + options, args = parser.parse_args(['test_configuration']) + p.configure(options, Config()) + assert p.enabled + assert p.enabled_for_errors + assert p.enabled_for_failures + + def test_real_stdout_restored_before_call(self): + + class CheckStdout(StubPdb): + def post_mortem(self, tb): + assert sys.stdout is sys.__stdout__, \ + "sys.stdout was not restored to sys.__stdout__ " \ + "before call" + debug.pdb = CheckStdout() + + patch = StringIO() + sys.stdout = patch + p = debug.Pdb() + p.enabled = True + p.enabled_for_errors = True + + try: + raise Exception("oops") + except: + err = sys.exc_info() + + p.addError(None, err) + assert sys.stdout is patch, "sys.stdout was not reset after call" + + +if __name__ == '__main__': + unittest.main() diff --git a/unit_tests/test_plugin.py b/unit_tests/test_plugin.py new file mode 100644 index 0000000..aa2d9cc --- /dev/null +++ b/unit_tests/test_plugin.py @@ -0,0 +1,33 @@ +import optparse +import unittest + +import nose.plugins + + +class OptionProcessingTests(unittest.TestCase): + + def test_enable_plugin(self): + class NamedPlugin(nose.plugins.Plugin): + name = "jim-bob" + def parse_options(env, args_in): + plugin = NamedPlugin() + parser = optparse.OptionParser() + plugin.options(parser, env) + options, args = parser.parse_args(args_in) + return options + options = parse_options({}, []) + assert not options.enable_plugin_jim_bob, \ + "Plugin should not be enabled" + options = parse_options({"NOSE_WITH_JIM_BOB": "1"}, []) + assert options.enable_plugin_jim_bob, \ + "Plugin should be enabled" + options = parse_options({}, ["--with-jim-bob"]) + assert options.enable_plugin_jim_bob, \ + "Plugin should be enabled" + options = parse_options({"NOSE_WITH_JIM_BOB": "1"}, ["--with-jim-bob"]) + assert options.enable_plugin_jim_bob, \ + "Plugin should be enabled" + + +if __name__ == '__main__': + unittest.main() diff --git a/unit_tests/test_plugin_interfaces.py b/unit_tests/test_plugin_interfaces.py new file mode 100644 index 0000000..499af6b --- /dev/null +++ b/unit_tests/test_plugin_interfaces.py @@ -0,0 +1,45 @@ +import unittest +from nose.plugins.base import 
IPluginInterface + +class TestPluginInterfaces(unittest.TestCase): + + def test_api_methods_present(self): + + from nose.loader import TestLoader + from nose.selector import Selector + + + exclude = [ 'loadTestsFromGenerator', + 'loadTestsFromGeneratorMethod' + ] + + selfuncs = [ f for f in dir(Selector) + if f.startswith('want') ] + loadfuncs = [ f for f in dir(TestLoader) + if f.startswith('load') and not f in exclude ] + + others = ['addDeprecated', 'addError', 'addFailure', + 'addSkip', 'addSuccess', 'startTest', 'stopTest', + 'prepareTest', 'begin', 'report' + ] + + expect = selfuncs + loadfuncs + others + + pd = dir(IPluginInterface) + + for f in expect: + assert f in pd, "No %s in IPluginInterface" % f + assert getattr(IPluginInterface, f).__doc__, \ + "No docs for %f in IPluginInterface" % f + + def test_no_instantiate(self): + try: + p = IPluginInterface() + except TypeError: + pass + else: + assert False, \ + "Should not be able to instantiate IPluginInterface" + +if __name__ == '__main__': + unittest.main() diff --git a/unit_tests/test_plugin_manager.py b/unit_tests/test_plugin_manager.py new file mode 100644 index 0000000..578ce03 --- /dev/null +++ b/unit_tests/test_plugin_manager.py @@ -0,0 +1,74 @@ +import unittest +from nose import case +from nose.plugins import Plugin, PluginManager + + +class Plug(Plugin): + def loadTestsFromFile(self, path): + class TC(unittest.TestCase): + def test(self): + pass + return [TC('test')] + def addError(self, test, err): + return True + +class Plug2(Plugin): + def loadTestsFromFile(self, path): + class TCT(unittest.TestCase): + def test_2(self): + pass + return [TCT('test_2')] + def addError(self, test, err): + assert False, "Should not have been called" + +class Plug3(Plugin): + def loadTestsFromModule(self, module): + raise TypeError("I don't like to type") + +class Plug4(Plugin): + def loadTestsFromModule(self, module): + raise AttributeError("I am missing my nose") + +class BetterPlug2(Plugin): + name = 'plug2' + + +class TestPluginManager(unittest.TestCase): + + def test_proxy_to_plugins(self): + man = PluginManager(plugins=[Plug(), Plug2()]) + + # simple proxy: first plugin to return a value wins + self.assertEqual(man.addError(None, None), True) + + # multiple proxy: all plugins that return values get to run + all = [] + for res in man.loadTestsFromFile('foo'): + print res + all.append(res) + self.assertEqual(len(all), 2) + + def test_iter(self): + expect = [Plug(), Plug2()] + man = PluginManager(plugins=expect) + for plug in man: + self.assertEqual(plug, expect.pop(0)) + assert not expect, \ + "Some plugins were not found by iteration: %s" % expect + + def test_plugin_generative_method_errors_not_hidden(self): + import nose.failure + pm = PluginManager(plugins=[Plug3(), Plug4()]) + loaded = list(pm.loadTestsFromModule('whatever')) + self.assertEqual(len(loaded), 2) + for test in loaded: + assert isinstance(test, nose.failure.Failure), \ + "%s is not a failure" % test + + def test_plugin_override(self): + pm = PluginManager(plugins=[Plug2(), BetterPlug2()]) + self.assertEqual(len(pm.plugins), 1) + assert isinstance(pm.plugins[0], BetterPlug2) + +if __name__ == '__main__': + unittest.main() diff --git a/unit_tests/test_plugins.py b/unit_tests/test_plugins.py new file mode 100644 index 0000000..05a5838 --- /dev/null +++ b/unit_tests/test_plugins.py @@ -0,0 +1,414 @@ +import logging +import os +import sys +import unittest +import nose.plugins +from optparse import OptionParser +import tempfile +from warnings import warn, filterwarnings, 
resetwarnings + +from nose import SkipTest +from nose.config import Config +from nose.plugins.attrib import AttributeSelector +from nose.plugins.base import Plugin +from nose.plugins.cover import Coverage +from nose.plugins.doctests import Doctest +from nose.plugins.prof import Profile + +from mock import * + +class P(Plugin): + """Plugin of destiny!""" + pass + +class ErrPlugin(object): + def load(self): + raise Exception("Failed to load the plugin") + +class ErrPkgResources(object): + def iter_entry_points(self, ep): + yield ErrPlugin() + + +# some plugins have 2.4-only features +compat_24 = sys.version_info >= (2, 4) + + +class TestBuiltinPlugins(unittest.TestCase): + + def setUp(self): + self.p = sys.path[:] + + def tearDown(self): + sys.path = self.p[:] + + def test_add_options(self): + conf = Config() + opt = Bucket() + parser = MockOptParser() + plug = P() + + plug.add_options(parser) + o, d = parser.opts[0] + # print d + assert o[0] == '--with-p' + assert d['action'] == 'store_true' + assert not d['default'] + assert d['dest'] == 'enable_plugin_p' + assert d['help'] == 'Enable plugin P: Plugin of destiny! [NOSE_WITH_P]' + + opt.enable_plugin_p = True + plug.configure(opt, conf) + assert plug.enabled + + +class TestDoctestPlugin(unittest.TestCase): + + def setUp(self): + self.p = sys.path[:] + + def tearDown(self): + sys.path = self.p[:] + + def test_add_options(self): + # doctest plugin adds some options... + conf = Config() + opt = Bucket() + parser = MockOptParser() + plug = Doctest() + + plug.add_options(parser, {}) + o, d = parser.opts[0] + assert o[0] == '--with-doctest' + + o2, d2 = parser.opts[1] + assert o2[0] == '--doctest-tests' + + o3, d3 = parser.opts[2] + assert o3[0] == '--doctest-extension' + + def test_config(self): + # test that configuration works properly when both environment + # and command line specify a doctest extension + parser = OptionParser() + env = {'NOSE_DOCTEST_EXTENSION':'ext'} + argv = ['--doctest-extension', 'txt'] + dtp = Doctest() + dtp.add_options(parser, env) + options, args = parser.parse_args(argv) + + print options + print args + self.assertEqual(options.doctestExtension, ['ext', 'txt']) + + env = {} + parser = OptionParser() + dtp.add_options(parser, env) + options, args = parser.parse_args(argv) + print options + print args + self.assertEqual(options.doctestExtension, ['txt']) + + def test_want_file(self): + # doctest plugin can select module and/or non-module files + conf = Config() + opt = Bucket() + plug = Doctest() + plug.can_configure = True + plug.configure(opt, conf) + + assert plug.wantFile('foo.py') + assert not plug.wantFile('bar.txt') + assert not plug.wantFile('buz.rst') + assert not plug.wantFile('bing.mov') + + plug.extension = ['.txt', '.rst'] + assert plug.wantFile('/path/to/foo.py') + assert plug.wantFile('/path/to/bar.txt') + assert plug.wantFile('/path/to/buz.rst') + assert not plug.wantFile('/path/to/bing.mov') + + def test_matches(self): + # doctest plugin wants tests from all NON-test modules + conf = Config() + opt = Bucket() + plug = Doctest() + plug.can_configure = True + plug.configure(opt, conf) + assert not plug.matches('test') + assert plug.matches('foo') + + def test_collect_pymodule(self): + here = os.path.dirname(__file__) + support = os.path.join(here, 'support') + if not support in sys.path: + sys.path.insert(0, support) + import foo.bar.buz + + conf = Config() + opt = Bucket() + plug = Doctest() + plug.can_configure = True + plug.configure(opt, conf) + suite = plug.loadTestsFromModule(foo.bar.buz) + 
expect = ['[afunc (foo.bar.buz)]'] + for test in suite: + self.assertEqual(str(test), expect.pop(0)) + + def test_addresses(self): + here = os.path.dirname(__file__) + support = os.path.join(here, 'support') + if not support in sys.path: + sys.path.insert(0, support) + import foo.bar.buz + + conf = Config() + opt = Bucket() + plug = Doctest() + plug.can_configure = True + plug.configure(opt, conf) + suite = plug.loadTestsFromModule(foo.bar.buz) + for test in suite: + print test.address() + file, mod, call = test.address() + self.assertEqual(mod, 'foo.bar.buz') + self.assertEqual(call, None) + for case in test: + print case.address() + file, mod, call = case.address() + self.assertEqual(mod, 'foo.bar.buz') + self.assertEqual(call, 'afunc') + + def test_collect_txtfile(self): + here = os.path.abspath(os.path.dirname(__file__)) + support = os.path.join(here, 'support') + fn = os.path.join(support, 'foo', 'doctests.txt') + + conf = Config() + opt = Bucket() + plug = Doctest() + plug.can_configure = True + plug.configure(opt, conf) + plug.extension = ['.txt'] + suite = plug.loadTestsFromFile(fn) + for test in suite: + assert str(test).endswith('doctests.txt') + assert test.address(), "Test %s has no address" + + def test_collect_no_collect(self): + # bug http://nose.python-hosting.com/ticket/55 + # we got "iteration over non-sequence" when no files match + here = os.path.abspath(os.path.dirname(__file__)) + support = os.path.join(here, 'support') + plug = Doctest() + for test in plug.loadTestsFromFile(os.path.join(support, 'foo')): + self.fail("Expected no tests, got %s" % test) + + +class TestAttribPlugin(unittest.TestCase): + + def test_add_options(self): + plug = AttributeSelector() + parser = MockOptParser() + plug.add_options(parser) + + expect = [(('-a', '--attr'), + {'dest': 'attr', 'action': 'append', 'default': None, + 'metavar': 'ATTR', + 'help': 'Run only tests that have attributes ' + 'specified by ATTR [NOSE_ATTR]'})] + + if compat_24: + expect.append( + (('-A', '--eval-attr'), + {'dest': 'eval_attr', 'action': 'append', + 'default': None, 'metavar': 'EXPR', + 'help': 'Run only tests for whose attributes the ' + 'Python expression EXPR evaluates to True ' + '[NOSE_EVAL_ATTR]'})) + self.assertEqual(parser.opts, expect) + + opt = Bucket() + opt.attr = ['!slow'] + plug.configure(opt, Config()) + assert plug.enabled + self.assertEqual(plug.attribs, [[('slow', False)]]) + + opt.attr = ['fast,quick', 'weird=66'] + plug.configure(opt, Config()) + self.assertEqual(plug.attribs, [[('fast', True), + ('quick', True)], + [('weird', '66')]]) + + # don't die on trailing , + opt.attr = [ 'something,' ] + plug.configure(opt, Config()) + self.assertEqual(plug.attribs, [[('something', True)]] ) + + if compat_24: + opt.attr = None + opt.eval_attr = [ 'weird >= 66' ] + plug.configure(opt, Config()) + self.assertEqual(plug.attribs[0][0][0], 'weird >= 66') + assert callable(plug.attribs[0][0][1]) + + def test_basic_attr(self): + def f(): + pass + f.a = 1 + + def g(): + pass + + plug = AttributeSelector() + plug.attribs = [[('a', True)]] + assert plug.wantFunction(f) is not False + assert not plug.wantFunction(g) + + def test_class_attr(self): + class TestP: + foo = True + def h(): + pass + + def i(): + pass + + plug = AttributeSelector() + plug.attribs = [[('foo', True)]] + assert plug.wantMethod(TestP.h) is not False + assert plug.wantFunction(i) is False + + def test_eval_attr(self): + if not compat_24: + warn("No support for eval attributes in python versions older" + " than 2.4") + return + def f(): 
+ pass + f.monkey = 2 + + def g(): + pass + g.monkey = 6 + + def h(): + pass + h.monkey = 5 + + cnf = Config() + opt = Bucket() + opt.eval_attr = "monkey > 5" + plug = AttributeSelector() + plug.configure(opt, cnf) + + assert not plug.wantFunction(f) + assert plug.wantFunction(g) is not False + assert not plug.wantFunction(h) + + def test_attr_a_b(self): + def f1(): + pass + f1.tags = ['a', 'b'] + + def f2(): + pass + f2.tags = ['a', 'c'] + + def f3(): + pass + f3.tags = ['b', 'c'] + + def f4(): + pass + f4.tags = ['c', 'd'] + + cnf = Config() + parser = OptionParser() + plug = AttributeSelector() + + plug.add_options(parser) + + # OR + opt, args = parser.parse_args(['test', '-a', 'tags=a', + '-a', 'tags=b']) + print opt + plug.configure(opt, cnf) + + assert plug.wantFunction(f1) is None + assert plug.wantFunction(f2) is None + assert plug.wantFunction(f3) is None + assert not plug.wantFunction(f4) + + # AND + opt, args = parser.parse_args(['test', '-a', 'tags=a,tags=b']) + print opt + plug.configure(opt, cnf) + + assert plug.wantFunction(f1) is None + assert not plug.wantFunction(f2) + assert not plug.wantFunction(f3) + assert not plug.wantFunction(f4) + + +class TestProfPlugin(unittest.TestCase): + + def setUp(self): + if not Profile.available(): + raise SkipTest('profile plugin not available; skipping') + + def test_options(self): + parser = OptionParser() + conf = Config() + plug = Profile() + + plug.add_options(parser, {}) + opts = [ o._long_opts[0] for o in parser.option_list ] + assert '--profile-sort' in opts + assert '--profile-stats-file' in opts + assert '--with-profile' in opts + assert '--profile-restrict' in opts + + def test_begin(self): + plug = Profile() + plug.pfile = tempfile.mkstemp()[1] + try: + plug.begin() + assert plug.prof + finally: + plug.finalize(None) + + def test_prepare_test(self): + r = {} + class dummy: + def runcall(self, f, r): + r[1] = f(), "wrapped" + def func(): + return "func" + + plug = Profile() + plug.prof = dummy() + result = plug.prepareTest(func) + try: + result(r) + assert r[1] == ("func", "wrapped") + finally: + plug.finalize(None) + + def test_finalize(self): + def func(): + pass + + plug = Profile() + plug.begin() + plug.prepareTest(func) + pfile = plug.pfile + try: + assert os.path.exists(pfile) + finally: + plug.finalize(None) + assert not os.path.exists(pfile), \ + "finalize did not remove temp file %s" % pfile + +if __name__ == '__main__': + unittest.main() diff --git a/unit_tests/test_result_proxy.py b/unit_tests/test_result_proxy.py new file mode 100644 index 0000000..9ed1e11 --- /dev/null +++ b/unit_tests/test_result_proxy.py @@ -0,0 +1,164 @@ +import sys +import unittest +from inspect import ismethod +from nose.config import Config +from nose.proxy import ResultProxyFactory, ResultProxy +from mock import RecordingPluginManager + +class TestResultProxy(unittest.TestCase): + + def test_proxy_has_basic_methods(self): + res = unittest.TestResult() + proxy = ResultProxy(res, test=None) + + methods = [ 'addError', 'addFailure', 'addSuccess', + 'startTest', 'stopTest', 'stop' ] + for method in methods: + m = getattr(proxy, method) + assert ismethod(m), "%s is not a method" % method + + def test_proxy_has_nose_methods(self): + res = unittest.TestResult() + proxy = ResultProxy(res, test=None) + + methods = [ 'beforeTest', 'afterTest' ] + for method in methods: + m = getattr(proxy, method) + assert ismethod(m), "%s is not a method" % method + + def test_proxy_proxies(self): + from nose.case import Test + class Dummy: + def __init__(self): + 
self.__dict__['called'] = [] + def __getattr__(self, attr): + c = self.__dict__['called'] + c.append(attr) + def dummy(*arg, **kw): + pass + return dummy + class TC(unittest.TestCase): + def runTest(self): + pass + try: + raise Exception("exception") + except: + err = sys.exc_info() + test = TC() + case = Test(test) + res = Dummy() + proxy = ResultProxy(res, test=case) + proxy.addError(test, err) + proxy.addFailure(test, err) + proxy.addSuccess(test) + proxy.startTest(test) + proxy.stopTest(test) + proxy.beforeTest(test) + proxy.afterTest(test) + proxy.stop() + proxy.shouldStop = 'yes please' + for method in ['addError', 'addFailure', 'addSuccess', + 'startTest', 'stopTest', 'beforeTest', 'afterTest', + 'stop']: + assert method in res.called, "%s was not proxied" + self.assertEqual(res.shouldStop, 'yes please') + + def test_attributes_are_proxied(self): + res = unittest.TestResult() + proxy = ResultProxy(res, test=None) + proxy.errors + proxy.failures + proxy.shouldStop + proxy.testsRun + + def test_test_cases_can_access_result_attributes(self): + from nose.case import Test + class TC(unittest.TestCase): + def run(self, result): + unittest.TestCase.run(self, result) + print "errors", result.errors + print "failures", result.failures + def runTest(self): + pass + test = TC() + case = Test(test) + res = unittest.TestResult() + proxy = ResultProxy(res, test=case) + case(proxy) + + def test_proxy_handles_missing_methods(self): + from nose.case import Test + class TC(unittest.TestCase): + def runTest(self): + pass + test = TC() + case = Test(test) + res = unittest.TestResult() + proxy = ResultProxy(res, case) + proxy.beforeTest(test) + proxy.afterTest(test) + + def test_proxy_calls_plugins(self): + from nose.case import Test + res = unittest.TestResult() + class TC(unittest.TestCase): + def test_error(self): + print "So long" + raise TypeError("oops") + def test_fail(self): + print "Hello" + self.fail() + def test(self): + pass + plugs = RecordingPluginManager() + config = Config(plugins=plugs) + + factory = ResultProxyFactory(config=config) + + case_e = Test(TC('test_error')) + case_f = Test(TC('test_fail')) + case_t = Test(TC('test')) + + pres_e = factory(res, case_e) + case_e(pres_e) + assert 'beforeTest' in plugs.called + assert 'startTest' in plugs.called + assert 'addError' in plugs.called + assert 'stopTest' in plugs.called + assert 'afterTest' in plugs.called + plugs.reset() + + pres_f = factory(res, case_f) + case_f(pres_f) + assert 'beforeTest' in plugs.called + assert 'startTest' in plugs.called + assert 'addFailure' in plugs.called + assert 'stopTest' in plugs.called + assert 'afterTest' in plugs.called + plugs.reset() + + pres_t = factory(res, case_t) + case_t(pres_t) + assert 'beforeTest' in plugs.called + assert 'startTest' in plugs.called + assert 'addSuccess' in plugs.called + assert 'stopTest' in plugs.called + assert 'afterTest' in plugs.called + plugs.reset() + + def test_stop_on_error(self): + from nose.case import Test + class TC(unittest.TestCase): + def runTest(self): + raise Exception("Enough!") + conf = Config(stopOnError=True) + test = TC() + case = Test(test) + res = unittest.TestResult() + proxy = ResultProxy(res, case, config=conf) + case(proxy) + assert proxy.shouldStop + assert res.shouldStop + +if __name__ == '__main__': + unittest.main() diff --git a/unit_tests/test_selector.py b/unit_tests/test_selector.py new file mode 100644 index 0000000..f09f729 --- /dev/null +++ b/unit_tests/test_selector.py @@ -0,0 +1,181 @@ +import logging +import os +import re +import 
unittest +import nose.selector +from nose.config import Config +from nose.selector import log, Selector +from nose.util import absdir +from mock import mod + +class TestSelector(unittest.TestCase): + + def tearDown(self): + logging.getLogger('nose.selector').setLevel(logging.WARN) + + def test_exclude(self): + s = Selector(Config()) + c = Config() + c.exclude = [re.compile(r'me')] + s2 = Selector(c) + + assert s.matches('test_foo') + assert s2.matches('test_foo') + assert s.matches('test_me') + assert not s2.matches('test_me') + + def test_include(self): + s = Selector(Config()) + c = Config() + c.include = [re.compile(r'me')] + s2 = Selector(c) + + assert s.matches('test') + assert s2.matches('test') + assert not s.matches('meatball') + assert s2.matches('meatball') + assert not s.matches('toyota') + assert not s2.matches('toyota') + + c.include.append(re.compile('toy')) + assert s.matches('test') + assert s2.matches('test') + assert not s.matches('meatball') + assert s2.matches('meatball') + assert not s.matches('toyota') + assert s2.matches('toyota') + + def test_want_class(self): + class Foo: + pass + class Bar(unittest.TestCase): + pass + class TestMe: + pass + class TestType(type): + def __new__(cls, name, bases, dct): + return type.__new__(cls, name, bases, dct) + class TestClass(object): + __metaclass__ = TestType + + s = Selector(Config()) + assert not s.wantClass(Foo) + assert s.wantClass(Bar) + assert s.wantClass(TestMe) + assert s.wantClass(TestClass) + + TestMe.__test__ = False + assert not s.wantClass(TestMe), "Failed to respect __test__ = False" + Bar.__test__ = False + assert not s.wantClass(Bar), "Failed to respect __test__ = False" + + def test_want_directory(self): + s = Selector(Config()) + assert s.wantDirectory('test') + assert not s.wantDirectory('test/whatever') + assert s.wantDirectory('whatever/test') + assert not s.wantDirectory('/some/path/to/unit_tests/support') + + # default src directory + assert s.wantDirectory('lib') + assert s.wantDirectory('src') + + # FIXME move to functional tests + + # this looks on disk for support/foo, which is a package + here = os.path.abspath(os.path.dirname(__file__)) + support = os.path.join(here, 'support') + tp = os.path.normpath(os.path.join(support, 'foo')) + assert s.wantDirectory(tp) + # this looks for support, which is not a package + assert not s.wantDirectory(support) + + def test_want_file(self): + + #logging.getLogger('nose.selector').setLevel(logging.DEBUG) + #logging.basicConfig() + + c = Config() + c.where = [absdir(os.path.join(os.path.dirname(__file__), 'support'))] + base = c.where[0] + s = Selector(c) + + assert not s.wantFile('setup.py') + assert not s.wantFile('/some/path/to/setup.py') + assert not s.wantFile('ez_setup.py') + assert not s.wantFile('.test.py') + assert not s.wantFile('_test.py') + assert not s.wantFile('setup_something.py') + + assert s.wantFile('test.py') + assert s.wantFile('foo/test_foo.py') + assert s.wantFile('bar/baz/test.py') + assert not s.wantFile('foo.py') + assert not s.wantFile('test_data.txt') + assert not s.wantFile('data.text') + assert not s.wantFile('bar/baz/__init__.py') + + def test_want_function(self): + def foo(): + pass + def test_foo(): + pass + def test_bar(): + pass + + s = Selector(Config()) + assert s.wantFunction(test_bar) + assert s.wantFunction(test_foo) + assert not s.wantFunction(foo) + + test_foo.__test__ = False + assert not s.wantFunction(test_foo), \ + "Failed to respect __test__ = False" + + def test_want_method(self): + class Baz: + def test_me(self): + 
pass + def test_too(self): + pass + def other(self): + pass + def test_not_test(self): + pass + test_not_test.__test__ = False + + s = Selector(Config()) + + assert s.wantMethod(Baz.test_me) + assert s.wantMethod(Baz.test_too) + assert not s.wantMethod(Baz.other) + assert not s.wantMethod(Baz.test_not_test), \ + "Failed to respect __test__ = False" + + def test_want_module(self): + m = mod('whatever') + m2 = mod('this.that') + m3 = mod('this.that.another') + m4 = mod('this.that.another.one') + m5 = mod('test.something') + m6 = mod('a.test') + m7 = mod('my_tests') + m8 = mod('__main__') + + s = Selector(Config()) + assert not s.wantModule(m) + assert not s.wantModule(m2) + assert not s.wantModule(m3) + assert not s.wantModule(m4) + assert not s.wantModule(m5) + assert s.wantModule(m6) + assert s.wantModule(m7) + assert s.wantModule(m8) + + m6.__test__ = False + assert not s.wantModule(m6), "Failed to respect __test__ = False" + + +if __name__ == '__main__': + # log.setLevel(logging.DEBUG) + unittest.main() diff --git a/unit_tests/test_selector_plugins.py b/unit_tests/test_selector_plugins.py new file mode 100644 index 0000000..7682d79 --- /dev/null +++ b/unit_tests/test_selector_plugins.py @@ -0,0 +1,30 @@ +import unittest +import nose.selector +from nose.config import Config +from nose.plugins.base import Plugin +from nose.plugins.manager import PluginManager + +class TestSelectorPlugins(unittest.TestCase): + + def test_rejection(self): + class EvilSelector(Plugin): + def wantFile(self, filename, package=None): + if 'good' in filename: + return False + return None + + c = Config(plugins=PluginManager(plugins=[EvilSelector()])) + s = nose.selector.Selector(c) + s2 = nose.selector.Selector(Config()) + + assert s.wantFile('test_neutral.py') + assert s2.wantFile('test_neutral.py') + + assert s.wantFile('test_evil.py') + assert s2.wantFile('test_evil.py') + + assert not s.wantFile('test_good.py') + assert s2.wantFile('test_good.py') + +if __name__ == '__main__': + unittest.main() diff --git a/unit_tests/test_skip_plugin.py b/unit_tests/test_skip_plugin.py new file mode 100644 index 0000000..c1dccee --- /dev/null +++ b/unit_tests/test_skip_plugin.py @@ -0,0 +1,130 @@ +import unittest +from nose.config import Config +from nose.plugins.skip import Skip, SkipTest +from nose.result import TextTestResult +from StringIO import StringIO +from nose.result import _TextTestResult +from optparse import OptionParser +try: + # 2.7+ + from unittest.runner import _WritelnDecorator +except ImportError: + from unittest import _WritelnDecorator + + +class TestSkipPlugin(unittest.TestCase): + + def test_api_present(self): + sk = Skip() + sk.addOptions + sk.configure + sk.prepareTestResult + + def test_prepare_patches_result(self): + stream = _WritelnDecorator(StringIO()) + res = _TextTestResult(stream, 0, 1) + sk = Skip() + sk.prepareTestResult(res) + res._orig_addError + res._orig_printErrors + res._orig_wasSuccessful + res.skipped + self.assertEqual(res.errorClasses, + {SkipTest: (res.skipped, 'SKIP', False)}) + + # result w/out print works too + res = unittest.TestResult() + sk = Skip() + sk.prepareTestResult(res) + res._orig_addError + res.skipped + self.assertEqual(res.errorClasses, + {SkipTest: (res.skipped, 'SKIP', False)}) + + def test_patched_result_handles_skip(self): + res = unittest.TestResult() + sk = Skip() + sk.prepareTestResult(res) + + class TC(unittest.TestCase): + def test(self): + raise SkipTest('skip me') + + test = TC('test') + test(res) + assert not res.errors, "Skip was not caught: %s" % 
res.errors + assert res.skipped + assert res.skipped[0][0] is test + + def test_patches_only_when_needed(self): + class NoPatch(unittest.TestResult): + def __init__(self): + self.errorClasses = {} + + res = NoPatch() + sk = Skip() + sk.prepareTestResult(res) + assert not hasattr(res, '_orig_addError'), \ + "Skip patched a result class it didn't need to patch" + + + def test_skip_output(self): + class TC(unittest.TestCase): + def test(self): + raise SkipTest('skip me') + + stream = _WritelnDecorator(StringIO()) + res = _TextTestResult(stream, 0, 1) + sk = Skip() + sk.prepareTestResult(res) + + test = TC('test') + test(res) + assert not res.errors, "Skip was not caught: %s" % res.errors + assert res.skipped + + res.printErrors() + out = stream.getvalue() + print out + assert out + assert out.strip() == "S" + assert res.wasSuccessful() + + def test_skip_output_verbose(self): + + class TC(unittest.TestCase): + def test(self): + raise SkipTest('skip me too') + + stream = _WritelnDecorator(StringIO()) + res = _TextTestResult(stream, 0, verbosity=2) + sk = Skip() + sk.prepareTestResult(res) + test = TC('test') + test(res) + assert not res.errors, "Skip was not caught: %s" % res.errors + assert res.skipped + + res.printErrors() + out = stream.getvalue() + print out + assert out + + assert ' ... SKIP' in out + assert 'skip me too' in out + + def test_enabled_by_default(self): + sk = Skip() + assert sk.enabled, "Skip was not enabled by default" + + def test_can_be_disabled(self): + parser = OptionParser() + sk = Skip() + sk.addOptions(parser) + options, args = parser.parse_args(['--no-skip']) + sk.configure(options, Config()) + assert not sk.enabled, "Skip was not disabled by noSkip option" + + +if __name__ == '__main__': + unittest.main() diff --git a/unit_tests/test_suite.py b/unit_tests/test_suite.py new file mode 100644 index 0000000..b6eae20 --- /dev/null +++ b/unit_tests/test_suite.py @@ -0,0 +1,301 @@ +from nose.config import Config +from nose import case +from nose.suite import LazySuite, ContextSuite, ContextSuiteFactory, \ + ContextList +import imp +import sys +import unittest +from mock import ResultProxyFactory, ResultProxy + + +class TestLazySuite(unittest.TestCase): + + def setUp(self): + class TC(unittest.TestCase): + def test_one(self): + pass + def test_two(self): + pass + self.TC = TC + + def test_test_generator(self): + TC = self.TC + tests = [TC('test_one'), TC('test_two')] + def gen_tests(): + for test in tests: + yield test + suite = LazySuite(gen_tests) + self.assertEqual(list([test for test in suite]), tests) + + def test_lazy_and_nonlazy(self): + TC = self.TC + tests = [TC('test_one'), TC('test_two')] + def gen_tests(): + for test in tests: + yield test + + nonlazy = LazySuite(tests) + lazy = LazySuite(gen_tests) + + assert lazy + assert nonlazy + + lazytests = [] + nonlazytests = [] + for t in lazy: + print "lazy %s" % t + lazytests.append(t) + for t in nonlazy: + print "nonlazy %s" % t + nonlazytests.append(t) + slazy = map(str, lazytests) + snonlazy = map(str, nonlazytests) + assert slazy == snonlazy, \ + "Lazy and Nonlazy produced different test lists (%s vs %s)" \ + % (slazy, snonlazy) + + def test_lazy_nonzero(self): + """__nonzero__ works correctly for lazy suites""" + + TC = self.TC + tests = [TC('test_one'), TC('test_two')] + def gen_tests(): + for test in tests: + yield test + + lazy = LazySuite(gen_tests) + assert lazy + assert lazy + assert lazy + + count = 0 + for test in lazy: + print test + assert test + count += 1 + self.assertEqual(count, 2, "Expected 2 
tests, got %s" % count) + assert lazy + + def gen_tests_empty(): + for test in []: + yield test + return + empty = LazySuite(gen_tests_empty) + assert not empty + for test in empty: + assert False, "Loaded a test from empty suite: %s" % test + +class TestContextSuite(unittest.TestCase): + + def setUp(self): + class TC(unittest.TestCase): + def test_one(self): + pass + def test_two(self): + pass + self.TC = TC + + def test_tests_are_wrapped(self): + """Tests in a context suite are wrapped""" + suite = ContextSuite( + [self.TC('test_one'), self.TC('test_two')]) + for test in suite: + assert isinstance(test.test, self.TC) + + def test_nested_context_suites(self): + """Nested suites don't re-wrap""" + suite = ContextSuite( + [self.TC('test_one'), self.TC('test_two')]) + suite2 = ContextSuite(suite) + suite3 = ContextSuite([suite2]) + + # suite3 is [suite2] + tests = [t for t in suite3] + assert isinstance(tests[0], ContextSuite) + # suite2 is [suite] + tests = [t for t in tests[0]] + assert isinstance(tests[0], ContextSuite) + # suite is full of wrapped tests + tests = [t for t in tests[0]] + cases = filter(lambda t: isinstance(t, case.Test), tests) + assert cases + assert len(cases) == len(tests) + + # sub-suites knows they have a context + #assert suite.context is None + #assert suite2.context is suite + #assert suite3.context is suite2 + + def test_context_fixtures_called(self): + class P: + was_setup = False + was_torndown = False + def setup(self): + self.was_setup = True + + def teardown(self): + self.was_torndown = True + + context = P() + suite = ContextSuite( + [self.TC('test_one'), self.TC('test_two')], + context=context) + res = unittest.TestResult() + suite(res) + + assert not res.errors, res.errors + assert not res.failures, res.failures + assert context.was_setup + assert context.was_torndown + + def test_context_fixtures_for_ancestors(self): + top = imp.new_module('top') + top.bot = imp.new_module('top.bot') + top.bot.end = imp.new_module('top.bot.end') + + sys.modules['top'] = top + sys.modules['top.bot'] = top.bot + sys.modules['top.bot.end'] = top.bot.end + + class TC(unittest.TestCase): + def runTest(self): + pass + top.bot.TC = TC + TC.__module__ = 'top.bot' + + # suite with just TC test + # this suite should call top and top.bot setup + csf = ContextSuiteFactory() + suite = csf(ContextList([TC()], context=top.bot)) + + suite.setUp() + assert top in csf.was_setup, "Ancestor not set up" + assert top.bot in csf.was_setup, "Context not set up" + suite.has_run = True + suite.tearDown() + assert top in csf.was_torndown, "Ancestor not torn down" + assert top.bot in csf.was_torndown, "Context not torn down" + + # wrapped suites + # the outer suite sets up its context, the inner + # its context only, without re-setting up the outer context + csf = ContextSuiteFactory() + inner_suite = csf(ContextList([TC()], context=top.bot)) + suite = csf(ContextList(inner_suite, context=top)) + + suite.setUp() + assert top in csf.was_setup + assert not top.bot in csf.was_setup + inner_suite.setUp() + assert top in csf.was_setup + assert top.bot in csf.was_setup + assert csf.was_setup[top] is suite + assert csf.was_setup[top.bot] is inner_suite + + def test_context_fixtures_setup_fails(self): + class P: + was_setup = False + was_torndown = False + def setup(self): + self.was_setup = True + assert False, "Setup failed" + + def teardown(self): + self.was_torndown = True + + context = P() + suite = ContextSuite( + [self.TC('test_one'), self.TC('test_two')], + context=context) + res = 
unittest.TestResult() + suite(res) + + assert not res.failures, res.failures + assert res.errors, res.errors + assert context.was_setup + assert not context.was_torndown + assert res.testsRun == 0, \ + "Expected to run no tests but ran %s" % res.testsRun + + def test_context_fixtures_no_tests_no_setup(self): + class P: + was_setup = False + was_torndown = False + def setup(self): + self.was_setup = True + + def teardown(self): + self.was_torndown = True + + context = P() + suite = ContextSuite([], context=context) + res = unittest.TestResult() + suite(res) + + assert not res.failures, res.failures + assert not res.errors, res.errors + assert not context.was_setup + assert not context.was_torndown + assert res.testsRun == 0, \ + "Expected to run no tests but ran %s" % res.testsRun + + def test_result_proxy_used(self): + class TC(unittest.TestCase): + def runTest(self): + raise Exception("error") + + ResultProxy.called[:] = [] + res = unittest.TestResult() + config = Config() + + suite = ContextSuite([TC()], resultProxy=ResultProxyFactory()) + suite(res) + calls = [ c[0] for c in ResultProxy.called ] + self.assertEqual(calls, ['beforeTest', 'startTest', + 'addError', 'stopTest', 'afterTest']) + + +class TestContextSuiteFactory(unittest.TestCase): + + def test_ancestry(self): + top = imp.new_module('top') + top.bot = imp.new_module('top.bot') + top.bot.end = imp.new_module('top.bot.end') + + sys.modules['top'] = top + sys.modules['top.bot'] = top.bot + sys.modules['top.bot.end'] = top.bot.end + + class P: + pass + top.bot.P = P + P.__module__ = 'top.bot' + + csf = ContextSuiteFactory() + P_ancestors = list([a for a in csf.ancestry(P)]) + self.assertEqual(P_ancestors, [top.bot, top]) + + end_ancestors = list([a for a in csf.ancestry(top.bot.end)]) + self.assertEqual(end_ancestors, [top.bot, top]) + + bot_ancestors = list([a for a in csf.ancestry(top.bot)]) + self.assertEqual(bot_ancestors, [top]) + + top_ancestors = list([a for a in csf.ancestry(top)]) + self.assertEqual(top_ancestors, []) + + +if __name__ == '__main__': + import logging + logging.basicConfig(level=logging.DEBUG) + unittest.main() + +# class TC(unittest.TestCase): +# def runTest(self): +# raise Exception("error") + +# ResultProxy.called[:] = [] +# res = unittest.TestResult() +# config = Config() + + diff --git a/unit_tests/test_tools.py b/unit_tests/test_tools.py new file mode 100644 index 0000000..839d12b --- /dev/null +++ b/unit_tests/test_tools.py @@ -0,0 +1,207 @@ +import sys +import time +import unittest +from nose.tools import * + +compat_24 = sys.version_info >= (2, 4) + +class TestTools(unittest.TestCase): + + def test_ok(self): + ok_(True) + try: + ok_(False, "message") + except AssertionError, e: + assert str(e) == "message" + else: + self.fail("ok_(False) did not raise assertion error") + + def test_eq(self): + eq_(1, 1) + try: + eq_(1, 0, "message") + except AssertionError, e: + assert str(e) == "message" + else: + self.fail("eq_(1, 0) did not raise assertion error") + try: + eq_(1, 0) + except AssertionError, e: + assert str(e) == "1 != 0" + else: + self.fail("eq_(1, 0) did not raise assertion error") + + def test_raises(self): + from nose.case import FunctionTestCase + + def raise_typeerror(): + raise TypeError("foo") + + def noraise(): + pass + + raise_good = raises(TypeError)(raise_typeerror) + raise_other = raises(ValueError)(raise_typeerror) + no_raise = raises(TypeError)(noraise) + + tc = FunctionTestCase(raise_good) + self.assertEqual(str(tc), "%s.%s" % (__name__, 'raise_typeerror')) + + raise_good() + try: 
+ raise_other() + except TypeError, e: + pass + else: + self.fail("raises did pass through unwanted exception") + + try: + no_raise() + except AssertionError, e: + pass + else: + self.fail("raises did not raise assertion error on no exception") + + def test_timed(self): + + def too_slow(): + time.sleep(.3) + too_slow = timed(.2)(too_slow) + + def quick(): + time.sleep(.1) + quick = timed(.2)(quick) + + quick() + try: + too_slow() + except TimeExpired: + pass + else: + self.fail("Slow test did not throw TimeExpired") + + def test_make_decorator(self): + def func(): + pass + func.setup = 'setup' + func.teardown = 'teardown' + + def f1(): + pass + + f2 = make_decorator(func)(f1) + + assert f2.setup == 'setup' + assert f2.teardown == 'teardown' + + def test_nested_decorators(self): + from nose.tools import raises, timed, with_setup + + def test(): + pass + + def foo(): + pass + + test = with_setup(foo, foo)(test) + test = timed(1.0)(test) + test = raises(TypeError)(test) + assert test.setup == foo + assert test.teardown == foo + + def test_decorator_func_sorting(self): + from nose.tools import raises, timed, with_setup + from nose.util import func_lineno + + def test1(): + pass + + def test2(): + pass + + def test3(): + pass + + def foo(): + pass + + test1_pos = func_lineno(test1) + test2_pos = func_lineno(test2) + test3_pos = func_lineno(test3) + + test1 = raises(TypeError)(test1) + test2 = timed(1.0)(test2) + test3 = with_setup(foo)(test3) + + self.assertEqual(func_lineno(test1), test1_pos) + self.assertEqual(func_lineno(test2), test2_pos) + self.assertEqual(func_lineno(test3), test3_pos) + + def test_testcase_funcs(self): + import nose.tools + tc_asserts = [ at for at in dir(nose.tools) + if at.startswith('assert_') ] + print tc_asserts + + # FIXME: not sure which of these are in all supported + # versions of python + assert 'assert_raises' in tc_asserts + if compat_24: + assert 'assert_true' in tc_asserts + + def test_multiple_with_setup(self): + from nose.tools import with_setup + from nose.case import FunctionTestCase + from unittest import TestResult + + called = [] + def test(): + called.append('test') + + def test2(): + called.append('test2') + + def test3(): + called.append('test3') + + def s1(): + called.append('s1') + + def s2(): + called.append('s2') + + def s3(): + called.append('s3') + + def t1(): + called.append('t1') + + def t2(): + called.append('t2') + + def t3(): + called.append('t3') + + ws1 = with_setup(s1, t1)(test) + case1 = FunctionTestCase(ws1) + case1(TestResult()) + self.assertEqual(called, ['s1', 'test', 't1']) + + called[:] = [] + ws2 = with_setup(s2, t2)(test2) + ws2 = with_setup(s1, t1)(ws2) + case2 = FunctionTestCase(ws2) + case2(TestResult()) + self.assertEqual(called, ['s1', 's2', 'test2', 't2', 't1']) + + called[:] = [] + ws3 = with_setup(s3, t3)(test3) + ws3 = with_setup(s2, t2)(ws3) + ws3 = with_setup(s1, t1)(ws3) + case3 = FunctionTestCase(ws3) + case3(TestResult()) + self.assertEqual(called, ['s1', 's2', 's3', + 'test3', 't3', 't2', 't1']) + +if __name__ == '__main__': + unittest.main() diff --git a/unit_tests/test_twisted.py b/unit_tests/test_twisted.py new file mode 100644 index 0000000..460dacd --- /dev/null +++ b/unit_tests/test_twisted.py @@ -0,0 +1,93 @@ +from nose.exc import SkipTest +from nose.tools import * +from nose.twistedtools import * +try: + from twisted.internet.defer import Deferred + from twisted.internet.error import DNSLookupError +except ImportError: + raise SkipTest('twisted not available; skipping') + +_multiprocess_ = False + + 
+def teardown(): + # print "stopping reactor" + stop_reactor() + +class CustomError(Exception): + pass + +# FIXME move all dns-using tests to functional + +# Should succeed unless google is down +#@deferred +def test_resolve(): + return reactor.resolve("www.google.com") +test_resolve = deferred()(test_resolve) + +# Raises TypeError because the function does not return a Deferred +#@raises(TypeError) +#@deferred() +def test_raises_bad_return(): + print reactor + reactor.resolve("nose.python-hosting.com") +test_raises_bad_return = raises(TypeError)(deferred()(test_raises_bad_return)) + +# Check we propagate twisted Failures as Exceptions +# (XXX this test might take some time: find something better?) +#@raises(DNSLookupError) +#@deferred() +def test_raises_twisted_error(): + return reactor.resolve("x.y.z") +test_raises_twisted_error = raises(DNSLookupError)( + deferred()(test_raises_twisted_error)) + +# Check we detect Exceptions inside the callback chain +#@raises(CustomError) +#@deferred(timeout=1.0) +def test_raises_callback_error(): + d = Deferred() + def raise_error(_): + raise CustomError() + def finish(): + d.callback(None) + d.addCallback(raise_error) + reactor.callLater(0.01, finish) + return d +test_raises_callback_error = raises(CustomError)( + deferred(timeout=1.0)(test_raises_callback_error)) + +# Check we detect Exceptions inside the test body +#@raises(CustomError) +#@deferred(timeout=1.0) +def test_raises_plain_error(): + raise CustomError +test_raises_plain_error = raises(CustomError)( + deferred(timeout=1.0)(test_raises_plain_error)) + +# The deferred is triggered before the timeout: ok +#@deferred(timeout=1.0) +def test_timeout_ok(): + d = Deferred() + def finish(): + d.callback(None) + reactor.callLater(0.01, finish) + return d +test_timeout_ok = deferred(timeout=1.0)(test_timeout_ok) + +# The deferred is triggered after the timeout: failure +#@raises(TimeExpired) +#@deferred(timeout=0.1) +def test_timeout_expired(): + d = Deferred() + def finish(): + d.callback(None) + reactor.callLater(1.0, finish) + return d +test_timeout_expired = raises(TimeExpired)( + deferred(timeout=0.1)(test_timeout_expired)) + + +if __name__ == '__main__': + from nose import runmodule + runmodule() diff --git a/unit_tests/test_twisted_testcase.py b/unit_tests/test_twisted_testcase.py new file mode 100644 index 0000000..850d6e7 --- /dev/null +++ b/unit_tests/test_twisted_testcase.py @@ -0,0 +1,11 @@ +try: + from twisted.trial import unittest +except ImportError: + from nose import SkipTest + raise SkipTest('twisted not available; skipping') + +class TestTwisted(unittest.TestCase): + + def test(self): + pass + diff --git a/unit_tests/test_utils.py b/unit_tests/test_utils.py new file mode 100644 index 0000000..4a26ec7 --- /dev/null +++ b/unit_tests/test_utils.py @@ -0,0 +1,179 @@ +import os +import unittest +import nose +from nose import case +# don't import * -- some util functions look testlike +from nose import util + +np = os.path.normpath + +class TestUtils(unittest.TestCase): + + def test_file_like(self): + file_like = util.file_like + assert file_like('a/file') + assert file_like('file.py') + assert file_like('/some/file.py') + assert not file_like('a.file') + assert not file_like('some.package') + assert file_like('a-file') + assert not file_like('test') + + def test_split_test_name(self): + split_test_name = util.split_test_name + assert split_test_name('a.package:Some.method') == \ + (None, 'a.package', 'Some.method') + assert split_test_name('some.module') == \ + (None, 'some.module', 
None) + assert split_test_name('this/file.py:func') == \ + (np('this/file.py'), None, 'func') + assert split_test_name('some/file.py') == \ + (np('some/file.py'), None, None) + assert split_test_name(':Baz') == \ + (None, None, 'Baz') + assert split_test_name('foo:bar/baz.py') == \ + (np('foo:bar/baz.py'), None, None) + + def test_split_test_name_windows(self): + # convenience + stn = util.split_test_name + self.assertEqual(stn(r'c:\some\path.py:a_test'), + (np(r'c:\some\path.py'), None, 'a_test')) + self.assertEqual(stn(r'c:\some\path.py'), + (np(r'c:\some\path.py'), None, None)) + self.assertEqual(stn(r'c:/some/other/path.py'), + (np(r'c:/some/other/path.py'), None, None)) + self.assertEqual(stn(r'c:/some/other/path.py:Class.test'), + (np(r'c:/some/other/path.py'), None, 'Class.test')) + try: + stn('cat:dog:something') + except ValueError: + pass + else: + self.fail("Nonsense test name should throw ValueError") + + def test_test_address(self): + # test addresses are specified as + # package.module:class.method + # /path/to/file.py:class.method + # converted into 3-tuples (file, module, callable) + # all terms optional + test_address = util.test_address + absfile = util.absfile + class Foo: + def bar(self): + pass + def baz(): + pass + + f = Foo() + + class FooTC(unittest.TestCase): + def test_one(self): + pass + def test_two(self): + pass + + class CustomTestType(type): + pass + class CustomTC(unittest.TestCase): + __metaclass__ = CustomTestType + def test_one(self): + pass + def test_two(self): + pass + + foo_funct = case.FunctionTestCase(baz) + foo_functu = unittest.FunctionTestCase(baz) + + foo_mtc = case.MethodTestCase(Foo.bar) + + me = util.src(absfile(__file__)) + self.assertEqual(test_address(baz), + (me, __name__, 'baz')) + assert test_address(Foo) == (me, __name__, 'Foo') + assert test_address(Foo.bar) == (me, __name__, + 'Foo.bar') + assert test_address(f) == (me, __name__, 'Foo') + assert test_address(f.bar) == (me, __name__, 'Foo.bar') + assert test_address(nose) == ( + util.src(absfile(nose.__file__)), 'nose', None) + + # test passing the actual test callable, as the + # missed test plugin must do + self.assertEqual(test_address(FooTC('test_one')), + (me, __name__, 'FooTC.test_one')) + self.assertEqual(test_address(CustomTC('test_one')), + (me, __name__, 'CustomTC.test_one')) + self.assertEqual(test_address(foo_funct), + (me, __name__, 'baz')) + self.assertEqual(test_address(foo_functu), + (me, __name__, 'baz')) + self.assertEqual(test_address(foo_mtc), + (me, __name__, 'Foo.bar')) + + def test_isclass_detects_classes(self): + class TC(unittest.TestCase): + pass + class TC_Classic: + pass + class TC_object(object): + pass + # issue153 -- was not detecting custom typed classes... 
+ class TCType(type): + pass + class TC_custom_type(object): + __metaclass__ = TCType + class TC_unittest_custom_type(unittest.TestCase): + __metaclass__ = TCType + + assert util.isclass(TC), "failed to detect %s as class" % TC + assert util.isclass(TC_Classic), "failed to detect %s as class" % TC_Classic + assert util.isclass(TC_object), "failed to detect %s as class" % TC_object + assert util.isclass(TC_custom_type), "failed to detect %s as class" % TC_custom_type + assert util.isclass(TC_unittest_custom_type), "failed to detect %s as class" % TC_unittest_custom_type + + def test_isclass_ignores_nonclass_things(self): + anint = 1 + adict = {} + assert not util.isclass(anint), "should have ignored %s" % type(anint) + assert not util.isclass(adict), "should have ignored %s" % type(adict) + + def test_tolist(self): + tolist = util.tolist + assert tolist('foo') == ['foo'] + assert tolist(['foo', 'bar']) == ['foo', 'bar'] + assert tolist('foo,bar') == ['foo', 'bar'] + self.assertEqual(tolist('.*foo/.*,.1'), ['.*foo/.*', '.1']) + + def test_try_run(self): + try_run = util.try_run + import imp + + def bar(): + pass + + def bar_m(mod): + pass + + class Bar: + def __call__(self): + pass + + class Bar_m: + def __call__(self, mod): + pass + + foo = imp.new_module('foo') + foo.bar = bar + foo.bar_m = bar_m + foo.i_bar = Bar() + foo.i_bar_m = Bar_m() + + try_run(foo, ('bar',)) + try_run(foo, ('bar_m',)) + try_run(foo, ('i_bar',)) + try_run(foo, ('i_bar_m',)) + +if __name__ == '__main__': + unittest.main() diff --git a/unit_tests/test_xunit.py b/unit_tests/test_xunit.py new file mode 100644 index 0000000..0e87696 --- /dev/null +++ b/unit_tests/test_xunit.py @@ -0,0 +1,282 @@ + +import sys +import os +import optparse +import unittest +from xml.sax import saxutils + +from nose.tools import eq_ +from nose.plugins.xunit import Xunit, escape_cdata +from nose.exc import SkipTest +from nose.config import Config + +def mktest(): + class TC(unittest.TestCase): + def runTest(self): + pass + test = TC() + return test + +mktest.__test__ = False + +class TestEscaping(unittest.TestCase): + + def setUp(self): + self.x = Xunit() + + def test_all(self): + eq_(self.x._quoteattr( + '''<baz src="http://foo?f=1&b=2" quote="inix hubris 'maximus'?" />'''), + ('"<baz src="http://foo?f=1&b=2" ' + 'quote="inix hubris \'maximus\'?" />"')) + + def test_unicode_is_utf8_by_default(self): + eq_(self.x._quoteattr(u'Ivan Krsti\u0107'), + '"Ivan Krsti\xc4\x87"') + + def test_unicode_custom_utf16_madness(self): + self.x.encoding = 'utf-16' + utf16 = self.x._quoteattr(u'Ivan Krsti\u0107')[1:-1] + + # to avoid big/little endian bytes, assert that we can put it back: + eq_(utf16.decode('utf16'), u'Ivan Krsti\u0107') + + def test_control_characters(self): + # quoting of \n, \r varies in diff. 
python versions + n = saxutils.quoteattr('\n')[1:-1] + r = saxutils.quoteattr('\r')[1:-1] + eq_(self.x._quoteattr('foo\n\b\f\r'), '"foo%s??%s"' % (n, r)) + eq_(escape_cdata('foo\n\b\f\r'), 'foo\n??\r') + +class TestOptions(unittest.TestCase): + + def test_defaults(self): + parser = optparse.OptionParser() + x = Xunit() + x.add_options(parser, env={}) + (options, args) = parser.parse_args([]) + eq_(options.xunit_file, "nosetests.xml") + + def test_file_from_environ(self): + parser = optparse.OptionParser() + x = Xunit() + x.add_options(parser, env={'NOSE_XUNIT_FILE': "kangaroo.xml"}) + (options, args) = parser.parse_args([]) + eq_(options.xunit_file, "kangaroo.xml") + + def test_file_from_opt(self): + parser = optparse.OptionParser() + x = Xunit() + x.add_options(parser, env={}) + (options, args) = parser.parse_args(["--xunit-file=blagojevich.xml"]) + eq_(options.xunit_file, "blagojevich.xml") + +class TestXMLOutputWithXML(unittest.TestCase): + + def setUp(self): + self.xmlfile = os.path.abspath( + os.path.join(os.path.dirname(__file__), + 'support', 'xunit.xml')) + parser = optparse.OptionParser() + self.x = Xunit() + self.x.add_options(parser, env={}) + (options, args) = parser.parse_args([ + "--with-xunit", + "--xunit-file=%s" % self.xmlfile + ]) + self.x.configure(options, Config()) + + try: + import xml.etree.ElementTree + except ImportError: + self.ET = False + else: + self.ET = xml.etree.ElementTree + + def tearDown(self): + os.unlink(self.xmlfile) + + def get_xml_report(self): + class DummyStream: + pass + self.x.report(DummyStream()) + f = open(self.xmlfile, 'r') + return f.read() + f.close() + + def test_addFailure(self): + test = mktest() + self.x.startTest(test) + try: + raise AssertionError("one is not 'equal' to two") + except AssertionError: + some_err = sys.exc_info() + + self.x.addFailure(test, some_err) + + result = self.get_xml_report() + print result + + if self.ET: + tree = self.ET.fromstring(result) + eq_(tree.attrib['name'], "nosetests") + eq_(tree.attrib['tests'], "1") + eq_(tree.attrib['errors'], "0") + eq_(tree.attrib['failures'], "1") + eq_(tree.attrib['skip'], "0") + + tc = tree.find("testcase") + eq_(tc.attrib['classname'], "test_xunit.TC") + eq_(tc.attrib['name'], "runTest") + assert int(tc.attrib['time']) >= 0 + + err = tc.find("failure") + eq_(err.attrib['type'], "exceptions.AssertionError") + err_lines = err.text.strip().split("\n") + eq_(err_lines[0], 'Traceback (most recent call last):') + eq_(err_lines[-1], 'AssertionError: one is not \'equal\' to two') + eq_(err_lines[-2], ' raise AssertionError("one is not \'equal\' to two")') + else: + # this is a dumb test for 2.4- + assert '<?xml version="1.0" encoding="UTF-8"?>' in result + assert '<testsuite name="nosetests" tests="1" errors="0" failures="1" skip="0">' in result + assert '<testcase classname="test_xunit.TC" name="runTest"' in result + assert '<failure type="exceptions.AssertionError"' in result + assert "AssertionError: one is not 'equal' to two" in result + assert "AssertionError(\"one is not 'equal' to two\")" in result + assert '</failure></testcase></testsuite>' in result + + def test_addFailure_early(self): + test = mktest() + try: + raise AssertionError("one is not equal to two") + except AssertionError: + some_err = sys.exc_info() + + # add failure without startTest, due to custom TestResult munging? 
+ self.x.addFailure(test, some_err) + + result = self.get_xml_report() + print result + + if self.ET: + tree = self.ET.fromstring(result) + tc = tree.find("testcase") + eq_(tc.attrib['time'], "0") + else: + # this is a dumb test for 2.4- + assert '<?xml version="1.0" encoding="UTF-8"?>' in result + assert ('<testcase classname="test_xunit.TC" ' + 'name="runTest" time="0">') in result + + def test_addError(self): + test = mktest() + self.x.startTest(test) + try: + raise RuntimeError("some error happened") + except RuntimeError: + some_err = sys.exc_info() + + self.x.addError(test, some_err) + + result = self.get_xml_report() + print result + + if self.ET: + tree = self.ET.fromstring(result) + eq_(tree.attrib['name'], "nosetests") + eq_(tree.attrib['tests'], "1") + eq_(tree.attrib['errors'], "1") + eq_(tree.attrib['failures'], "0") + eq_(tree.attrib['skip'], "0") + + tc = tree.find("testcase") + eq_(tc.attrib['classname'], "test_xunit.TC") + eq_(tc.attrib['name'], "runTest") + assert int(tc.attrib['time']) >= 0 + + err = tc.find("error") + eq_(err.attrib['type'], "exceptions.RuntimeError") + err_lines = err.text.strip().split("\n") + eq_(err_lines[0], 'Traceback (most recent call last):') + eq_(err_lines[-1], 'RuntimeError: some error happened') + eq_(err_lines[-2], ' raise RuntimeError("some error happened")') + else: + # this is a dumb test for 2.4- + assert '<?xml version="1.0" encoding="UTF-8"?>' in result + assert '<testsuite name="nosetests" tests="1" errors="1" failures="0" skip="0">' in result + assert '<testcase classname="test_xunit.TC" name="runTest"' in result + assert '<error type="exceptions.RuntimeError"' in result + assert 'RuntimeError: some error happened' in result + assert '</error></testcase></testsuite>' in result + + def test_addError_early(self): + test = mktest() + try: + raise RuntimeError("some error happened") + except RuntimeError: + some_err = sys.exc_info() + + # call addError without startTest + # which can happen if setup() raises an error + self.x.addError(test, some_err) + + result = self.get_xml_report() + print result + + if self.ET: + tree = self.ET.fromstring(result) + tc = tree.find("testcase") + eq_(tc.attrib['time'], "0") + else: + # this is a dumb test for 2.4- + assert '<?xml version="1.0" encoding="UTF-8"?>' in result + assert ('<testcase classname="test_xunit.TC" ' + 'name="runTest" time="0">') in result + + def test_addSuccess(self): + test = mktest() + self.x.startTest(test) + self.x.addSuccess(test, (None,None,None)) + + result = self.get_xml_report() + print result + + if self.ET: + tree = self.ET.fromstring(result) + eq_(tree.attrib['name'], "nosetests") + eq_(tree.attrib['tests'], "1") + eq_(tree.attrib['errors'], "0") + eq_(tree.attrib['failures'], "0") + eq_(tree.attrib['skip'], "0") + + tc = tree.find("testcase") + eq_(tc.attrib['classname'], "test_xunit.TC") + eq_(tc.attrib['name'], "runTest") + assert int(tc.attrib['time']) >= 0 + else: + # this is a dumb test for 2.4- + assert '<?xml version="1.0" encoding="UTF-8"?>' in result + assert '<testsuite name="nosetests" tests="1" errors="0" failures="0" skip="0">' in result + assert '<testcase classname="test_xunit.TC" name="runTest"' in result + assert '</testsuite>' in result + + def test_addSuccess_early(self): + test = mktest() + # call addSuccess without startTest + # which can happen (?) 
-- did happen with JsLint plugin
+        self.x.addSuccess(test, (None,None,None))
+
+        result = self.get_xml_report()
+        print result
+
+        if self.ET:
+            tree = self.ET.fromstring(result)
+            tc = tree.find("testcase")
+            eq_(tc.attrib['time'], "0")
+        else:
+            # this is a dumb test for 2.4-
+            assert '<?xml version="1.0" encoding="UTF-8"?>' in result
+            assert ('<testcase classname="test_xunit.TC" '
+                    'name="runTest" time="0" />') in result
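The xunit tests above all follow the same wire-up: instantiate the plugin, register and parse its options, configure it against a nose Config, feed it test events, and ask it to report. The following is a minimal standalone sketch of that flow, mirroring TestXMLOutputWithXML.setUp and test_addSuccess; the output path "sketch.xml" and the no-op DummyStream are illustrative assumptions, not part of the test suite.

# Minimal sketch: drive the Xunit plugin by hand, as the tests above do.
# Assumptions: Python 2.x with this nose checkout importable; "sketch.xml"
# is an arbitrary, hypothetical output path.
import optparse
import unittest

from nose.config import Config
from nose.plugins.xunit import Xunit


class TC(unittest.TestCase):
    def runTest(self):
        pass


parser = optparse.OptionParser()
plugin = Xunit()
plugin.add_options(parser, env={})          # registers --with-xunit / --xunit-file
options, args = parser.parse_args(
    ["--with-xunit", "--xunit-file=sketch.xml"])
plugin.configure(options, Config())

test = TC()
plugin.startTest(test)                      # starts the per-test timer
plugin.addSuccess(test, (None, None, None)) # records one passing testcase


class DummyStream:
    # report() only needs a stream placeholder here, as in get_xml_report() above
    pass


plugin.report(DummyStream())                # writes the XML report file
print open("sketch.xml").read()             # testsuite with tests="1", failures="0"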