diff --git a/Doc/library/unittest.rst b/Doc/library/unittest.rst --- a/Doc/library/unittest.rst +++ b/Doc/library/unittest.rst @@ -563,16 +563,56 @@ The following decorators implement test Usually you can use :meth:`TestCase.skipTest` or one of the skipping decorators instead of raising this directly. Skipped tests will not have :meth:`~TestCase.setUp` or :meth:`~TestCase.tearDown` run around them. Skipped classes will not have :meth:`~TestCase.setUpClass` or :meth:`~TestCase.tearDownClass` run. Skipped modules will not have :func:`setUpModule` or :func:`tearDownModule` run. +.. _unittest-warnings: + +Showing warnings +---------------- + +.. versionadded:: 3.5 + +:mod:`unittest` catches warnings (like :exc:`ResourceWarning`, +:exc:`DeprecationWarning`, etc.) raised while the tests run and shows them, +together with the line that triggered them, in the test result. The default +warning filter is ``'default'`` and it can be changed by passing a +``warnings`` keyword argument to :func:`unittest.main`. + +For example:: + + import unittest + + class SpamTestCase(unittest.TestCase): + + def test_spam(self): + self.assertEquals('eric idle', 'eric idle') + + if __name__ == '__main__': + unittest.main() + +This is the output of running the example above:: + + w. + + ====================================================================== + WARNING: test_spam (__main__.SpamTestCase) + ---------------------------------------------------------------------- + test_spam.py:6: DeprecationWarning: Please use assertEqual instead. + self.assertEquals('eric idle', 'eric idle') + + ---------------------------------------------------------------------- + Ran 1 test in 0.001s + + OK (warnings=1) + + .. _subtests: Distinguishing test iterations using subtests --------------------------------------------- .. versionadded:: 3.4 When some of your tests differ only by a some very small differences, for @@ -1726,16 +1766,23 @@ Loading and running tests .. attribute:: skipped A list containing 2-tuples of :class:`TestCase` instances and strings holding the reason for skipping the test. .. versionadded:: 3.1 + .. attribute:: warnings + + A list containing 2-tuples of :class:`TestCase` instances and the + warning objects recorded while the test was running. + + .. versionadded:: 3.5 + .. attribute:: expectedFailures A list containing 2-tuples of :class:`TestCase` instances and strings holding formatted tracebacks. Each tuple represents an expected failure of the test case. .. attribute:: unexpectedSuccesses diff --git a/Lib/test/test_doctest.py b/Lib/test/test_doctest.py --- a/Lib/test/test_doctest.py +++ b/Lib/test/test_doctest.py @@ -2077,98 +2077,98 @@ def test_DocTestSuite(): We create a Suite by providing a module.
A module can be provided by passing a module object: >>> import unittest >>> import test.sample_doctest >>> suite = doctest.DocTestSuite(test.sample_doctest) >>> suite.run(unittest.TestResult()) - + We can also supply the module by name: >>> suite = doctest.DocTestSuite('test.sample_doctest') >>> suite.run(unittest.TestResult()) - + The module need not contain any doctest examples: >>> suite = doctest.DocTestSuite('test.sample_doctest_no_doctests') >>> suite.run(unittest.TestResult()) - + The module need not contain any docstrings either: >>> suite = doctest.DocTestSuite('test.sample_doctest_no_docstrings') >>> suite.run(unittest.TestResult()) - + We can use the current module: >>> suite = test.sample_doctest.test_suite() >>> suite.run(unittest.TestResult()) - + We can also provide a DocTestFinder: >>> finder = doctest.DocTestFinder() >>> suite = doctest.DocTestSuite('test.sample_doctest', ... test_finder=finder) >>> suite.run(unittest.TestResult()) - + The DocTestFinder need not return any tests: >>> finder = doctest.DocTestFinder() >>> suite = doctest.DocTestSuite('test.sample_doctest_no_docstrings', ... test_finder=finder) >>> suite.run(unittest.TestResult()) - + We can supply global variables. If we pass globs, they will be used instead of the module globals. Here we'll pass an empty globals, triggering an extra error: >>> suite = doctest.DocTestSuite('test.sample_doctest', globs={}) >>> suite.run(unittest.TestResult()) - + Alternatively, we can provide extra globals. Here we'll make an error go away by providing an extra global variable: >>> suite = doctest.DocTestSuite('test.sample_doctest', ... extraglobs={'y': 1}) >>> suite.run(unittest.TestResult()) - + You can pass option flags. Here we'll cause an extra error by disabling the blank-line feature: >>> suite = doctest.DocTestSuite('test.sample_doctest', ... optionflags=doctest.DONT_ACCEPT_BLANKLINE) >>> suite.run(unittest.TestResult()) - + You can supply setUp and tearDown functions: >>> def setUp(t): ... import test.test_doctest ... test.test_doctest.sillySetup = True >>> def tearDown(t): ... import test.test_doctest ... del test.test_doctest.sillySetup Here, we installed a silly variable that the test expects: >>> suite = doctest.DocTestSuite('test.sample_doctest', ... setUp=setUp, tearDown=tearDown) >>> suite.run(unittest.TestResult()) - + But the tearDown restores sanity: >>> import test.test_doctest >>> test.test_doctest.sillySetup Traceback (most recent call last): ... AttributeError: module 'test.test_doctest' has no attribute 'sillySetup' @@ -2176,17 +2176,17 @@ def test_DocTestSuite(): The setUp and tearDown funtions are passed test objects. Here we'll use the setUp function to supply the missing variable y: >>> def setUp(test): ... test.globs['y'] = 1 >>> suite = doctest.DocTestSuite('test.sample_doctest', setUp=setUp) >>> suite.run(unittest.TestResult()) - + Here, we didn't need to use a tearDown function because we modified the test globals, which are a copy of the sample_doctest module dictionary. The test globals are automatically cleared for us after a test. """ def test_DocFileSuite(): diff --git a/Lib/test/test_statistics.py b/Lib/test/test_statistics.py --- a/Lib/test/test_statistics.py +++ b/Lib/test/test_statistics.py @@ -171,17 +171,17 @@ class NumericTestCase(unittest.TestCase) ... a = [1.001, 1.001e-10, 1.001e10] ... b = [1.0, 1e-10, 1e10] ... self.assertApproxEqual(a, b, rel=1e-3) ... >>> import unittest >>> from io import StringIO # Suppress test runner output. 
>>> suite = unittest.TestLoader().loadTestsFromTestCase(MyTest) >>> unittest.TextTestRunner(stream=StringIO()).run(suite) - + """ if tol is None: tol = self.tol if rel is None: rel = self.rel if ( isinstance(first, collections.Sequence) and diff --git a/Lib/unittest/case.py b/Lib/unittest/case.py --- a/Lib/unittest/case.py +++ b/Lib/unittest/case.py @@ -44,19 +44,29 @@ class _Outcome(object): def __init__(self, result=None): self.expecting_failure = False self.result = result self.result_supports_subtests = hasattr(result, "addSubTest") self.success = True self.skipped = [] self.expectedFailure = None self.errors = [] + self.warnings = [] @contextlib.contextmanager def testPartExecutor(self, test_case, isTest=False): + if isTest: + with warnings.catch_warnings(record=True) as warning_list: + yield from self._testPartExecutor(test_case, isTest) + for warn in warning_list: + self.warnings.append((test_case, warn)) + else: + yield from self._testPartExecutor(test_case, isTest) + + def _testPartExecutor(self, test_case, isTest): old_success = self.success self.success = True try: yield except KeyboardInterrupt: raise except SkipTest as e: self.success = False @@ -471,16 +481,24 @@ class TestCase(object): addSkip = getattr(result, 'addSkip', None) if addSkip is not None: addSkip(test_case, reason) else: warnings.warn("TestResult has no addSkip method, skips not reported", RuntimeWarning, 2) result.addSuccess(test_case) + def _addWarning(self, result, test_case, warning): + addWarning = getattr(result, 'addWarning', None) + if addWarning is not None: + addWarning(test_case, warning) + else: + warnings.warn("TestResult has no addWarning method, warnings " + "not reported", RuntimeWarning, 2) + @contextlib.contextmanager def subTest(self, msg=None, **params): """Return a context manager that will return the enclosed block of code in a subtest identified by the optional message and keyword parameters. A failure in the subtest marks the test case as failed but resumes execution at the end of the enclosed block, allowing further test code to be executed.
""" @@ -577,16 +596,18 @@ class TestCase(object): testMethod() outcome.expecting_failure = False with outcome.testPartExecutor(self): self.tearDown() self.doCleanups() for test, reason in outcome.skipped: self._addSkip(result, test, reason) + for test, warning in outcome.warnings: + self._addWarning(result, test, warning) self._feedErrorsToResult(result, outcome.errors) if outcome.success: if expecting_failure: if outcome.expectedFailure: self._addExpectedFailure(result, outcome.expectedFailure) else: self._addUnexpectedSuccess(result) else: diff --git a/Lib/unittest/result.py b/Lib/unittest/result.py --- a/Lib/unittest/result.py +++ b/Lib/unittest/result.py @@ -36,29 +36,33 @@ class TestResult(object): _testRunEntered = False _moduleSetUpFailed = False def __init__(self, stream=None, descriptions=None, verbosity=None): self.failfast = False self.failures = [] self.errors = [] self.testsRun = 0 self.skipped = [] + self.warnings = [] self.expectedFailures = [] self.unexpectedSuccesses = [] self.shouldStop = False self.buffer = False self._stdout_buffer = None self._stderr_buffer = None self._original_stdout = sys.stdout self._original_stderr = sys.stderr self._mirrorOutput = False def printErrors(self): "Called by TestRunner after test run" + def printWarnings(self): + "Called by TestRunner after test run" + def startTest(self, test): "Called when the given test is about to be run" self.testsRun += 1 self._mirrorOutput = False self._setupStdout() def _setupStdout(self): if self.buffer: @@ -140,16 +144,20 @@ class TestResult(object): def addSuccess(self, test): "Called when a test has completed successfully" pass def addSkip(self, test, reason): """Called when a test is skipped.""" self.skipped.append((test, reason)) + def addWarning(self, test, warning): + """Called when a test produces a warning.""" + self.warnings.append((test, warning)) + def addExpectedFailure(self, test, err): """Called when an expected failure/error occured.""" self.expectedFailures.append( (test, self._exc_info_to_string(err, test))) @failfast def addUnexpectedSuccess(self, test): """Called when a test was expected to fail, but succeed.""" @@ -202,11 +210,11 @@ class TestResult(object): def _count_relevant_tb_levels(self, tb): length = 0 while tb and not self._is_relevant_tb_level(tb): length += 1 tb = tb.tb_next return length def __repr__(self): - return ("<%s run=%i errors=%i failures=%i>" % + return ("<%s run=%i errors=%i failures=%i warnings=%i>" % (util.strclass(self.__class__), self.testsRun, len(self.errors), - len(self.failures))) + len(self.failures), len(self.warnings))) diff --git a/Lib/unittest/runner.py b/Lib/unittest/runner.py --- a/Lib/unittest/runner.py +++ b/Lib/unittest/runner.py @@ -82,16 +82,24 @@ class TextTestResult(result.TestResult): def addSkip(self, test, reason): super(TextTestResult, self).addSkip(test, reason) if self.showAll: self.stream.writeln("skipped {0!r}".format(reason)) elif self.dots: self.stream.write("s") self.stream.flush() + def addWarning(self, test, warning): + super(TextTestResult, self).addWarning(test, warning) + if self.showAll: + self.stream.writeln("WARNING") + elif self.dots: + self.stream.write("w") + self.stream.flush() + def addExpectedFailure(self, test, err): super(TextTestResult, self).addExpectedFailure(test, err) if self.showAll: self.stream.writeln("expected failure") elif self.dots: self.stream.write("x") self.stream.flush() @@ -107,20 +115,34 @@ class TextTestResult(result.TestResult): if self.dots or self.showAll: self.stream.writeln() 
self.printErrorList('ERROR', self.errors) self.printErrorList('FAIL', self.failures) def printErrorList(self, flavour, errors): for test, err in errors: self.stream.writeln(self.separator1) - self.stream.writeln("%s: %s" % (flavour,self.getDescription(test))) + self.stream.writeln("%s: %s" % (flavour, self.getDescription(test))) self.stream.writeln(self.separator2) self.stream.writeln("%s" % err) + def printWarnings(self): + if self.dots or self.showAll: + self.stream.writeln() + self.printWarningList('WARNING', self.warnings) + + def printWarningList(self, flavour, warning_list): + for test, warn in warning_list: + self.stream.writeln(self.separator1) + self.stream.writeln("%s: %s" % (flavour, self.getDescription(test))) + self.stream.writeln(self.separator2) + format_warning = warnings.formatwarning(warn.message, warn.category, + warn.filename, warn.lineno) + self.stream.writeln("%s" % format_warning) + class TextTestRunner(object): """A test runner class that displays results in textual form. It prints out the names of tests as they are run, errors as they occur, and a summary of the results at the end of the test run. """ resultclass = TextTestResult @@ -168,45 +190,49 @@ class TextTestRunner(object): test(result) finally: stopTestRun = getattr(result, 'stopTestRun', None) if stopTestRun is not None: stopTestRun() stopTime = time.time() timeTaken = stopTime - startTime result.printErrors() + result.printWarnings() if hasattr(result, 'separator2'): self.stream.writeln(result.separator2) run = result.testsRun self.stream.writeln("Ran %d test%s in %.3fs" % (run, run != 1 and "s" or "", timeTaken)) self.stream.writeln() - expectedFails = unexpectedSuccesses = skipped = 0 + expectedFails = unexpectedSuccesses = skipped = warns = 0 try: results = map(len, (result.expectedFailures, result.unexpectedSuccesses, - result.skipped)) + result.skipped, + result.warnings)) except AttributeError: pass else: - expectedFails, unexpectedSuccesses, skipped = results + expectedFails, unexpectedSuccesses, skipped, warns = results infos = [] if not result.wasSuccessful(): self.stream.write("FAILED") failed, errored = len(result.failures), len(result.errors) if failed: infos.append("failures=%d" % failed) if errored: infos.append("errors=%d" % errored) else: self.stream.write("OK") if skipped: infos.append("skipped=%d" % skipped) + if warns: + infos.append("warnings=%d" % warns) if expectedFails: infos.append("expected failures=%d" % expectedFails) if unexpectedSuccesses: infos.append("unexpected successes=%d" % unexpectedSuccesses) if infos: self.stream.writeln(" (%s)" % (", ".join(infos),)) else: self.stream.write("\n") diff --git a/Lib/unittest/test/_test_warnings.py b/Lib/unittest/test/_test_warnings.py --- a/Lib/unittest/test/_test_warnings.py +++ b/Lib/unittest/test/_test_warnings.py @@ -5,17 +5,16 @@ This module has a number of tests that r When the tests are run, the warnings are caught and their messages are printed to stdout. This module also accepts an arg that is then passed to unittest.main to affect the behavior of warnings. Test_TextTestRunner.test_warnings executes this script with different combinations of warnings args and -W flags and check that the output is correct. See #10535. 
""" -import sys import unittest import warnings def warnfun(): warnings.warn('rw', RuntimeWarning) class TestWarnings(unittest.TestCase): # unittest warnings will be printed at most once per type (max one message @@ -53,21 +52,28 @@ class TestWarnings(unittest.TestCase): # these warnings come from the same place; they will be printed # only once by default or three times if the 'always' filter is used def test_function(self): warnfun() warnfun() warnfun() +if __name__ == '__main__': + import sys + if len(sys.argv) == 2: + current_warning = sys.argv.pop() + # mimick the unittest.main behavior + elif not sys.warnoptions: + current_warning = 'default' + else: + current_warning = None -if __name__ == '__main__': - with warnings.catch_warnings(record=True) as ws: - # if an arg is provided pass it to unittest.main as 'warnings' - if len(sys.argv) == 2: - unittest.main(exit=False, warnings=sys.argv.pop()) - else: - unittest.main(exit=False) + loader = unittest.TestLoader() + runner = unittest.TextTestRunner(warnings=current_warning) + suite = unittest.TestSuite() + tests = loader.loadTestsFromTestCase(TestWarnings) + suite.addTests(tests) + result = runner.run(suite) - # print all the warning messages collected - for w in ws: - print(w.message) + for test, warning in result.warnings: + print(warning.message) diff --git a/Lib/unittest/test/test_result.py b/Lib/unittest/test/test_result.py --- a/Lib/unittest/test/test_result.py +++ b/Lib/unittest/test/test_result.py @@ -127,16 +127,40 @@ class Test_TestResult(unittest.TestCase) result.stopTest(test) self.assertTrue(result.wasSuccessful()) self.assertEqual(len(result.errors), 0) self.assertEqual(len(result.failures), 0) self.assertEqual(result.testsRun, 1) self.assertEqual(result.shouldStop, False) + def test_addWarning(self): + class Foo(unittest.TestCase): + def test_1(self): + pass + + test = Foo('test_1') + try: + raise UserWarning + except: + exc_info_tuple = sys.exc_info() + + result = unittest.TestResult() + + result.startTest(test) + result.addWarning(test, exc_info_tuple) + result.stopTest(test) + + self.assertTrue(result.wasSuccessful()) + self.assertEqual(len(result.errors), 0) + self.assertEqual(len(result.failures), 0) + self.assertEqual(len(result.warnings), 1) + self.assertEqual(result.testsRun, 1) + self.assertEqual(result.shouldStop, False) + # "addFailure(test, err)" # ... # "Called when the test case test signals a failure. err is a tuple of # the form returned by sys.exc_info(): (type, value, traceback)" # ... # "wasSuccessful() - Returns True if all tests run so far have passed, # otherwise returns False" # ... diff --git a/Lib/unittest/test/test_runner.py b/Lib/unittest/test/test_runner.py --- a/Lib/unittest/test/test_runner.py +++ b/Lib/unittest/test/test_runner.py @@ -209,16 +209,18 @@ class Test_TextTestRunner(unittest.TestC runner.run(unittest.TestSuite()) self.assertEqual(self.wasRegistered, 1) def test_works_with_result_without_startTestRun_stopTestRun(self): class OldTextResult(ResultWithNoStartTestRunStopTestRun): separator2 = '' def printErrors(self): pass + def printWarnings(self): + pass class Runner(unittest.TextTestRunner): def __init__(self): super(Runner, self).__init__(io.StringIO()) def _makeResult(self): return OldTextResult() @@ -281,47 +283,49 @@ class Test_TextTestRunner(unittest.TestC opts = dict(stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=os.path.dirname(__file__)) ae_msg = b'Please use assertEqual instead.' at_msg = b'Please use assertTrue instead.' 
# no args -> all the warnings are printed, unittest warnings only once p = subprocess.Popen([sys.executable, '_test_warnings.py'], **opts) out, err = get_parse_out_err(p) - self.assertIn(b'OK', err) + self.assertRegex(err.pop(), rb'OK \(warnings=\d+\)') # check that the total number of warnings in the output is correct self.assertEqual(len(out), 12) # check that the numbers of the different kind of warnings is correct for msg in [b'dw', b'iw', b'uw']: - self.assertEqual(out.count(msg), 3) + with self.subTest(msg=msg): + self.assertEqual(out.count(msg), 3) for msg in [ae_msg, at_msg, b'rw']: - self.assertEqual(out.count(msg), 1) + with self.subTest(msg=msg): + self.assertEqual(out.count(msg), 1) args_list = ( # passing 'ignore' as warnings arg -> no warnings [sys.executable, '_test_warnings.py', 'ignore'], # -W doesn't affect the result if the arg is passed [sys.executable, '-Wa', '_test_warnings.py', 'ignore'], # -W affects the result if the arg is not passed [sys.executable, '-Wi', '_test_warnings.py'] ) # in all these cases no warnings are printed for args in args_list: - p = subprocess.Popen(args, **opts) - out, err = get_parse_out_err(p) - self.assertIn(b'OK', err) - self.assertEqual(len(out), 0) - + with self.subTest(args=args): + p = subprocess.Popen(args, **opts) + out, err = get_parse_out_err(p) + self.assertEqual(err.pop(), b'OK') + self.assertEqual(len(out), 0) # passing 'always' as warnings arg -> all the warnings printed, # unittest warnings only once p = subprocess.Popen([sys.executable, '_test_warnings.py', 'always'], **opts) out, err = get_parse_out_err(p) - self.assertIn(b'OK', err) + self.assertRegex(err.pop(), rb'OK \(warnings=\d+\)') self.assertEqual(len(out), 14) for msg in [b'dw', b'iw', b'uw', b'rw']: self.assertEqual(out.count(msg), 3) for msg in [ae_msg, at_msg]: self.assertEqual(out.count(msg), 1) def testStdErrLookedUpAtInstantiationTime(self): # see issue 10786
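As a minimal illustrative sketch (assuming the patch above is applied, and using a ``SpamTestCase`` that mirrors the documentation example), the new ``TestResult.warnings`` list could be consumed programmatically like this:

    import io
    import unittest


    class SpamTestCase(unittest.TestCase):

        def test_spam(self):
            # assertEquals is a deprecated alias of assertEqual, so calling it
            # emits a DeprecationWarning that the patched runner records.
            self.assertEquals('eric idle', 'eric idle')


    if __name__ == '__main__':
        suite = unittest.TestLoader().loadTestsFromTestCase(SpamTestCase)
        # 'default' mirrors the documented default warning filter.
        runner = unittest.TextTestRunner(stream=io.StringIO(), warnings='default')
        result = runner.run(suite)
        # With the patch, result.warnings holds (test, warning) pairs, where the
        # warning is the object recorded by warnings.catch_warnings(record=True).
        for test, warning in result.warnings:
            print(test.id(), warning.category.__name__, warning.message)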