Index: Doc/library/unittest.rst
===================================================================
--- Doc/library/unittest.rst	(revision 76265)
+++ Doc/library/unittest.rst	(working copy)
@@ -168,9 +168,9 @@
 Running the revised script from the interpreter or another script produces the
 following output::
 
-   testchoice (__main__.TestSequenceFunctions) ... ok
-   testsample (__main__.TestSequenceFunctions) ... ok
-   testshuffle (__main__.TestSequenceFunctions) ... ok
+   testchoice (__main__.TestSequenceFunctions) ... [0.090175s] ok
+   testsample (__main__.TestSequenceFunctions) ... [0.009331s] ok
+   testshuffle (__main__.TestSequenceFunctions) ... [0.000726s] ok
 
    ----------------------------------------------------------------------
    Ran 3 tests in 0.110s
@@ -181,6 +181,7 @@
 are sufficient to meet many everyday testing needs.  The remainder of the
 documentation explores the full feature set from first principles.
 
+In verbose mode, the execution time of each test case is measured and displayed.
 
 .. _unittest-command-line-interface:
@@ -527,12 +528,12 @@
 This is the output of running the example above in verbose mode: ::
 
-   test_format (__main__.MyTestCase) ... skipped 'not supported in this library version'
-   test_nothing (__main__.MyTestCase) ... skipped 'demonstrating skipping'
-   test_windows_support (__main__.MyTestCase) ... skipped 'requires Windows'
+   test_format (__main__.MyTestCase) ... [0.000612s] skipped 'not supported in this library version'
+   test_nothing (__main__.MyTestCase) ... [0.000486s] skipped 'demonstrating skipping'
+   test_windows_support (__main__.MyTestCase) ... [0.000090s] skipped 'requires Windows'
 
    ----------------------------------------------------------------------
-   Ran 3 tests in 0.005s
+   Ran 3 tests in 0.001s
 
    OK (skipped=3)
Index: Lib/unittest/runner.py
===================================================================
--- Lib/unittest/runner.py	(revision 76265)
+++ Lib/unittest/runner.py	(working copy)
@@ -53,7 +53,7 @@
     def addSuccess(self, test):
         super(_TextTestResult, self).addSuccess(test)
         if self.showAll:
-            self.stream.writeln("ok")
+            self.stream.writeln("[{0:.6f}s] ok".format(self.runTime))
         elif self.dots:
             self.stream.write('.')
             self.stream.flush()
@@ -61,7 +61,7 @@
     def addError(self, test, err):
         super(_TextTestResult, self).addError(test, err)
         if self.showAll:
-            self.stream.writeln("ERROR")
+            self.stream.writeln("[{0:.6f}s] ERROR".format(self.runTime))
         elif self.dots:
             self.stream.write('E')
             self.stream.flush()
@@ -69,7 +69,7 @@
     def addFailure(self, test, err):
         super(_TextTestResult, self).addFailure(test, err)
         if self.showAll:
-            self.stream.writeln("FAIL")
+            self.stream.writeln("[{0:.6f}s] FAIL".format(self.runTime))
         elif self.dots:
             self.stream.write('F')
             self.stream.flush()
@@ -77,7 +77,7 @@
     def addSkip(self, test, reason):
         super(_TextTestResult, self).addSkip(test, reason)
         if self.showAll:
-            self.stream.writeln("skipped {0!r}".format(reason))
+            self.stream.writeln("[{0:.6f}s] skipped {1!r}".format(self.runTime, reason))
         elif self.dots:
             self.stream.write("s")
             self.stream.flush()
@@ -85,7 +85,7 @@
     def addExpectedFailure(self, test, err):
         super(_TextTestResult, self).addExpectedFailure(test, err)
         if self.showAll:
-            self.stream.writeln("expected failure")
+            self.stream.writeln("[{0:.6f}s] expected failure".format(self.runTime))
         elif self.dots:
             self.stream.write("x")
             self.stream.flush()
@@ -93,7 +93,7 @@
     def addUnexpectedSuccess(self, test):
         super(_TextTestResult, self).addUnexpectedSuccess(test)
         if self.showAll:
-            self.stream.writeln("unexpected success")
+            self.stream.writeln("[{0:.6f}s] unexpected success".format(self.runTime))
         elif self.dots:
             self.stream.write("u")
             self.stream.flush()
Index: Lib/unittest/case.py
===================================================================
--- Lib/unittest/case.py	(revision 76265)
+++ Lib/unittest/case.py	(working copy)
@@ -6,6 +6,7 @@
 import pprint
 import re
 import warnings
+import time
 
 from . import result, util
 
@@ -293,16 +294,23 @@
                 result.addError(self, sys.exc_info())
             else:
                 try:
+                    startTime = time.time()
                     testMethod()
+                    result.runTime = time.time() - startTime
                 except self.failureException:
+                    result.runTime = time.time() - startTime
                     result.addFailure(self, sys.exc_info())
                 except _ExpectedFailure as e:
+                    result.runTime = time.time() - startTime
                     result.addExpectedFailure(self, e.exc_info)
                 except _UnexpectedSuccess:
+                    result.runTime = time.time() - startTime
                     result.addUnexpectedSuccess(self)
                 except SkipTest as e:
+                    result.runTime = time.time() - startTime
                     result.addSkip(self, str(e))
                 except Exception:
+                    result.runTime = time.time() - startTime
                     result.addError(self, sys.exc_info())
                 else:
                     success = True
Index: Lib/test/test_unittest.py
===================================================================
--- Lib/test/test_unittest.py	(revision 76265)
+++ Lib/test/test_unittest.py	(working copy)
@@ -1591,6 +1591,20 @@
 
         self.assertEqual(events, ['run test1', 'run test2'])
 
+
+    # Check that the measured test case execution time is positive
+    def test_runTime(self):
+        class Foo(unittest.TestCase):
+            def test(self):
+                pass
+
+        test = Foo('test')
+        result = unittest.TestResult()
+        test.run(result)
+
+        self.assertTrue(result.runTime > 0.0)
+
+
     # "Add a TestCase ... to the suite"
     def test_addTest__TestCase(self):
         class Foo(unittest.TestCase):
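
For reference, a minimal sketch (not part of the patch) of how the timed verbose output can be exercised once the patch is applied. The TimingDemo class name and the sleep duration are illustrative only:

    import time
    import unittest

    class TimingDemo(unittest.TestCase):
        # A deliberately slow test so the measured time is clearly non-zero.
        def test_sleep(self):
            time.sleep(0.05)

    if __name__ == '__main__':
        suite = unittest.TestLoader().loadTestsFromTestCase(TimingDemo)
        # verbosity=2 enables the per-test "[...s] ok" annotation added by the
        # patch; at lower verbosity only the usual dot output is produced.
        unittest.TextTestRunner(verbosity=2).run(suite)

With the patch applied, the verbose run is expected to print a line of the form "test_sleep (__main__.TimingDemo) ... [0.05xxxxs] ok"; without the patch the timing prefix is absent.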