diff --git a/Lib/test/test_doctest.py b/Lib/test/test_doctest.py
--- a/Lib/test/test_doctest.py
+++ b/Lib/test/test_doctest.py
@@ -2664,41 +2664,35 @@
 These tests test this CLI functionality.
 
 We'll use the support module's script_helpers for this, and write test
 files to a temp dir to run the command against.  Due to a current limitation
 in script_helpers, though, we need a little utility function to turn the
 returned output into something we can doctest against:
 
     >>> def normalize(s):
     ...     return '\n'.join(s.decode().splitlines())
 
-Note: we also pass TERM='' to all the assert_python calls to avoid a bug
-in the readline library that is triggered in these tests because we are
-running them in a new python process.  See:
-
-  http://lists.gnu.org/archive/html/bug-readline/2013-06/msg00000.html
-
 With those preliminaries out of the way, we'll start with a file with two
 simple tests and no errors.  We'll run both the unadorned doctest command,
 and the verbose version, and then check the output:
 
     >>> from test import script_helper
     >>> with script_helper.temp_dir() as tmpdir:
     ...     fn = os.path.join(tmpdir, 'myfile.doc')
     ...     with open(fn, 'w') as f:
     ...         _ = f.write('This is a very simple test file.\n')
     ...         _ = f.write('   >>> 1 + 1\n')
     ...         _ = f.write('   2\n')
     ...         _ = f.write('   >>> "a"\n')
     ...         _ = f.write("   'a'\n")
     ...         _ = f.write('\n')
     ...         _ = f.write('And that is it.\n')
     ...     rc1, out1, err1 = script_helper.assert_python_ok(
-    ...             '-m', 'doctest', fn, TERM='')
+    ...             '-m', 'doctest', fn)
     ...     rc2, out2, err2 = script_helper.assert_python_ok(
-    ...             '-m', 'doctest', '-v', fn, TERM='')
+    ...             '-m', 'doctest', '-v', fn)
 
 With no arguments and passing tests, we should get no output:
 
     >>> rc1, out1, err1
     (0, b'', b'')
 
 With the verbose flag, we should see the test output, but no error output:
@@ -2751,27 +2745,27 @@ text files).
     ...         _ = f.write('   >>> 1 + 1\n')
     ...         _ = f.write('   2\n')
     ...         _ = f.write('   >>> "abc def"\n')
     ...         _ = f.write("   'abc def'\n")
     ...         _ = f.write("\n")
     ...         _ = f.write('   \"\"\"\n')
     ...     import shutil
     ...     rc1, out1, err1 = script_helper.assert_python_failure(
-    ...             '-m', 'doctest', fn, fn2, TERM='')
+    ...             '-m', 'doctest', fn, fn2)
     ...     rc2, out2, err2 = script_helper.assert_python_ok(
-    ...             '-m', 'doctest', '-o', 'ELLIPSIS', fn, TERM='')
+    ...             '-m', 'doctest', '-o', 'ELLIPSIS', fn)
     ...     rc3, out3, err3 = script_helper.assert_python_ok(
     ...             '-m', 'doctest', '-o', 'ELLIPSIS',
-    ...             '-o', 'NORMALIZE_WHITESPACE', fn, fn2, TERM='')
+    ...             '-o', 'NORMALIZE_WHITESPACE', fn, fn2)
     ...     rc4, out4, err4 = script_helper.assert_python_failure(
-    ...             '-m', 'doctest', '-f', fn, fn2, TERM='')
+    ...             '-m', 'doctest', '-f', fn, fn2)
     ...     rc5, out5, err5 = script_helper.assert_python_ok(
     ...             '-m', 'doctest', '-v', '-o', 'ELLIPSIS',
-    ...             '-o', 'NORMALIZE_WHITESPACE', fn, fn2, TERM='')
+    ...             '-o', 'NORMALIZE_WHITESPACE', fn, fn2)
 
 Our first test run will show the errors from the first file (doctest stops
 if a file has errors).  Note that doctest test-run error output appears on
 stdout, not stderr:
 
     >>> rc1, err1
     (1, b'')
     >>> print(normalize(out1))                # doctest: +ELLIPSIS
@@ -2867,28 +2861,28 @@ success output for the tests in both fil
     2 passed and 0 failed.
     Test passed.
 
 We should also check some typical error cases.
 
 Invalid file name:
 
     >>> rc, out, err = script_helper.assert_python_failure(
-    ...         '-m', 'doctest', 'nosuchfile', TERM='')
+    ...         '-m', 'doctest', 'nosuchfile')
     >>> rc, out
     (1, b'')
     >>> print(normalize(err))                    # doctest: +ELLIPSIS
     Traceback (most recent call last):
       ...
     FileNotFoundError: [Errno ...] No such file or directory: 'nosuchfile'
 
 Invalid doctest option:
 
     >>> rc, out, err = script_helper.assert_python_failure(
-    ...         '-m', 'doctest', '-o', 'nosuchoption', TERM='')
+    ...         '-m', 'doctest', '-o', 'nosuchoption')
     >>> rc, out
     (2, b'')
     >>> print(normalize(err))                    # doctest: +ELLIPSIS
     usage...invalid...nosuchoption...
 
 """
 
 ######################################################################
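For readers unfamiliar with the calling convention this patch changes:
script_helper's assert_python_ok and assert_python_failure treat extra
keyword arguments as environment-variable overrides for the child
interpreter, which is how TERM='' was being injected into every run.
A minimal standalone sketch of that pattern using only the standard
library (the helper name run_python_with_env is illustrative, not part
of the patch or of script_helper):

    import os
    import subprocess
    import sys

    def run_python_with_env(*args, **env_vars):
        # Run the current interpreter with the given arguments, applying
        # keyword arguments as environment overrides -- the same shape as
        # script_helper.assert_python_ok(*args, **env_vars).
        env = dict(os.environ)
        env.update(env_vars)  # e.g. TERM='' to blank the terminal type
        proc = subprocess.run(
            [sys.executable, *args],
            capture_output=True,
            env=env,
        )
        return proc.returncode, proc.stdout, proc.stderr

    # The pre-patch calls were thus roughly equivalent to:
    #     run_python_with_env('-m', 'doctest', 'myfile.doc', TERM='')
    # The patch simply drops the TERM='' override from each call site.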