Mirror of https://github.com/python/cpython.git, synced 2025-12-31 04:23:37 +00:00

gh-127221: Add colour to unittest output (#127223)
Co-authored-by: Kirill Podoprigora <kirill.bast9@mail.ru>
Parent: d958d9f4a1
Commit: 23f2e8f13c
15 changed files with 136 additions and 60 deletions
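This commit makes unittest respect the interpreter-wide colour controls referenced throughout the hunks below (PYTHON_COLORS, plus the NO_COLOR and FORCE_COLOR conventions). As orientation before the per-file hunks, here is a hypothetical usage sketch, not part of this commit, showing a suite run with colour explicitly disabled; the environment variable names come from the documentation changes below, everything else (the discover arguments, the child-process approach) is illustrative only:

# Hypothetical sketch: run a test suite with colour disabled by setting
# NO_COLOR (any non-empty value) for the child process. FORCE_COLOR works
# the same way in the other direction, and PYTHON_COLORS takes precedence
# over both.
import os
import subprocess
import sys

env = dict(os.environ, NO_COLOR="1")
subprocess.run(
    [sys.executable, "-m", "unittest", "discover", "-v"],
    env=env,
    check=False,  # a failing suite exits non-zero; do not raise here
)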
@@ -78,6 +78,13 @@
.. |python_version_literal| replace:: ``Python {version}``
.. |python_x_dot_y_literal| replace:: ``python{version}``
.. |usr_local_bin_python_x_dot_y_literal| replace:: ``/usr/local/bin/python{version}``

.. Apparently this how you hack together a formatted link:
   (https://www.docutils.org/docs/ref/rst/directives.html#replacement-text)
.. |FORCE_COLOR| replace:: ``FORCE_COLOR``
.. _FORCE_COLOR: https://force-color.org/
.. |NO_COLOR| replace:: ``NO_COLOR``
.. _NO_COLOR: https://no-color.org/
"""

# There are two options for replacing |today|. Either, you set today to some
@@ -136,6 +136,10 @@ examples of doctests in the standard Python test suite and libraries.
Especially useful examples can be found in the standard test file
:file:`Lib/test/test_doctest/test_doctest.py`.

.. versionadded:: 3.13
   Output is colorized by default and can be
   :ref:`controlled using environment variables <using-on-controlling-color>`.


.. _doctest-simple-testmod:
@@ -44,6 +44,10 @@ The module's API can be divided into two parts:
necessary for later formatting without holding references to actual exception
and traceback objects.

.. versionadded:: 3.13
   Output is colorized by default and can be
   :ref:`controlled using environment variables <using-on-controlling-color>`.


Module-Level Functions
----------------------
@@ -46,7 +46,6 @@ test runner
   a textual interface, or return a special value to indicate the results of
   executing the tests.


.. seealso::

   Module :mod:`doctest`

@@ -198,6 +197,9 @@ For a list of all the command-line options::
   In earlier versions it was only possible to run individual test methods and
   not modules or classes.

.. versionadded:: 3.14
   Output is colorized by default and can be
   :ref:`controlled using environment variables <using-on-controlling-color>`.

Command-line options
~~~~~~~~~~~~~~~~~~~~
@@ -663,14 +663,6 @@ output. To control the color output only in the Python interpreter, the
precedence over ``NO_COLOR``, which in turn takes precedence over
``FORCE_COLOR``.

.. Apparently this how you hack together a formatted link:

.. |FORCE_COLOR| replace:: ``FORCE_COLOR``
.. _FORCE_COLOR: https://force-color.org/

.. |NO_COLOR| replace:: ``NO_COLOR``
.. _NO_COLOR: https://no-color.org/

Options you shouldn't use
~~~~~~~~~~~~~~~~~~~~~~~~~
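The precedence spelled out in the hunk above (PYTHON_COLORS over NO_COLOR over FORCE_COLOR) can be summarised in a small sketch. This is not CPython's real implementation, which lives in the private _colorize module; it only illustrates the documented rules, and the isatty fallback is an assumption about the usual default behaviour:

import os
import sys

def should_use_colour(stream=sys.stdout) -> bool:
    # Sketch of the documented precedence, not CPython's actual code.
    python_colors = os.environ.get("PYTHON_COLORS")
    if python_colors == "1":
        return True
    if python_colors == "0":
        return False
    if os.environ.get("NO_COLOR"):     # any non-empty value disables colour
        return False
    if os.environ.get("FORCE_COLOR"):  # any non-empty value forces colour
        return True
    return stream.isatty()             # assumed default: colour only on a TTY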
@@ -252,15 +252,6 @@ Improved error messages
  the canonical |NO_COLOR|_ and |FORCE_COLOR|_ environment variables.
  (Contributed by Pablo Galindo Salgado in :gh:`112730`.)

.. Apparently this how you hack together a formatted link:
   (https://www.docutils.org/docs/ref/rst/directives.html#replacement-text)

.. |FORCE_COLOR| replace:: ``FORCE_COLOR``
.. _FORCE_COLOR: https://force-color.org/

.. |NO_COLOR| replace:: ``NO_COLOR``
.. _NO_COLOR: https://no-color.org/

* A common mistake is to write a script with the same name as a
  standard library module. When this results in errors, we now
  display a more helpful error message:
@@ -616,6 +616,13 @@ unicodedata
unittest
--------

* :mod:`unittest` output is now colored by default.
  This can be controlled via the :envvar:`PYTHON_COLORS` environment
  variable as well as the canonical |NO_COLOR|_
  and |FORCE_COLOR|_ environment variables.
  See also :ref:`using-on-controlling-color`.
  (Contributed by Hugo van Kemenade in :gh:`127221`.)

* unittest discovery supports :term:`namespace package` as start
  directory again. It was removed in Python 3.11.
  (Contributed by Jacob Walls in :gh:`80958`.)
@@ -2,6 +2,7 @@
import contextvars
import unittest
from test import support
from test.support import force_not_colorized

support.requires_working_socket(module=True)

@@ -252,6 +253,7 @@ async def on_cleanup(self):
        test.doCleanups()
        self.assertEqual(events, ['asyncSetUp', 'test', 'asyncTearDown', 'cleanup'])

    @force_not_colorized
    def test_exception_in_tear_clean_up(self):
        class Test(unittest.IsolatedAsyncioTestCase):
            async def asyncSetUp(self):
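The test changes in this and the following hunks all follow the same pattern: tests that compare textual runner output are decorated with test.support.force_not_colorized so the expected strings stay free of ANSI escapes. The real helper lives in test.support and may differ in detail; a rough sketch of what such a decorator has to do looks like this:

import functools
import os
from unittest import mock

def force_not_colorized(func):
    # Sketch only: run *func* with the colour environment variables arranged so
    # that colour detection reports "no colour", then restore the environment.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        env = {k: v for k, v in os.environ.items()
               if k not in ("FORCE_COLOR", "PYTHON_COLORS")}
        env["NO_COLOR"] = "1"  # any non-empty value disables colour
        with mock.patch.dict(os.environ, env, clear=True):
            return func(*args, **kwargs)
    return wrapper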
@@ -4,6 +4,7 @@
from test import support
import unittest
import test.test_unittest
from test.support import force_not_colorized
from test.test_unittest.test_result import BufferedWriter

@@ -120,6 +121,7 @@ def run(self, test):
        self.assertEqual(['test.test_unittest', 'test.test_unittest2'],
                         program.testNames)

    @force_not_colorized
    def test_NonExit(self):
        stream = BufferedWriter()
        program = unittest.main(exit=False,

@@ -135,6 +137,7 @@ def test_NonExit(self):
                    'expected failures=1, unexpected successes=1)\n')
        self.assertTrue(out.endswith(expected))

    @force_not_colorized
    def test_Exit(self):
        stream = BufferedWriter()
        with self.assertRaises(SystemExit) as cm:

@@ -152,6 +155,7 @@ def test_Exit(self):
                    'expected failures=1, unexpected successes=1)\n')
        self.assertTrue(out.endswith(expected))

    @force_not_colorized
    def test_ExitAsDefault(self):
        stream = BufferedWriter()
        with self.assertRaises(SystemExit):

@@ -167,6 +171,7 @@ def test_ExitAsDefault(self):
                    'expected failures=1, unexpected successes=1)\n')
        self.assertTrue(out.endswith(expected))

    @force_not_colorized
    def test_ExitSkippedSuite(self):
        stream = BufferedWriter()
        with self.assertRaises(SystemExit) as cm:

@@ -179,6 +184,7 @@ def test_ExitSkippedSuite(self):
        expected = '\n\nOK (skipped=1)\n'
        self.assertTrue(out.endswith(expected))

    @force_not_colorized
    def test_ExitEmptySuite(self):
        stream = BufferedWriter()
        with self.assertRaises(SystemExit) as cm:
@@ -7,6 +7,7 @@
import traceback
import unittest
from unittest.util import strclass
from test.support import force_not_colorized
from test.test_unittest.support import BufferedWriter

@@ -14,7 +15,7 @@ class MockTraceback(object):
    class TracebackException:
        def __init__(self, *args, **kwargs):
            self.capture_locals = kwargs.get('capture_locals', False)
        def format(self):
        def format(self, **kwargs):
            result = ['A traceback']
            if self.capture_locals:
                result.append('locals')

@@ -205,6 +206,7 @@ def test_1(self):
        self.assertIs(test_case, test)
        self.assertIsInstance(formatted_exc, str)

    @force_not_colorized
    def test_addFailure_filter_traceback_frames(self):
        class Foo(unittest.TestCase):
            def test_1(self):

@@ -231,6 +233,7 @@ def get_exc_info():
        self.assertEqual(len(dropped), 1)
        self.assertIn("raise self.failureException(msg)", dropped[0])

    @force_not_colorized
    def test_addFailure_filter_traceback_frames_context(self):
        class Foo(unittest.TestCase):
            def test_1(self):

@@ -260,6 +263,7 @@ def get_exc_info():
        self.assertEqual(len(dropped), 1)
        self.assertIn("raise self.failureException(msg)", dropped[0])

    @force_not_colorized
    def test_addFailure_filter_traceback_frames_chained_exception_self_loop(self):
        class Foo(unittest.TestCase):
            def test_1(self):

@@ -285,6 +289,7 @@ def get_exc_info():
        formatted_exc = result.failures[0][1]
        self.assertEqual(formatted_exc.count("Exception: Loop\n"), 1)

    @force_not_colorized
    def test_addFailure_filter_traceback_frames_chained_exception_cycle(self):
        class Foo(unittest.TestCase):
            def test_1(self):

@@ -446,6 +451,7 @@ def testFailFast(self):
        result.addUnexpectedSuccess(None)
        self.assertTrue(result.shouldStop)

    @force_not_colorized
    def testFailFastSetByRunner(self):
        stream = BufferedWriter()
        runner = unittest.TextTestRunner(stream=stream, failfast=True)

@@ -619,6 +625,7 @@ def _run_test(self, test_name, verbosity, tearDownError=None):
        test.run(result)
        return stream.getvalue()

    @force_not_colorized
    def testDotsOutput(self):
        self.assertEqual(self._run_test('testSuccess', 1), '.')
        self.assertEqual(self._run_test('testSkip', 1), 's')

@@ -627,6 +634,7 @@ def testDotsOutput(self):
        self.assertEqual(self._run_test('testExpectedFailure', 1), 'x')
        self.assertEqual(self._run_test('testUnexpectedSuccess', 1), 'u')

    @force_not_colorized
    def testLongOutput(self):
        classname = f'{__name__}.{self.Test.__qualname__}'
        self.assertEqual(self._run_test('testSuccess', 2),

@@ -642,17 +650,21 @@ def testLongOutput(self):
        self.assertEqual(self._run_test('testUnexpectedSuccess', 2),
                         f'testUnexpectedSuccess ({classname}.testUnexpectedSuccess) ... unexpected success\n')

    @force_not_colorized
    def testDotsOutputSubTestSuccess(self):
        self.assertEqual(self._run_test('testSubTestSuccess', 1), '.')

    @force_not_colorized
    def testLongOutputSubTestSuccess(self):
        classname = f'{__name__}.{self.Test.__qualname__}'
        self.assertEqual(self._run_test('testSubTestSuccess', 2),
                         f'testSubTestSuccess ({classname}.testSubTestSuccess) ... ok\n')

    @force_not_colorized
    def testDotsOutputSubTestMixed(self):
        self.assertEqual(self._run_test('testSubTestMixed', 1), 'sFE')

    @force_not_colorized
    def testLongOutputSubTestMixed(self):
        classname = f'{__name__}.{self.Test.__qualname__}'
        self.assertEqual(self._run_test('testSubTestMixed', 2),

@@ -661,6 +673,7 @@ def testLongOutputSubTestMixed(self):
                         f' testSubTestMixed ({classname}.testSubTestMixed) [fail] (c=3) ... FAIL\n'
                         f' testSubTestMixed ({classname}.testSubTestMixed) [error] (d=4) ... ERROR\n')

    @force_not_colorized
    def testDotsOutputTearDownFail(self):
        out = self._run_test('testSuccess', 1, AssertionError('fail'))
        self.assertEqual(out, 'F')

@@ -671,6 +684,7 @@ def testDotsOutputTearDownFail(self):
        out = self._run_test('testSkip', 1, AssertionError('fail'))
        self.assertEqual(out, 'sF')

    @force_not_colorized
    def testLongOutputTearDownFail(self):
        classname = f'{__name__}.{self.Test.__qualname__}'
        out = self._run_test('testSuccess', 2, AssertionError('fail'))
@@ -4,6 +4,7 @@
import pickle
import subprocess
from test import support
from test.support import force_not_colorized

import unittest
from unittest.case import _Outcome

@@ -106,6 +107,7 @@ def cleanup2(*args, **kwargs):
        self.assertTrue(test.doCleanups())
        self.assertEqual(cleanups, [(2, (), {}), (1, (1, 2, 3), dict(four='hello', five='goodbye'))])

    @force_not_colorized
    def testCleanUpWithErrors(self):
        class TestableTest(unittest.TestCase):
            def testNothing(self):

@@ -416,6 +418,7 @@ def cleanup2():
        self.assertIsInstance(e2[1], CustomError)
        self.assertEqual(str(e2[1]), 'cleanup1')

    @force_not_colorized
    def test_with_errors_addCleanUp(self):
        ordering = []
        class TestableTest(unittest.TestCase):

@@ -439,6 +442,7 @@ def tearDownClass(cls):
                         ['setUpClass', 'setUp', 'cleanup_exc',
                          'tearDownClass', 'cleanup_good'])

    @force_not_colorized
    def test_run_with_errors_addClassCleanUp(self):
        ordering = []
        class TestableTest(unittest.TestCase):

@@ -462,6 +466,7 @@ def tearDownClass(cls):
                         ['setUpClass', 'setUp', 'test', 'cleanup_good',
                          'tearDownClass', 'cleanup_exc'])

    @force_not_colorized
    def test_with_errors_in_addClassCleanup_and_setUps(self):
        ordering = []
        class_blow_up = False

@@ -514,6 +519,7 @@ def tearDownClass(cls):
                         ['setUpClass', 'setUp', 'tearDownClass',
                          'cleanup_exc'])

    @force_not_colorized
    def test_with_errors_in_tearDownClass(self):
        ordering = []
        class TestableTest(unittest.TestCase):

@@ -590,6 +596,7 @@ def test(self):
                          'inner setup', 'inner test', 'inner cleanup',
                          'end outer test', 'outer cleanup'])

    @force_not_colorized
    def test_run_empty_suite_error_message(self):
        class EmptyTest(unittest.TestCase):
            pass

@@ -663,6 +670,7 @@ class Module(object):
        self.assertEqual(cleanups,
                         [((1, 2), {'function': 'hello'})])

    @force_not_colorized
    def test_run_module_cleanUp(self):
        blowUp = True
        ordering = []

@@ -802,6 +810,7 @@ def tearDownClass(cls):
                          'tearDownClass', 'cleanup_good'])
        self.assertEqual(unittest.case._module_cleanups, [])

    @force_not_colorized
    def test_run_module_cleanUp_when_teardown_exception(self):
        ordering = []
        class Module(object):

@@ -963,6 +972,7 @@ def testNothing(self):
        self.assertEqual(cleanups,
                         [((1, 2), {'function': 3, 'self': 4})])

    @force_not_colorized
    def test_with_errors_in_addClassCleanup(self):
        ordering = []

@@ -996,6 +1006,7 @@ def tearDownClass(cls):
                         ['setUpModule', 'setUpClass', 'test', 'tearDownClass',
                          'cleanup_exc', 'tearDownModule', 'cleanup_good'])

    @force_not_colorized
    def test_with_errors_in_addCleanup(self):
        ordering = []
        class Module(object):

@@ -1026,6 +1037,7 @@ def tearDown(self):
                         ['setUpModule', 'setUp', 'test', 'tearDown',
                          'cleanup_exc', 'tearDownModule', 'cleanup_good'])

    @force_not_colorized
    def test_with_errors_in_addModuleCleanup_and_setUps(self):
        ordering = []
        module_blow_up = False

@@ -1318,6 +1330,7 @@ def MockResultClass(*args):
        expectedresult = (runner.stream, DESCRIPTIONS, VERBOSITY)
        self.assertEqual(runner._makeResult(), expectedresult)

    @force_not_colorized
    @support.requires_subprocess()
    def test_warnings(self):
        """
@@ -1,5 +1,6 @@
import unittest

from test.support import force_not_colorized
from test.test_unittest.support import LoggingResult

@@ -293,6 +294,7 @@ def test_die(self):
        self.assertFalse(result.unexpectedSuccesses)
        self.assertTrue(result.wasSuccessful())

    @force_not_colorized
    def test_expected_failure_and_fail_in_cleanup(self):
        class Foo(unittest.TestCase):
            @unittest.expectedFailure

@@ -372,6 +374,7 @@ def test_die(self):
        self.assertEqual(result.unexpectedSuccesses, [test])
        self.assertFalse(result.wasSuccessful())

    @force_not_colorized
    def test_unexpected_success_and_fail_in_cleanup(self):
        class Foo(unittest.TestCase):
            @unittest.expectedFailure
@@ -189,7 +189,9 @@ def _exc_info_to_string(self, err, test):
        tb_e = traceback.TracebackException(
            exctype, value, tb,
            capture_locals=self.tb_locals, compact=True)
        msgLines = list(tb_e.format())
        from _colorize import can_colorize

        msgLines = list(tb_e.format(colorize=can_colorize()))

        if self.buffer:
            output = sys.stdout.getvalue()
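The hunk above switches _exc_info_to_string() to pass colorize= to traceback.TracebackException's format(), gated on _colorize.can_colorize(). A standalone sketch of the same pattern follows; _colorize is a private CPython helper and the colorize keyword is taken from the call added above, so treat both as internals rather than public API:

import traceback
from _colorize import can_colorize  # private CPython helper, as used above

def format_exception_lines(exc: BaseException) -> list[str]:
    # Mirrors the call added above: colourize only when the environment allows it.
    tb_e = traceback.TracebackException.from_exception(exc, compact=True)
    return list(tb_e.format(colorize=can_colorize()))

try:
    1 / 0
except ZeroDivisionError as exc:
    print("".join(format_exception_lines(exc)))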
@@ -4,6 +4,8 @@
import time
import warnings

from _colorize import get_colors

from . import result
from .case import _SubTest
from .signals import registerResult

@@ -13,18 +15,18 @@

class _WritelnDecorator(object):
    """Used to decorate file-like objects with a handy 'writeln' method"""
    def __init__(self,stream):
    def __init__(self, stream):
        self.stream = stream

    def __getattr__(self, attr):
        if attr in ('stream', '__getstate__'):
            raise AttributeError(attr)
        return getattr(self.stream,attr)
        return getattr(self.stream, attr)

    def writeln(self, arg=None):
        if arg:
            self.write(arg)
        self.write('\n') # text-mode streams translate to \r\n if needed
        self.write('\n')  # text-mode streams translate to \r\n if needed


class TextTestResult(result.TestResult):

@@ -43,6 +45,7 @@ def __init__(self, stream, descriptions, verbosity, *, durations=None):
        self.showAll = verbosity > 1
        self.dots = verbosity == 1
        self.descriptions = descriptions
        self._ansi = get_colors()
        self._newline = True
        self.durations = durations
@@ -76,86 +79,102 @@ def _write_status(self, test, status):

    def addSubTest(self, test, subtest, err):
        if err is not None:
            red, reset = self._ansi.RED, self._ansi.RESET
            if self.showAll:
                if issubclass(err[0], subtest.failureException):
                    self._write_status(subtest, "FAIL")
                    self._write_status(subtest, f"{red}FAIL{reset}")
                else:
                    self._write_status(subtest, "ERROR")
                    self._write_status(subtest, f"{red}ERROR{reset}")
            elif self.dots:
                if issubclass(err[0], subtest.failureException):
                    self.stream.write('F')
                    self.stream.write(f"{red}F{reset}")
                else:
                    self.stream.write('E')
                    self.stream.write(f"{red}E{reset}")
            self.stream.flush()
        super(TextTestResult, self).addSubTest(test, subtest, err)

    def addSuccess(self, test):
        super(TextTestResult, self).addSuccess(test)
        green, reset = self._ansi.GREEN, self._ansi.RESET
        if self.showAll:
            self._write_status(test, "ok")
            self._write_status(test, f"{green}ok{reset}")
        elif self.dots:
            self.stream.write('.')
            self.stream.write(f"{green}.{reset}")
        self.stream.flush()

    def addError(self, test, err):
        super(TextTestResult, self).addError(test, err)
        red, reset = self._ansi.RED, self._ansi.RESET
        if self.showAll:
            self._write_status(test, "ERROR")
            self._write_status(test, f"{red}ERROR{reset}")
        elif self.dots:
            self.stream.write('E')
            self.stream.write(f"{red}E{reset}")
        self.stream.flush()

    def addFailure(self, test, err):
        super(TextTestResult, self).addFailure(test, err)
        red, reset = self._ansi.RED, self._ansi.RESET
        if self.showAll:
            self._write_status(test, "FAIL")
            self._write_status(test, f"{red}FAIL{reset}")
        elif self.dots:
            self.stream.write('F')
            self.stream.write(f"{red}F{reset}")
        self.stream.flush()

    def addSkip(self, test, reason):
        super(TextTestResult, self).addSkip(test, reason)
        yellow, reset = self._ansi.YELLOW, self._ansi.RESET
        if self.showAll:
            self._write_status(test, "skipped {0!r}".format(reason))
            self._write_status(test, f"{yellow}skipped{reset} {reason!r}")
        elif self.dots:
            self.stream.write("s")
            self.stream.write(f"{yellow}s{reset}")
        self.stream.flush()

    def addExpectedFailure(self, test, err):
        super(TextTestResult, self).addExpectedFailure(test, err)
        yellow, reset = self._ansi.YELLOW, self._ansi.RESET
        if self.showAll:
            self.stream.writeln("expected failure")
            self.stream.writeln(f"{yellow}expected failure{reset}")
            self.stream.flush()
        elif self.dots:
            self.stream.write("x")
            self.stream.write(f"{yellow}x{reset}")
            self.stream.flush()

    def addUnexpectedSuccess(self, test):
        super(TextTestResult, self).addUnexpectedSuccess(test)
        red, reset = self._ansi.RED, self._ansi.RESET
        if self.showAll:
            self.stream.writeln("unexpected success")
            self.stream.writeln(f"{red}unexpected success{reset}")
            self.stream.flush()
        elif self.dots:
            self.stream.write("u")
            self.stream.write(f"{red}u{reset}")
            self.stream.flush()

    def printErrors(self):
        bold_red = self._ansi.BOLD_RED
        red = self._ansi.RED
        reset = self._ansi.RESET
        if self.dots or self.showAll:
            self.stream.writeln()
            self.stream.flush()
        self.printErrorList('ERROR', self.errors)
        self.printErrorList('FAIL', self.failures)
        unexpectedSuccesses = getattr(self, 'unexpectedSuccesses', ())
        self.printErrorList(f"{red}ERROR{reset}", self.errors)
        self.printErrorList(f"{red}FAIL{reset}", self.failures)
        unexpectedSuccesses = getattr(self, "unexpectedSuccesses", ())
        if unexpectedSuccesses:
            self.stream.writeln(self.separator1)
            for test in unexpectedSuccesses:
                self.stream.writeln(f"UNEXPECTED SUCCESS: {self.getDescription(test)}")
                self.stream.writeln(
                    f"{red}UNEXPECTED SUCCESS{bold_red}: "
                    f"{self.getDescription(test)}{reset}"
                )
            self.stream.flush()

    def printErrorList(self, flavour, errors):
        bold_red, reset = self._ansi.BOLD_RED, self._ansi.RESET
        for test, err in errors:
            self.stream.writeln(self.separator1)
            self.stream.writeln("%s: %s" % (flavour,self.getDescription(test)))
            self.stream.writeln(
                f"{flavour}{bold_red}: {self.getDescription(test)}{reset}"
            )
            self.stream.writeln(self.separator2)
            self.stream.writeln("%s" % err)
            self.stream.flush()
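The pattern repeated throughout the block above is to fetch an ANSI palette once via _colorize.get_colors() and interpolate its attributes into f-strings; when colour is off the attributes are empty strings, so the output degrades to the old plain text. A minimal sketch of that idea outside unittest, assuming only that get_colors() returns an object with GREEN/RED/RESET string attributes as it is used above:

import sys
from _colorize import get_colors  # private helper, imported the same way above

def summary_line(failures: int, stream=sys.stdout) -> None:
    ansi = get_colors()  # attributes are empty strings when colour is disabled
    if failures:
        stream.write(f"{ansi.RED}FAILED{ansi.RESET} (failures={failures})\n")
    else:
        stream.write(f"{ansi.GREEN}OK{ansi.RESET}\n")

summary_line(failures=0)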
@@ -232,7 +251,7 @@ def run(self, test):
            if self.warnings:
                # if self.warnings is set, use it to filter all the warnings
                warnings.simplefilter(self.warnings)
            startTime = time.perf_counter()
            start_time = time.perf_counter()
            startTestRun = getattr(result, 'startTestRun', None)
            if startTestRun is not None:
                startTestRun()

@@ -242,8 +261,8 @@ def run(self, test):
                stopTestRun = getattr(result, 'stopTestRun', None)
                if stopTestRun is not None:
                    stopTestRun()
            stopTime = time.perf_counter()
        timeTaken = stopTime - startTime
            stop_time = time.perf_counter()
        time_taken = stop_time - start_time
        result.printErrors()
        if self.durations is not None:
            self._printDurations(result)

@@ -253,10 +272,10 @@ def run(self, test):

        run = result.testsRun
        self.stream.writeln("Ran %d test%s in %.3fs" %
                            (run, run != 1 and "s" or "", timeTaken))
                            (run, run != 1 and "s" or "", time_taken))
        self.stream.writeln()

        expectedFails = unexpectedSuccesses = skipped = 0
        expected_fails = unexpected_successes = skipped = 0
        try:
            results = map(len, (result.expectedFailures,
                                result.unexpectedSuccesses,

@@ -264,26 +283,35 @@ def run(self, test):
        except AttributeError:
            pass
        else:
            expectedFails, unexpectedSuccesses, skipped = results
            expected_fails, unexpected_successes, skipped = results

        infos = []
        ansi = get_colors()
        bold_red = ansi.BOLD_RED
        green = ansi.GREEN
        red = ansi.RED
        reset = ansi.RESET
        yellow = ansi.YELLOW

        if not result.wasSuccessful():
            self.stream.write("FAILED")
            self.stream.write(f"{bold_red}FAILED{reset}")
            failed, errored = len(result.failures), len(result.errors)
            if failed:
                infos.append("failures=%d" % failed)
                infos.append(f"{bold_red}failures={failed}{reset}")
            if errored:
                infos.append("errors=%d" % errored)
                infos.append(f"{bold_red}errors={errored}{reset}")
        elif run == 0 and not skipped:
            self.stream.write("NO TESTS RAN")
            self.stream.write(f"{yellow}NO TESTS RAN{reset}")
        else:
            self.stream.write("OK")
            self.stream.write(f"{green}OK{reset}")
        if skipped:
            infos.append("skipped=%d" % skipped)
        if expectedFails:
            infos.append("expected failures=%d" % expectedFails)
        if unexpectedSuccesses:
            infos.append("unexpected successes=%d" % unexpectedSuccesses)
            infos.append(f"{yellow}skipped={skipped}{reset}")
        if expected_fails:
            infos.append(f"{yellow}expected failures={expected_fails}{reset}")
        if unexpected_successes:
            infos.append(
                f"{red}unexpected successes={unexpected_successes}{reset}"
            )
        if infos:
            self.stream.writeln(" (%s)" % (", ".join(infos),))
        else:
@@ -0,0 +1 @@
Add colour to :mod:`unittest` output. Patch by Hugo van Kemenade.