From 9200d2e4b94e5ffede9dc6d577a3bee3fc11b2b6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jan=20Mus=C3=ADlek?=
Date: Sat, 13 Jul 2024 15:04:11 +0200
Subject: [PATCH] gh-68437: Add TestCase.status attribute

This attribute can be used in the teardown/cleanup phase to check the
status of the test being run.
---
 Doc/library/unittest.rst                      | 19 +++++
 Lib/test/test_unittest/test_case.py           | 70 +++++++++++++++++++
 Lib/unittest/__init__.py                      |  5 +-
 Lib/unittest/case.py                          | 22 ++++++
 ...4-07-13-14-46-00.gh-issue-68437.wAeQB0.rst |  3 +
 5 files changed, 117 insertions(+), 2 deletions(-)
 create mode 100644 Misc/NEWS.d/next/Library/2024-07-13-14-46-00.gh-issue-68437.wAeQB0.rst

diff --git a/Doc/library/unittest.rst b/Doc/library/unittest.rst
index eb42210e096..7bfc43ef8b5 100644
--- a/Doc/library/unittest.rst
+++ b/Doc/library/unittest.rst
@@ -1442,6 +1442,25 @@ Test cases
 
       .. versionadded:: 3.2
 
+   .. attribute:: status
+
+      This attribute contains the resulting status of a single test run.
+      It can be used to check the test status in :meth:`tearDown` and
+      during the cleanup phase.  Possible values are ``None`` or any of
+      the members of the ``unittest.TestStatus`` enum:
+
+      ======================= =================================================
+      Enum name               Details
+      ======================= =================================================
+      ``SUCCESS``             Test succeeded.
+      ``UNEXPECTED_SUCCESS``  Test succeeded, but was expected to fail.
+      ``FAILED``              Test failed.
+      ``EXPECTED_FAILURE``    Test failed, as was expected.
+      ``SKIPPED``             Test was skipped.
+      ======================= =================================================
+
+      .. versionadded:: 3.14
+
    Testing frameworks can use the following methods to collect
    information on the test:
 
diff --git a/Lib/test/test_unittest/test_case.py b/Lib/test/test_unittest/test_case.py
index b4b2194a09c..d31ef2f78ed 100644
--- a/Lib/test/test_unittest/test_case.py
+++ b/Lib/test/test_unittest/test_case.py
@@ -2016,6 +2016,76 @@ def test2(self):
         gc_collect()  # For PyPy or other GCs.
         self.assertEqual(MyException.ninstance, 0)
 
+    def test_status(self):
+        # Issue 68437 - unittest API for detecting test failure in cleanup/teardown
+        class Foo(unittest.TestCase):
+            def test_success(self):
+                pass
+            @unittest.expectedFailure
+            def test_unexpected_success(self):
+                pass
+            def test_failed(self):
+                self.assertTrue(False)
+            @unittest.expectedFailure
+            def test_expected_failure(self):
+                self.assertTrue(False)
+            @unittest.skip
+            def test_skipped(self):
+                self.assertTrue(False)
+            def test_skipped_exception(self):
+                raise unittest.SkipTest()
+            def test_subtest_success(self):
+                with self.subTest():
+                    pass
+                with self.subTest():
+                    pass
+            def test_subtest_failed1(self):
+                with self.subTest():
+                    self.assertTrue(True)
+                with self.subTest():
+                    self.assertTrue(False)
+            def test_subtest_failed2(self):
+                with self.subTest():
+                    self.assertTrue(False)
+                with self.subTest():
+                    self.assertTrue(True)
+
+        test = Foo('test_success')
+        test.run()
+        self.assertEqual(test.status, unittest.TestStatus.SUCCESS)
+
+        test = Foo('test_unexpected_success')
+        test.run()
+        self.assertEqual(test.status, unittest.TestStatus.UNEXPECTED_SUCCESS)
+
+        test = Foo('test_failed')
+        test.run()
+        self.assertEqual(test.status, unittest.TestStatus.FAILED)
+
+        test = Foo('test_expected_failure')
+        test.run()
+        self.assertEqual(test.status, unittest.TestStatus.EXPECTED_FAILURE)
+
+        test = Foo('test_skipped')
+        test.run()
+        self.assertEqual(test.status, unittest.TestStatus.SKIPPED)
+
+        test = Foo('test_skipped_exception')
+        test.run()
+        self.assertEqual(test.status, unittest.TestStatus.SKIPPED)
+
+        test = Foo('test_subtest_success')
+        test.run()
+        self.assertEqual(test.status, unittest.TestStatus.SUCCESS)
+
+        test = Foo('test_subtest_failed1')
+        test.run()
+        self.assertEqual(test.status, unittest.TestStatus.FAILED)
+
+        test = Foo('test_subtest_failed2')
+        test.run()
+        self.assertEqual(test.status, unittest.TestStatus.FAILED)
+
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/Lib/unittest/__init__.py b/Lib/unittest/__init__.py
index 324e5d038ae..43cc5156a4f 100644
--- a/Lib/unittest/__init__.py
+++ b/Lib/unittest/__init__.py
@@ -49,14 +49,15 @@ def testMultiply(self):
            'defaultTestLoader', 'SkipTest', 'skip', 'skipIf', 'skipUnless',
            'expectedFailure', 'TextTestResult', 'installHandler',
            'registerResult', 'removeResult', 'removeHandler',
-           'addModuleCleanup', 'doModuleCleanups', 'enterModuleContext']
+           'addModuleCleanup', 'doModuleCleanups', 'enterModuleContext',
+           'TestStatus']
 
 __unittest = True
 
 from .result import TestResult
 from .case import (addModuleCleanup, TestCase, FunctionTestCase, SkipTest,
                    skip, skipIf, skipUnless, expectedFailure, doModuleCleanups,
-                   enterModuleContext)
+                   enterModuleContext, TestStatus)
 from .suite import BaseTestSuite, TestSuite  # noqa: F401
 from .loader import TestLoader, defaultTestLoader
 from .main import TestProgram, main  # noqa: F401
diff --git a/Lib/unittest/case.py b/Lib/unittest/case.py
index 55c79d35353..9ddec1a8310 100644
--- a/Lib/unittest/case.py
+++ b/Lib/unittest/case.py
@@ -3,6 +3,7 @@
 import sys
 import functools
 import difflib
+import enum
 import pprint
 import re
 import warnings
@@ -42,24 +43,35 @@ class _UnexpectedSuccess(Exception):
     """
 
 
+class TestStatus(enum.StrEnum):
+    SUCCESS = "success"
+    UNEXPECTED_SUCCESS = "unexpected_success"
+    FAILED = "failed"
+    EXPECTED_FAILURE = "expected_failure"
+    SKIPPED = "skipped"
+
+
 class _Outcome(object):
     def __init__(self, result=None):
         self.expecting_failure = False
         self.result = result
         self.result_supports_subtests = hasattr(result, "addSubTest")
         self.success = True
+        self.status = None
         self.expectedFailure = None
 
     @contextlib.contextmanager
     def testPartExecutor(self, test_case, subTest=False):
         old_success = self.success
         self.success = True
+        status = TestStatus.SUCCESS
         try:
             yield
         except KeyboardInterrupt:
             raise
         except SkipTest as e:
             self.success = False
+            status = TestStatus.SKIPPED
             _addSkip(self.result, test_case, str(e))
         except _ShouldStop:
             pass
@@ -67,8 +79,10 @@ def testPartExecutor(self, test_case, subTest=False):
             exc_info = sys.exc_info()
             if self.expecting_failure:
                 self.expectedFailure = exc_info
+                status = TestStatus.EXPECTED_FAILURE
             else:
                 self.success = False
+                status = TestStatus.FAILED
                 if subTest:
                     self.result.addSubTest(test_case.test_case, test_case, exc_info)
                 else:
@@ -77,10 +91,15 @@ def testPartExecutor(self, test_case, subTest=False):
             # exc_info -> frame -> exc_info
             exc_info = None
         else:
+            if self.expecting_failure:
+                status = TestStatus.UNEXPECTED_SUCCESS
             if subTest and self.success:
                 self.result.addSubTest(test_case.test_case, test_case, None)
         finally:
             self.success = self.success and old_success
+            # If any of the previous subTests failed, keep the failed status
+            if self.status not in (TestStatus.FAILED, TestStatus.EXPECTED_FAILURE):
+                self.status = status
 
 
 def _addSkip(result, test_case, reason):
@@ -428,6 +447,7 @@ def __init__(self, methodName='runTest'):
         self._testMethodDoc = testMethod.__doc__
         self._cleanups = []
         self._subtest = None
+        self.status = None
 
         # Map types to custom assertEqual functions that will compare
         # instances of said type in more detail to generate a more useful
@@ -640,6 +660,7 @@ def run(self, result=None):
             # If the class or method was skipped.
             skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
                         or getattr(testMethod, '__unittest_skip_why__', ''))
+            self.status = TestStatus.SKIPPED
             _addSkip(result, self, skip_why)
             return result
 
@@ -658,6 +679,7 @@ def run(self, result=None):
             outcome.expecting_failure = expecting_failure
             with outcome.testPartExecutor(self):
                 self._callTestMethod(testMethod)
+            self.status = outcome.status
             outcome.expecting_failure = False
             with outcome.testPartExecutor(self):
                 self._callTearDown()
diff --git a/Misc/NEWS.d/next/Library/2024-07-13-14-46-00.gh-issue-68437.wAeQB0.rst b/Misc/NEWS.d/next/Library/2024-07-13-14-46-00.gh-issue-68437.wAeQB0.rst
new file mode 100644
index 00000000000..c73343620ac
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2024-07-13-14-46-00.gh-issue-68437.wAeQB0.rst
@@ -0,0 +1,3 @@
+Add a new attribute ``status`` to :class:`unittest.TestCase` containing a
+member of the (also new) ``unittest.TestStatus`` enum.  It can be used to
+check the test result in the ``tearDown`` / cleanup phase.