path: root/contrib/tools/python3/src/Lib/unittest/runner.py
author      orivej <orivej@yandex-team.ru>    2022-02-10 16:44:49 +0300
committer   Daniil Cherednik <dcherednik@yandex-team.ru>    2022-02-10 16:44:49 +0300
commit      718c552901d703c502ccbefdfc3c9028d608b947 (patch)
tree        46534a98bbefcd7b1f3faa5b52c138ab27db75b7 /contrib/tools/python3/src/Lib/unittest/runner.py
parent      e9656aae26e0358d5378e5b63dcac5c8dbe0e4d0 (diff)
download    ydb-718c552901d703c502ccbefdfc3c9028d608b947.tar.gz
Restoring authorship annotation for <orivej@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/tools/python3/src/Lib/unittest/runner.py')
-rw-r--r--    contrib/tools/python3/src/Lib/unittest/runner.py  |  442
1 file changed, 221 insertions(+), 221 deletions(-)
diff --git a/contrib/tools/python3/src/Lib/unittest/runner.py b/contrib/tools/python3/src/Lib/unittest/runner.py
index caf159002d..b0bab333da 100644
--- a/contrib/tools/python3/src/Lib/unittest/runner.py
+++ b/contrib/tools/python3/src/Lib/unittest/runner.py
@@ -1,230 +1,230 @@
-"""Running tests"""
-
-import sys
-import time
-import warnings
-
-from . import result
-from .signals import registerResult
-
-__unittest = True
-
-
-class _WritelnDecorator(object):
- """Used to decorate file-like objects with a handy 'writeln' method"""
- def __init__(self,stream):
- self.stream = stream
-
- def __getattr__(self, attr):
- if attr in ('stream', '__getstate__'):
- raise AttributeError(attr)
- return getattr(self.stream,attr)
-
- def writeln(self, arg=None):
- if arg:
- self.write(arg)
- self.write('\n') # text-mode streams translate to \r\n if needed
-
-
-class TextTestResult(result.TestResult):
- """A test result class that can print formatted text results to a stream.
-
- Used by TextTestRunner.
- """
- separator1 = '=' * 70
- separator2 = '-' * 70
-
- def __init__(self, stream, descriptions, verbosity):
- super(TextTestResult, self).__init__(stream, descriptions, verbosity)
- self.stream = stream
- self.showAll = verbosity > 1
- self.dots = verbosity == 1
- self.descriptions = descriptions
-
- def getDescription(self, test):
- doc_first_line = test.shortDescription()
- if self.descriptions and doc_first_line:
- return '\n'.join((str(test), doc_first_line))
- else:
- return str(test)
-
- def startTest(self, test):
- super(TextTestResult, self).startTest(test)
- if self.showAll:
- self.stream.write(self.getDescription(test))
- self.stream.write(" ... ")
+"""Running tests"""
+
+import sys
+import time
+import warnings
+
+from . import result
+from .signals import registerResult
+
+__unittest = True
+
+
+class _WritelnDecorator(object):
+ """Used to decorate file-like objects with a handy 'writeln' method"""
+ def __init__(self,stream):
+ self.stream = stream
+
+ def __getattr__(self, attr):
+ if attr in ('stream', '__getstate__'):
+ raise AttributeError(attr)
+ return getattr(self.stream,attr)
+
+ def writeln(self, arg=None):
+ if arg:
+ self.write(arg)
+ self.write('\n') # text-mode streams translate to \r\n if needed
+
+
+class TextTestResult(result.TestResult):
+ """A test result class that can print formatted text results to a stream.
+
+ Used by TextTestRunner.
+ """
+ separator1 = '=' * 70
+ separator2 = '-' * 70
+
+ def __init__(self, stream, descriptions, verbosity):
+ super(TextTestResult, self).__init__(stream, descriptions, verbosity)
+ self.stream = stream
+ self.showAll = verbosity > 1
+ self.dots = verbosity == 1
+ self.descriptions = descriptions
+
+ def getDescription(self, test):
+ doc_first_line = test.shortDescription()
+ if self.descriptions and doc_first_line:
+ return '\n'.join((str(test), doc_first_line))
+ else:
+ return str(test)
+
+ def startTest(self, test):
+ super(TextTestResult, self).startTest(test)
+ if self.showAll:
+ self.stream.write(self.getDescription(test))
+ self.stream.write(" ... ")
+ self.stream.flush()
+
+ def addSuccess(self, test):
+ super(TextTestResult, self).addSuccess(test)
+ if self.showAll:
+ self.stream.writeln("ok")
self.stream.flush()
-
- def addSuccess(self, test):
- super(TextTestResult, self).addSuccess(test)
- if self.showAll:
- self.stream.writeln("ok")
+ elif self.dots:
+ self.stream.write('.')
+ self.stream.flush()
+
+ def addError(self, test, err):
+ super(TextTestResult, self).addError(test, err)
+ if self.showAll:
+ self.stream.writeln("ERROR")
self.stream.flush()
- elif self.dots:
- self.stream.write('.')
+ elif self.dots:
+ self.stream.write('E')
+ self.stream.flush()
+
+ def addFailure(self, test, err):
+ super(TextTestResult, self).addFailure(test, err)
+ if self.showAll:
+ self.stream.writeln("FAIL")
self.stream.flush()
-
- def addError(self, test, err):
- super(TextTestResult, self).addError(test, err)
- if self.showAll:
- self.stream.writeln("ERROR")
+ elif self.dots:
+ self.stream.write('F')
+ self.stream.flush()
+
+ def addSkip(self, test, reason):
+ super(TextTestResult, self).addSkip(test, reason)
+ if self.showAll:
+ self.stream.writeln("skipped {0!r}".format(reason))
self.stream.flush()
- elif self.dots:
- self.stream.write('E')
+ elif self.dots:
+ self.stream.write("s")
+ self.stream.flush()
+
+ def addExpectedFailure(self, test, err):
+ super(TextTestResult, self).addExpectedFailure(test, err)
+ if self.showAll:
+ self.stream.writeln("expected failure")
self.stream.flush()
-
- def addFailure(self, test, err):
- super(TextTestResult, self).addFailure(test, err)
- if self.showAll:
- self.stream.writeln("FAIL")
+ elif self.dots:
+ self.stream.write("x")
+ self.stream.flush()
+
+ def addUnexpectedSuccess(self, test):
+ super(TextTestResult, self).addUnexpectedSuccess(test)
+ if self.showAll:
+ self.stream.writeln("unexpected success")
self.stream.flush()
- elif self.dots:
- self.stream.write('F')
+ elif self.dots:
+ self.stream.write("u")
+ self.stream.flush()
+
+ def printErrors(self):
+ if self.dots or self.showAll:
+ self.stream.writeln()
self.stream.flush()
-
- def addSkip(self, test, reason):
- super(TextTestResult, self).addSkip(test, reason)
- if self.showAll:
- self.stream.writeln("skipped {0!r}".format(reason))
+ self.printErrorList('ERROR', self.errors)
+ self.printErrorList('FAIL', self.failures)
+
+ def printErrorList(self, flavour, errors):
+ for test, err in errors:
+ self.stream.writeln(self.separator1)
+ self.stream.writeln("%s: %s" % (flavour,self.getDescription(test)))
+ self.stream.writeln(self.separator2)
+ self.stream.writeln("%s" % err)
self.stream.flush()
- elif self.dots:
- self.stream.write("s")
- self.stream.flush()
-
- def addExpectedFailure(self, test, err):
- super(TextTestResult, self).addExpectedFailure(test, err)
- if self.showAll:
- self.stream.writeln("expected failure")
- self.stream.flush()
- elif self.dots:
- self.stream.write("x")
- self.stream.flush()
-
- def addUnexpectedSuccess(self, test):
- super(TextTestResult, self).addUnexpectedSuccess(test)
- if self.showAll:
- self.stream.writeln("unexpected success")
- self.stream.flush()
- elif self.dots:
- self.stream.write("u")
- self.stream.flush()
-
- def printErrors(self):
- if self.dots or self.showAll:
- self.stream.writeln()
- self.stream.flush()
- self.printErrorList('ERROR', self.errors)
- self.printErrorList('FAIL', self.failures)
-
- def printErrorList(self, flavour, errors):
- for test, err in errors:
- self.stream.writeln(self.separator1)
- self.stream.writeln("%s: %s" % (flavour,self.getDescription(test)))
- self.stream.writeln(self.separator2)
- self.stream.writeln("%s" % err)
- self.stream.flush()
-
-
-class TextTestRunner(object):
- """A test runner class that displays results in textual form.
-
- It prints out the names of tests as they are run, errors as they
- occur, and a summary of the results at the end of the test run.
- """
- resultclass = TextTestResult
-
- def __init__(self, stream=None, descriptions=True, verbosity=1,
- failfast=False, buffer=False, resultclass=None, warnings=None,
- *, tb_locals=False):
- """Construct a TextTestRunner.
-
- Subclasses should accept **kwargs to ensure compatibility as the
- interface changes.
- """
- if stream is None:
- stream = sys.stderr
- self.stream = _WritelnDecorator(stream)
- self.descriptions = descriptions
- self.verbosity = verbosity
- self.failfast = failfast
- self.buffer = buffer
- self.tb_locals = tb_locals
- self.warnings = warnings
- if resultclass is not None:
- self.resultclass = resultclass
-
- def _makeResult(self):
- return self.resultclass(self.stream, self.descriptions, self.verbosity)
-
- def run(self, test):
- "Run the given test case or test suite."
- result = self._makeResult()
- registerResult(result)
- result.failfast = self.failfast
- result.buffer = self.buffer
- result.tb_locals = self.tb_locals
- with warnings.catch_warnings():
- if self.warnings:
- # if self.warnings is set, use it to filter all the warnings
- warnings.simplefilter(self.warnings)
- # if the filter is 'default' or 'always', special-case the
- # warnings from the deprecated unittest methods to show them
- # no more than once per module, because they can be fairly
- # noisy. The -Wd and -Wa flags can be used to bypass this
- # only when self.warnings is None.
- if self.warnings in ['default', 'always']:
- warnings.filterwarnings('module',
- category=DeprecationWarning,
- message=r'Please use assert\w+ instead.')
- startTime = time.perf_counter()
- startTestRun = getattr(result, 'startTestRun', None)
- if startTestRun is not None:
- startTestRun()
- try:
- test(result)
- finally:
- stopTestRun = getattr(result, 'stopTestRun', None)
- if stopTestRun is not None:
- stopTestRun()
- stopTime = time.perf_counter()
- timeTaken = stopTime - startTime
- result.printErrors()
- if hasattr(result, 'separator2'):
- self.stream.writeln(result.separator2)
- run = result.testsRun
- self.stream.writeln("Ran %d test%s in %.3fs" %
- (run, run != 1 and "s" or "", timeTaken))
- self.stream.writeln()
-
- expectedFails = unexpectedSuccesses = skipped = 0
- try:
- results = map(len, (result.expectedFailures,
- result.unexpectedSuccesses,
- result.skipped))
- except AttributeError:
- pass
- else:
- expectedFails, unexpectedSuccesses, skipped = results
-
- infos = []
- if not result.wasSuccessful():
- self.stream.write("FAILED")
- failed, errored = len(result.failures), len(result.errors)
- if failed:
- infos.append("failures=%d" % failed)
- if errored:
- infos.append("errors=%d" % errored)
- else:
- self.stream.write("OK")
- if skipped:
- infos.append("skipped=%d" % skipped)
- if expectedFails:
- infos.append("expected failures=%d" % expectedFails)
- if unexpectedSuccesses:
- infos.append("unexpected successes=%d" % unexpectedSuccesses)
- if infos:
- self.stream.writeln(" (%s)" % (", ".join(infos),))
- else:
- self.stream.write("\n")
+
+
+class TextTestRunner(object):
+ """A test runner class that displays results in textual form.
+
+ It prints out the names of tests as they are run, errors as they
+ occur, and a summary of the results at the end of the test run.
+ """
+ resultclass = TextTestResult
+
+ def __init__(self, stream=None, descriptions=True, verbosity=1,
+ failfast=False, buffer=False, resultclass=None, warnings=None,
+ *, tb_locals=False):
+ """Construct a TextTestRunner.
+
+ Subclasses should accept **kwargs to ensure compatibility as the
+ interface changes.
+ """
+ if stream is None:
+ stream = sys.stderr
+ self.stream = _WritelnDecorator(stream)
+ self.descriptions = descriptions
+ self.verbosity = verbosity
+ self.failfast = failfast
+ self.buffer = buffer
+ self.tb_locals = tb_locals
+ self.warnings = warnings
+ if resultclass is not None:
+ self.resultclass = resultclass
+
+ def _makeResult(self):
+ return self.resultclass(self.stream, self.descriptions, self.verbosity)
+
+ def run(self, test):
+ "Run the given test case or test suite."
+ result = self._makeResult()
+ registerResult(result)
+ result.failfast = self.failfast
+ result.buffer = self.buffer
+ result.tb_locals = self.tb_locals
+ with warnings.catch_warnings():
+ if self.warnings:
+ # if self.warnings is set, use it to filter all the warnings
+ warnings.simplefilter(self.warnings)
+ # if the filter is 'default' or 'always', special-case the
+ # warnings from the deprecated unittest methods to show them
+ # no more than once per module, because they can be fairly
+ # noisy. The -Wd and -Wa flags can be used to bypass this
+ # only when self.warnings is None.
+ if self.warnings in ['default', 'always']:
+ warnings.filterwarnings('module',
+ category=DeprecationWarning,
+ message=r'Please use assert\w+ instead.')
+ startTime = time.perf_counter()
+ startTestRun = getattr(result, 'startTestRun', None)
+ if startTestRun is not None:
+ startTestRun()
+ try:
+ test(result)
+ finally:
+ stopTestRun = getattr(result, 'stopTestRun', None)
+ if stopTestRun is not None:
+ stopTestRun()
+ stopTime = time.perf_counter()
+ timeTaken = stopTime - startTime
+ result.printErrors()
+ if hasattr(result, 'separator2'):
+ self.stream.writeln(result.separator2)
+ run = result.testsRun
+ self.stream.writeln("Ran %d test%s in %.3fs" %
+ (run, run != 1 and "s" or "", timeTaken))
+ self.stream.writeln()
+
+ expectedFails = unexpectedSuccesses = skipped = 0
+ try:
+ results = map(len, (result.expectedFailures,
+ result.unexpectedSuccesses,
+ result.skipped))
+ except AttributeError:
+ pass
+ else:
+ expectedFails, unexpectedSuccesses, skipped = results
+
+ infos = []
+ if not result.wasSuccessful():
+ self.stream.write("FAILED")
+ failed, errored = len(result.failures), len(result.errors)
+ if failed:
+ infos.append("failures=%d" % failed)
+ if errored:
+ infos.append("errors=%d" % errored)
+ else:
+ self.stream.write("OK")
+ if skipped:
+ infos.append("skipped=%d" % skipped)
+ if expectedFails:
+ infos.append("expected failures=%d" % expectedFails)
+ if unexpectedSuccesses:
+ infos.append("unexpected successes=%d" % unexpectedSuccesses)
+ if infos:
+ self.stream.writeln(" (%s)" % (", ".join(infos),))
+ else:
+ self.stream.write("\n")
self.stream.flush()
- return result
+ return result
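
For context, the classes in this file are typically driven as sketched below. This is an illustrative example, not part of the diff: the MathTest case and the io.StringIO capture are assumptions added here to show how TextTestRunner writes per-test lines through the _WritelnDecorator-wrapped stream when verbosity is 2, and how run() returns the TextTestResult used for the final summary.

import io
import unittest


class MathTest(unittest.TestCase):
    """Tiny illustrative test case (not part of runner.py)."""

    def test_addition(self):
        self.assertEqual(1 + 1, 2)

    @unittest.skip("exercises the 'skipped' counter in the summary")
    def test_skipped(self):
        pass


if __name__ == "__main__":
    suite = unittest.defaultTestLoader.loadTestsFromTestCase(MathTest)

    # verbosity=2 turns on showAll in TextTestResult, so each test name is
    # written followed by "ok" / "FAIL" / "skipped ..."; verbosity=1 prints
    # the one-character dot output instead.
    stream = io.StringIO()
    runner = unittest.TextTestRunner(stream=stream, verbosity=2)
    result = runner.run(suite)

    print(stream.getvalue())
    print("successful:", result.wasSuccessful())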
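
The resultclass hook accepted by TextTestRunner.__init__ (and consumed by _makeResult) can be illustrated with a small subclass. Again a hedged sketch: the AnnotatedTextTestResult name, the PassingTest case, and the extra note are assumptions for illustration only.

import unittest


class AnnotatedTextTestResult(unittest.TextTestResult):
    """Hypothetical subclass that appends a note after each passing test."""

    def addSuccess(self, test):
        super().addSuccess(test)
        if self.showAll:
            # self.stream is the _WritelnDecorator-wrapped runner stream,
            # so writeln() is available here.
            self.stream.writeln("    (annotated: passed)")


class PassingTest(unittest.TestCase):
    def test_ok(self):
        self.assertTrue(True)


if __name__ == "__main__":
    suite = unittest.defaultTestLoader.loadTestsFromTestCase(PassingTest)
    # Passing resultclass makes _makeResult() build the subclass instead of
    # the default TextTestResult.
    runner = unittest.TextTestRunner(verbosity=2,
                                     resultclass=AnnotatedTextTestResult)
    runner.run(suite)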