run-tests: add support for external test result
The goal is to begin experimenting with custom test results. I'm not sure we
should offer any backward-compatibility guarantee for this plugin API, as it
doesn't change often and shouldn't have too many clients.
Differential Revision: https://phab.mercurial-scm.org/D3700
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/basic_test_result.py Sat Apr 28 12:51:44 2018 +0200
@@ -0,0 +1,46 @@
+from __future__ import print_function
+
+import unittest
+
+class TestResult(unittest._TextTestResult):
+
+ def __init__(self, options, *args, **kwargs):
+ super(TestResult, self).__init__(*args, **kwargs)
+ self._options = options
+
+ # unittest.TestResult didn't have skipped until 2.7. We need to
+ # polyfill it.
+ self.skipped = []
+
+ # We have a custom "ignored" result that isn't present in any Python
+ # unittest implementation. It is very similar to skipped. It may make
+ # sense to map it into skip some day.
+ self.ignored = []
+
+ self.times = []
+ self._firststarttime = None
+ # Data stored for the benefit of generating xunit reports.
+ self.successes = []
+ self.faildata = {}
+
+ def addFailure(self, test, reason):
+ print("FAILURE!", test, reason)
+
+ def addSuccess(self, test):
+ print("SUCCESS!", test)
+
+ def addError(self, test, err):
+ print("ERR!", test, err)
+
+ # Polyfill.
+ def addSkip(self, test, reason):
+ print("SKIP!", test, reason)
+
+ def addIgnore(self, test, reason):
+ print("IGNORE!", test, reason)
+
+ def addOutputMismatch(self, test, ret, got, expected):
+ return False
+
+ def stopTest(self, test, interrupted=False):
+ super(TestResult, self).stopTest(test)
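
The sample class above only prints. As a hedged illustration of what an
experiment built on this hook might do instead, here is a hypothetical
subclass (RecordingTestResult and its outcomes attribute are made up for this
sketch, not part of the change) that accumulates outcomes while keeping
unittest's failure list populated, since run() in run-tests.py counts failed
tests with len(result.failures):

  # Hypothetical sketch: collect outcomes instead of printing them,
  # building on the TestResult class from basic_test_result.py above.
  class RecordingTestResult(TestResult):

      def __init__(self, options, *args, **kwargs):
          super(RecordingTestResult, self).__init__(options, *args, **kwargs)
          # list of (test name, outcome, detail) tuples gathered during the run
          self.outcomes = []

      def addSuccess(self, test):
          self.successes.append(test)
          self.outcomes.append((test.name, 'success', None))

      def addFailure(self, test, reason):
          # keep unittest's failure list populated so the runner's
          # "failed = len(result.failures)" count stays accurate
          self.failures.append((test, reason))
          self.outcomes.append((test.name, 'failure', reason))

      def addSkip(self, test, reason):
          self.skipped.append((test, reason))
          self.outcomes.append((test.name, 'skip', reason))
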
--- a/tests/run-tests.py Thu Mar 15 17:37:03 2018 +0530
+++ b/tests/run-tests.py Sat Apr 28 12:51:44 2018 +0200
@@ -1851,6 +1851,16 @@
self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
test.name, self.times[-1][3]))
+def getTestResult():
+ """
+    Return the test result class to use (honoring CUSTOM_TEST_RESULT)
+ """
+ if "CUSTOM_TEST_RESULT" in os.environ:
+ testresultmodule = __import__(os.environ["CUSTOM_TEST_RESULT"])
+ return testresultmodule.TestResult
+ else:
+ return TestResult
+
class TestSuite(unittest.TestSuite):
"""Custom unittest TestSuite that knows how to execute Mercurial tests."""
@@ -2090,8 +2100,8 @@
self._runner = runner
def listtests(self, test):
- result = TestResult(self._runner.options, self.stream,
- self.descriptions, 0)
+ result = getTestResult()(self._runner.options, self.stream,
+ self.descriptions, 0)
test = sorted(test, key=lambda t: t.name)
for t in test:
print(t.name)
@@ -2109,9 +2119,8 @@
return result
def run(self, test):
- result = TestResult(self._runner.options, self.stream,
- self.descriptions, self.verbosity)
-
+ result = getTestResult()(self._runner.options, self.stream,
+ self.descriptions, self.verbosity)
test(result)
failed = len(result.failures)
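
getTestResult() resolves the plugin with a plain __import__ of the
CUSTOM_TEST_RESULT value, so the named module must be importable when
run-tests.py starts (for instance by living in the tests directory or on
PYTHONPATH). A small, hypothetical sanity check that mirrors that lookup,
assuming it is run from the repository root with the same Python used for
the tests:

  import os
  import sys

  sys.path.insert(0, 'tests')  # assumption: the module lives in tests/
  os.environ['CUSTOM_TEST_RESULT'] = 'basic_test_result'

  # mirrors the lookup getTestResult() performs
  mod = __import__(os.environ['CUSTOM_TEST_RESULT'])
  print(mod.TestResult)  # the class run-tests.py will instantiate
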
--- a/tests/test-run-tests.t Thu Mar 15 17:37:03 2018 +0530
+++ b/tests/test-run-tests.t Sat Apr 28 12:51:44 2018 +0200
@@ -1246,6 +1246,15 @@
$ echo dead:beef::1
$LOCALIP (glob)
+Add support for external test result
+=====================================
+
+ $ CUSTOM_TEST_RESULT=basic_test_result $PYTHON $TESTDIR/run-tests.py --with-hg=`which hg` "$@" test-success.t test-failure.t
+
+ # Ran 2 tests, 0 skipped, 0 failed.
+ FAILURE! test-failure.t output changed
+ SUCCESS! test-success.t
+
Test reusability for third party tools
======================================