run-tests: add '--json' option to store test result data in a json file
This patch adds a new option, '--json'. When '--json' is enabled, test result
data is stored in a newly created "report.json" file in the test directory,
in the following format.
testreport ={
    "test-failure.t": {
        "result": "failure",
        "time": "4.430"
    },
    "test-skip.t": {
        "result": "skip",
        "time": "3.754"
    },
    "test-success.t": {
        "result": "success",
        "time": "2.041"
    }
}
If '--json' is enabled but the json module is not installed, run-tests.py
raises an ImportError ("json module not installed").
The "report.json" file can then be read by an HTML/JavaScript page to render
graphs of the test results.
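For illustration only (not part of this patch), a minimal Python sketch of how
"report.json" could be read back; the 'loadreport' helper name is hypothetical,
and the "testreport =" prefix is stripped so the remainder parses as plain JSON:

    import json

    def loadreport(path='report.json'):
        # The file is not bare JSON: it starts with "testreport =" so that it
        # can also be included directly by an HTML/JavaScript page as a script.
        with open(path) as fp:
            data = fp.read()
        prefix = 'testreport ='
        if data.startswith(prefix):
            data = data[len(prefix):]
        return json.loads(data)

    # Example: print the outcome and time of every test in the report.
    for name, info in sorted(loadreport().items()):
        print('%s: %s (%ss)' % (name, info['result'], info['time']))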
--- a/tests/run-tests.py Tue Sep 02 12:11:36 2014 +0200
+++ b/tests/run-tests.py Mon Sep 08 16:41:00 2014 +0200
@@ -60,6 +60,14 @@
from xml.dom import minidom
import unittest
+try:
+ if sys.version_info < (2, 7):
+ import simplejson as json
+ else:
+ import json
+except ImportError:
+ json = None
+
processlock = threading.Lock()
# subprocess._cleanup can race with any Popen.wait or Popen.poll on py24
@@ -186,6 +194,8 @@
" (default: $%s or %d)" % defaults['timeout'])
parser.add_option("--time", action="store_true",
help="time how long each test takes")
+ parser.add_option("--json", action="store_true",
+ help="store test result data in 'report.json' file")
parser.add_option("--tmpdir", type="string",
help="run tests in the given temporary directory"
" (implies --keep-tmpdir)")
@@ -1419,6 +1429,37 @@
finally:
xuf.close()
+ if self._runner.options.json:
+ if json is None:
+ raise ImportError("json module not installed")
+ jsonpath = os.path.join(self._runner._testdir, 'report.json')
+ fp = open(jsonpath, 'w')
+ try:
+ timesd = {}
+ for test, cuser, csys, real in result.times:
+ timesd[test] = real
+
+ outcome = {}
+ for tc in result.successes:
+ testresult = {'result': 'success',
+ 'time': ('%0.3f' % timesd[tc.name])}
+ outcome[tc.name] = testresult
+
+ for tc, err in sorted(result.faildata.iteritems()):
+ testresult = {'result': 'failure',
+ 'time': ('%0.3f' % timesd[tc])}
+ outcome[tc] = testresult
+
+ for tc, reason in result.skipped:
+ testresult = {'result': 'skip',
+ 'time': ('%0.3f' % timesd[tc.name])}
+ outcome[tc.name] = testresult
+
+ jsonout = json.dumps(outcome, sort_keys=True, indent=4)
+ fp.writelines(("testreport =", jsonout))
+ finally:
+ fp.close()
+
self._runner._checkhglib('Tested')
self.stream.writeln('# Ran %d tests, %d skipped, %d warned, %d failed.'
--- a/tests/test-run-tests.t Tue Sep 02 12:11:36 2014 +0200
+++ b/tests/test-run-tests.t Mon Sep 08 16:41:00 2014 +0200
@@ -369,3 +369,40 @@
Skipped test-failure.t: blacklisted
# Ran 0 tests, 2 skipped, 0 warned, 0 failed.
+test for --json
+==================
+
+ $ $TESTDIR/run-tests.py --with-hg=`which hg` --json
+
+ --- $TESTTMP/test-failure.t
+ +++ $TESTTMP/test-failure.t.err
+ @@ -1,4 +1,4 @@
+ $ echo babar
+ - rataxes
+ + babar
+ This is a noop statement so that
+ this test is still more bytes than success.
+
+ ERROR: test-failure.t output changed
+ !.s
+ Skipped test-skip.t: skipped
+ Failed test-failure.t: output changed
+ # Ran 2 tests, 1 skipped, 0 warned, 1 failed.
+ python hash seed: * (glob)
+ [1]
+
+ $ cat report.json
+ testreport ={
+ "test-failure.t": [\{] (re)
+ "result": "failure",
+ "time": "\s*[\d\.]{5}" (re)
+ },
+ "test-skip.t": {
+ "result": "skip",
+ "time": "\s*[\d\.]{5}" (re)
+ },
+ "test-success.t": [\{] (re)
+ "result": "success",
+ "time": "\s*[\d\.]{5}" (re)
+ }
+ } (no-eol)