Mercurial > hg
comparison tests/run-tests.py @ 22391:c42e69268f5b
run-tests: added '--json' functionality to store test result in json file
This patch adds a new '--json' option. While testing, if '--json'
is enabled, the test result data gets stored in a newly created "report.json"
file in the following format.
testreport = {
    "test-success.t": {
        "result": "success",
        "time": "2.041"
    },
    "test-failure.t": {
        "result": "failure",
        "time": "4.430"
    },
    "test-skip.t": {
        "result": "skip",
        "time": "3.754"
    }
}
Otherwise, if '--json' is enabled but the json module is not installed, it
will raise an error "json module not installed".
This "report.json" file will later be accessed by an HTML/JavaScript file
for graph rendering.
author | anuraggoel <anurag.dsps@gmail.com> |
---|---|
date | Mon, 08 Sep 2014 16:41:00 +0200 |
parents | eb6adf750954 |
children | f166e08ece3b |
comparison
equal
deleted
inserted
replaced
22390:e2806b8613ca | 22391:c42e69268f5b |
---|---|
57 import threading | 57 import threading |
58 import killdaemons as killmod | 58 import killdaemons as killmod |
59 import Queue as queue | 59 import Queue as queue |
60 from xml.dom import minidom | 60 from xml.dom import minidom |
61 import unittest | 61 import unittest |
62 | |
63 try: | |
64 if sys.version_info < (2, 7): | |
65 import simplejson as json | |
66 else: | |
67 import json | |
68 except ImportError: | |
69 json = None | |
62 | 70 |
63 processlock = threading.Lock() | 71 processlock = threading.Lock() |
64 | 72 |
65 # subprocess._cleanup can race with any Popen.wait or Popen.poll on py24 | 73 # subprocess._cleanup can race with any Popen.wait or Popen.poll on py24 |
66 # http://bugs.python.org/issue1731717 for details. We shouldn't be producing | 74 # http://bugs.python.org/issue1731717 for details. We shouldn't be producing |
184 parser.add_option("-t", "--timeout", type="int", | 192 parser.add_option("-t", "--timeout", type="int", |
185 help="kill errant tests after TIMEOUT seconds" | 193 help="kill errant tests after TIMEOUT seconds" |
186 " (default: $%s or %d)" % defaults['timeout']) | 194 " (default: $%s or %d)" % defaults['timeout']) |
187 parser.add_option("--time", action="store_true", | 195 parser.add_option("--time", action="store_true", |
188 help="time how long each test takes") | 196 help="time how long each test takes") |
197 parser.add_option("--json", action="store_true", | |
198 help="store test result data in 'report.json' file") | |
189 parser.add_option("--tmpdir", type="string", | 199 parser.add_option("--tmpdir", type="string", |
190 help="run tests in the given temporary directory" | 200 help="run tests in the given temporary directory" |
191 " (implies --keep-tmpdir)") | 201 " (implies --keep-tmpdir)") |
192 parser.add_option("-v", "--verbose", action="store_true", | 202 parser.add_option("-v", "--verbose", action="store_true", |
193 help="output verbose messages") | 203 help="output verbose messages") |
1417 s.appendChild(t) | 1427 s.appendChild(t) |
1418 xuf.write(doc.toprettyxml(indent=' ', encoding='utf-8')) | 1428 xuf.write(doc.toprettyxml(indent=' ', encoding='utf-8')) |
1419 finally: | 1429 finally: |
1420 xuf.close() | 1430 xuf.close() |
1421 | 1431 |
1432 if self._runner.options.json: | |
1433 if json is None: | |
1434 raise ImportError("json module not installed") | |
1435 jsonpath = os.path.join(self._runner._testdir, 'report.json') | |
1436 fp = open(jsonpath, 'w') | |
1437 try: | |
1438 timesd = {} | |
1439 for test, cuser, csys, real in result.times: | |
1440 timesd[test] = real | |
1441 | |
1442 outcome = {} | |
1443 for tc in result.successes: | |
1444 testresult = {'result': 'success', | |
1445 'time': ('%0.3f' % timesd[tc.name])} | |
1446 outcome[tc.name] = testresult | |
1447 | |
1448 for tc, err in sorted(result.faildata.iteritems()): | |
1449 testresult = {'result': 'failure', | |
1450 'time': ('%0.3f' % timesd[tc])} | |
1451 outcome[tc] = testresult | |
1452 | |
1453 for tc, reason in result.skipped: | |
1454 testresult = {'result': 'skip', | |
1455 'time': ('%0.3f' % timesd[tc.name])} | |
1456 outcome[tc.name] = testresult | |
1457 | |
1458 jsonout = json.dumps(outcome, sort_keys=True, indent=4) | |
1459 fp.writelines(("testreport =", jsonout)) | |
1460 finally: | |
1461 fp.close() | |
1462 | |
1422 self._runner._checkhglib('Tested') | 1463 self._runner._checkhglib('Tested') |
1423 | 1464 |
1424 self.stream.writeln('# Ran %d tests, %d skipped, %d warned, %d failed.' | 1465 self.stream.writeln('# Ran %d tests, %d skipped, %d warned, %d failed.' |
1425 % (result.testsRun, | 1466 % (result.testsRun, |
1426 skipped + ignored, warned, failed)) | 1467 skipped + ignored, warned, failed)) |