tests/basic_test_result.py @ 42386:15d5a2de44aa

tests: make run-tests exit non-zero if there are "errors"

Previously, if an error occurred, such as a broken .t file causing run-tests.py to hit an exception during parsing, the test was put in an "errored" state, which is separate from "failed". The check for whether to exit non-zero was based entirely on whether any test was in a "failed" state, so a run with only errors exited with 0. Our test infrastructure then considered the test as passing, leaving us with false-negative tests that went undetected for a few weeks.

Differential Revision: https://phab.mercurial-scm.org/D6452
author Kyle Lippincott <spectral@google.com>
date Tue, 28 May 2019 23:22:46 -0700
parents f4a214300957
children 2372284d9457
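The fix described in the changeset message amounts to folding errors into the exit-status check. A minimal sketch of that logic, assuming a unittest-style result object; the names below are illustrative, not run-tests.py's actual identifiers, and the file's real source follows after the sketch.

# Illustrative sketch only (not run-tests.py's real code): fold errors
# into the exit-status computation, as the changeset above describes.
import sys
import unittest

def exit_status(result):
    # Before the fix, only failures mattered, so a run with errors but
    # no failures exited 0 and looked like a pass:
    #     return 1 if result.failures else 0
    # After the fix, errors also force a non-zero exit:
    return 1 if (result.failures or result.errors) else 0

if __name__ == '__main__':
    # Hypothetical driver: run an (empty) suite and exit accordingly.
    res = unittest.TextTestRunner().run(unittest.TestSuite())
    sys.exit(exit_status(res))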

from __future__ import absolute_import, print_function

import unittest

# A bare-bones result class: each outcome is reported by printing a line
# rather than being formatted the way a normal unittest result would be.
class TestResult(unittest._TextTestResult):

    def __init__(self, options, *args, **kwargs):
        super(TestResult, self).__init__(*args, **kwargs)
        self._options = options

        # unittest.TestResult didn't have skipped until 2.7. We need to
        # polyfill it.
        self.skipped = []

        # We have a custom "ignored" result that isn't present in any Python
        # unittest implementation. It is very similar to skipped. It may make
        # sense to map it into skip some day.
        self.ignored = []

        self.times = []
        self._firststarttime = None
        # Data stored for the benefit of generating xunit reports.
        self.successes = []
        self.faildata = {}

    def addFailure(self, test, reason):
        print("FAILURE!", test, reason)

    def addSuccess(self, test):
        print("SUCCESS!", test)

    def addError(self, test, err):
        print("ERR!", test, err)

    # Polyfill.
    def addSkip(self, test, reason):
        print("SKIP!", test, reason)

    def addIgnore(self, test, reason):
        print("IGNORE!", test, reason)

    # onStart/onEnd are custom hooks, not part of the stock unittest API;
    # run-tests.py's runner invokes them at the start and end of the run.
    def onStart(self, test):
        print("ON_START!", test)

    def onEnd(self):
        print("ON_END!")

    def addOutputMismatch(self, test, ret, got, expected):
        # Returning False tells the caller the mismatched output was not
        # accepted here, so it should fall back to its default handling.
        return False

    def stopTest(self, test, interrupted=False):
        # Accept run-tests' extra "interrupted" flag, then defer to the
        # base class, which does not take that argument.
        super(TestResult, self).stopTest(test)
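
For reference, a hedged sketch of driving this class by hand, outside run-tests.py. The base class constructor takes (stream, descriptions, verbosity), and the trivial test case is invented for illustration. Note that unittest._TextTestResult is a long-deprecated alias that recent Python 3 releases have removed, so the sketch assumes an interpreter where it still exists.

import sys
import unittest

class _AlwaysPasses(unittest.TestCase):
    # Invented test case, purely to drive the result object above.
    def runTest(self):
        pass

# options=None; the remaining args feed unittest._TextTestResult's
# (stream, descriptions, verbosity) constructor.
result = TestResult(None, sys.stderr, True, 1)
_AlwaysPasses().run(result)  # prints "SUCCESS! ..." via addSuccess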