D3701: run-tests: extract onStart and onEnd into the test result

lothiraldan (Boris Feld) phabricator at mercurial-scm.org
Tue Jun 12 17:05:37 EDT 2018


lothiraldan updated this revision to Diff 9032.

REPOSITORY
  rHG Mercurial

CHANGES SINCE LAST UPDATE
  https://phab.mercurial-scm.org/D3701?vs=8993&id=9032

REVISION DETAIL
  https://phab.mercurial-scm.org/D3701

AFFECTED FILES
  tests/run-tests.py

CHANGE DETAILS

diff --git a/tests/run-tests.py b/tests/run-tests.py
--- a/tests/run-tests.py
+++ b/tests/run-tests.py
@@ -1711,6 +1711,11 @@
         else: # 'always', for testing purposes
             self.color = pygmentspresent
 
+    def onStart(self, test):
+        """ Can be overridden by custom TestResult
+        """
+        pass
+
     def addFailure(self, test, reason):
         self.failures.append((test, reason))
 
@@ -2099,71 +2104,73 @@
         super(TextTestRunner, self).__init__(*args, **kwargs)
 
         self._runner = runner
+        self._result = getTestResult()(self._runner.options, self.stream,
+                                       self.descriptions, 0)
 
     def listtests(self, test):
-        result = getTestResult()(self._runner.options, self.stream,
-                                 self.descriptions, 0)
         test = sorted(test, key=lambda t: t.name)
+
+        self._result.onStart(test)
+
         for t in test:
             print(t.name)
-            result.addSuccess(t)
+            self._result.addSuccess(t)
 
         if self._runner.options.xunit:
             with open(self._runner.options.xunit, "wb") as xuf:
-                self._writexunit(result, xuf)
+                self._writexunit(self._result, xuf)
 
         if self._runner.options.json:
             jsonpath = os.path.join(self._runner._outputdir, b'report.json')
             with open(jsonpath, 'w') as fp:
-                self._writejson(result, fp)
-
-        return result
+                self._writejson(self._result, fp)
+
+        return self._result
 
     def run(self, test):
-        result = getTestResult()(self._runner.options, self.stream,
-                                 self.descriptions, self.verbosity)
-        test(result)
-
-        failed = len(result.failures)
-        skipped = len(result.skipped)
-        ignored = len(result.ignored)
+        self._result.onStart(test)
+        test(self._result)
+
+        failed = len(self._result.failures)
+        skipped = len(self._result.skipped)
+        ignored = len(self._result.ignored)
 
         with iolock:
             self.stream.writeln('')
 
             if not self._runner.options.noskips:
-                for test, msg in result.skipped:
+                for test, msg in self._result.skipped:
                     formatted = 'Skipped %s: %s\n' % (test.name, msg)
-                    self.stream.write(highlightmsg(formatted, result.color))
-            for test, msg in result.failures:
+                    self.stream.write(highlightmsg(formatted, self._result.color))
+            for test, msg in self._result.failures:
                 formatted = 'Failed %s: %s\n' % (test.name, msg)
-                self.stream.write(highlightmsg(formatted, result.color))
-            for test, msg in result.errors:
+                self.stream.write(highlightmsg(formatted, self._result.color))
+            for test, msg in self._result.errors:
                 self.stream.writeln('Errored %s: %s' % (test.name, msg))
 
             if self._runner.options.xunit:
                 with open(self._runner.options.xunit, "wb") as xuf:
-                    self._writexunit(result, xuf)
+                    self._writexunit(self._result, xuf)
 
             if self._runner.options.json:
                 jsonpath = os.path.join(self._runner._outputdir, b'report.json')
                 with open(jsonpath, 'w') as fp:
-                    self._writejson(result, fp)
+                    self._writejson(self._result, fp)
 
             self._runner._checkhglib('Tested')
 
-            savetimes(self._runner._outputdir, result)
+            savetimes(self._runner._outputdir, self._result)
 
             if failed and self._runner.options.known_good_rev:
-                self._bisecttests(t for t, m in result.failures)
+                self._bisecttests(t for t, m in self._result.failures)
             self.stream.writeln(
                 '# Ran %d tests, %d skipped, %d failed.'
-                % (result.testsRun, skipped + ignored, failed))
+                % (self._result.testsRun, skipped + ignored, failed))
             if failed:
                 self.stream.writeln('python hash seed: %s' %
                     os.environ['PYTHONHASHSEED'])
             if self._runner.options.time:
-                self.printtimes(result.times)
+                self.printtimes(self._result.times)
 
             if self._runner.options.exceptions:
                 exceptions = aggregateexceptions(
@@ -2186,7 +2193,7 @@
 
             self.stream.flush()
 
-        return result
+        return self._result
 
     def _bisecttests(self, tests):
         bisectcmd = ['hg', 'bisect']
@@ -2752,6 +2759,8 @@
             if result.failures:
                 failed = True
 
+            result.onEnd()
+
             if self.options.anycoverage:
                 self._outputcoverage()
         except KeyboardInterrupt:



To: lothiraldan, #hg-reviewers
Cc: mercurial-devel


More information about the Mercurial-devel mailing list