[PATCH 1 of 2 V2] run-tests: track start and end time of tests

Pierre-Yves David pierre-yves.david at ens-lyon.org
Fri May 15 00:22:01 UTC 2015


# HG changeset patch
# User Pierre-Yves David <pierre-yves.david at fb.com>
# Date 1431066898 25200
#      Thu May 07 23:34:58 2015 -0700
# Node ID 7bb8c7d943cdb4889ea643e5e3474b5c7d470613
# Parent  d1bd0fd07ee6adf4ab3be2b0a0a7c0df54d55abf
run-tests: track start and end time of tests

We currently have information about how long each test took, but we have no data
about their actual scheduling. So we now track when a test started and stopped
(relative to the start of the whole test run) to expose this information.

The data is currently in the JSON output only, because that output is meant to
be extensible. A later changeset will include this data in the text output,
where we will be happy to bikeshed its formatting.

Yes, "end" is actually just "start" + "time", but computing it and including it
in the output is simple, cheap and convenient.
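
To make the bookkeeping easier to follow than in diff form, here is a
minimal standalone sketch of what the patch does (the module-level helpers
are illustrative stand-ins, not the actual TestResult methods). It relies
on os.times() returning a 5-tuple (user, system, children_user,
children_system, elapsed_real): tests run in child processes, hence
indices 2 and 3 for CPU time, while index 4 is a wall-clock reading that
gets anchored to the first test started:

    import os

    _firststarttime = None
    times = []  # accumulated per-test timing records

    def starttest():
        # illustrative stand-in for TestResult.startTest
        global _firststarttime
        started = os.times()
        if _firststarttime is None:  # thread racy but irrelevant
            _firststarttime = started[4]
        return started

    def stoptest(name, started):
        # illustrative stand-in for TestResult.stopTest
        stopped = os.times()
        origin = _firststarttime
        times.append((name,
                      stopped[2] - started[2],  # user space CPU time
                      stopped[3] - started[3],  # sys space CPU time
                      stopped[4] - started[4],  # real time
                      started[4] - origin,      # start date in run context
                      stopped[4] - origin))     # end date in run context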

diff --git a/tests/run-tests.py b/tests/run-tests.py
--- a/tests/run-tests.py
+++ b/tests/run-tests.py
@@ -1198,10 +1198,11 @@ class TestResult(unittest._TextTestResul
         # unittest implementation. It is very similar to failed. It may make
         # sense to map it into fail some day.
         self.warned = []
 
         self.times = []
+        self._firststarttime = None
         # Data stored for the benefit of generating xunit reports.
         self.successes = []
         self.faildata = {}
 
     def addFailure(self, test, reason):
@@ -1322,22 +1323,27 @@ class TestResult(unittest._TextTestResul
         # os.times() computes the user time and system time spent by
         # child processes along with the real elapsed time taken by a
         # process. It has one limitation: it only fully works on
         # Unix-like systems, not on Windows.
         test.started = os.times()
+        if self._firststarttime is None: # thread racy but irrelevant
+            self._firststarttime = test.started[4]
 
     def stopTest(self, test, interrupted=False):
         super(TestResult, self).stopTest(test)
 
         test.stopped = os.times()
 
         starttime = test.started
         endtime = test.stopped
+        origin = self._firststarttime
         self.times.append((test.name,
                            endtime[2] - starttime[2], # user space CPU time
                            endtime[3] - starttime[3], # sys  space CPU time
                            endtime[4] - starttime[4], # real time
+                           starttime[4] - origin, # start date in run context
+                           endtime[4] - origin, # end date in run context
                            ))
 
         if interrupted:
             with iolock:
                 self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
@@ -1568,13 +1574,14 @@ class TextTestRunner(unittest.TextTestRu
                     for res, testcases in groups:
                         for tc, __ in testcases:
                             tres = {'result': res,
                                     'time': ('%0.3f' % timesd[tc.name][2]),
                                     'cuser': ('%0.3f' % timesd[tc.name][0]),
-                                    'csys': ('%0.3f' % timesd[tc.name][1])}
+                                    'csys': ('%0.3f' % timesd[tc.name][1]),
+                                    'start': ('%0.3f' % timesd[tc.name][3]),
+                                    'end': ('%0.3f' % timesd[tc.name][4])}
                             outcome[tc.name] = tres
-
                     jsonout = json.dumps(outcome, sort_keys=True, indent=4)
                     fp.writelines(("testreport =", jsonout))
                 finally:
                     fp.close()
 
diff --git a/tests/test-run-tests.t b/tests/test-run-tests.t
--- a/tests/test-run-tests.t
+++ b/tests/test-run-tests.t
@@ -478,23 +478,29 @@ test for --json
   $ cat report.json
   testreport ={
       "test-failure.t": [\{] (re)
           "csys": "\s*[\d\.]{4,5}", ? (re)
           "cuser": "\s*[\d\.]{4,5}", ? (re)
+          "end": "\s*[\d\.]{4,5}", ? (re)
           "result": "failure", ? (re)
+          "start": "\s*[\d\.]{4,5}", ? (re)
           "time": "\s*[\d\.]{4,5}" (re)
       }, ? (re)
       "test-skip.t": {
           "csys": "\s*[\d\.]{4,5}", ? (re)
           "cuser": "\s*[\d\.]{4,5}", ? (re)
+          "end": "\s*[\d\.]{4,5}", ? (re)
           "result": "skip", ? (re)
+          "start": "\s*[\d\.]{4,5}", ? (re)
           "time": "\s*[\d\.]{4,5}" (re)
       }, ? (re)
       "test-success.t": [\{] (re)
           "csys": "\s*[\d\.]{4,5}", ? (re)
           "cuser": "\s*[\d\.]{4,5}", ? (re)
+          "end": "\s*[\d\.]{4,5}", ? (re)
           "result": "success", ? (re)
+          "start": "\s*[\d\.]{4,5}", ? (re)
           "time": "\s*[\d\.]{4,5}" (re)
       }
   } (no-eol)
 
 Test that failed test accepted through interactive are properly reported:
@@ -517,23 +523,29 @@ Test that failed test accepted through i
   $ cat report.json
   testreport ={
       "test-failure.t": [\{] (re)
           "csys": "\s*[\d\.]{4,5}", ? (re)
           "cuser": "\s*[\d\.]{4,5}", ? (re)
+          "end": "\s*[\d\.]{4,5}", ? (re)
           "result": "success", ? (re)
+          "start": "\s*[\d\.]{4,5}", ? (re)
           "time": "\s*[\d\.]{4,5}" (re)
       }, ? (re)
       "test-skip.t": {
           "csys": "\s*[\d\.]{4,5}", ? (re)
           "cuser": "\s*[\d\.]{4,5}", ? (re)
+          "end": "\s*[\d\.]{4,5}", ? (re)
           "result": "skip", ? (re)
+          "start": "\s*[\d\.]{4,5}", ? (re)
           "time": "\s*[\d\.]{4,5}" (re)
       }, ? (re)
       "test-success.t": [\{] (re)
           "csys": "\s*[\d\.]{4,5}", ? (re)
           "cuser": "\s*[\d\.]{4,5}", ? (re)
+          "end": "\s*[\d\.]{4,5}", ? (re)
           "result": "success", ? (re)
+          "start": "\s*[\d\.]{4,5}", ? (re)
           "time": "\s*[\d\.]{4,5}" (re)
       }
   } (no-eol)
   $ mv backup test-failure.t
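
As a usage note, the new "start"/"end" fields make it possible to
reconstruct the run schedule from report.json. A hypothetical consumer
(not part of this patch; it only assumes the "testreport =" prefix that
run-tests.py writes, as seen above):

    import json

    # report.json is the literal prefix "testreport =" followed by a
    # JSON object, so strip the prefix before parsing.
    with open('report.json') as fp:
        report = json.loads(fp.read().split('=', 1)[1])

    # Print tests in scheduling order using the new fields.
    for name, res in sorted(report.items(),
                            key=lambda kv: float(kv[1]['start'])):
        print('%s -> %s  %s (%s)'
              % (res['start'], res['end'], name, res['result']))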
 

