[PATCH 1 of 2] run-tests: track start and end time of tests

Pierre-Yves David pierre-yves.david at ens-lyon.org
Sat May 9 20:18:44 UTC 2015


# HG changeset patch
# User Pierre-Yves David <pierre-yves.david at fb.com>
# Date 1431066898 25200
#      Thu May 07 23:34:58 2015 -0700
# Node ID 8e0564ce5766e039845ba5e8b4a8e94aa085b33c
# Parent  ae29715cf458a52685b1bdf9721ecaeba5b73eb6
run-tests: track start and end time of tests

We currently have information about how long each test took, but no data about
how the tests were actually scheduled. So we now track when each test started
and stopped (relative to the start of the whole test run) and expose this
information.

The data currently lives in the JSON output only, because that output is meant
to be extensible. A later changeset will include this data in the text output,
and we will be happy to bikeshed its formatting there.

Yes, "end" is actually just "start" + "time", but computing it an including it
in the output is simple, cheap and convenient.
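For context, here is a minimal standalone sketch (not part of the patch) of the
bookkeeping the changes below add to TestResult. os.times() returns a 5-tuple;
indices 2 and 3 are the CPU time of child processes and index 4 is the elapsed
real time:

  import os
  import time

  # Sketch of the timing bookkeeping: the first test to start defines
  # the origin, and every test reports its offsets from that origin.
  _firststarttime = None

  def timed_run(name, func):
      global _firststarttime
      started = os.times()
      if _firststarttime is None:
          _firststarttime = started[4]      # origin of the whole run
      func()
      stopped = os.times()
      return (name,
              stopped[2] - started[2],      # user space CPU time
              stopped[3] - started[3],      # sys  space CPU time
              stopped[4] - started[4],      # real time
              started[4] - _firststarttime, # start date in run context
              stopped[4] - _firststarttime) # end date in run context

  print(timed_run('demo', lambda: time.sleep(0.1)))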

diff --git a/tests/run-tests.py b/tests/run-tests.py
--- a/tests/run-tests.py
+++ b/tests/run-tests.py
@@ -1176,10 +1176,11 @@ class TestResult(unittest._TextTestResul
         # unittest implementation. It is very similar to failed. It may make
         # sense to map it into fail some day.
         self.warned = []
 
         self.times = []
+        self._firststarttime = None
         # Data stored for the benefit of generating xunit reports.
         self.successes = []
         self.faildata = {}
 
     def addFailure(self, test, reason):
@@ -1298,22 +1299,27 @@ class TestResult(unittest._TextTestResul
         # os.times module computes the user time and system time spent by
         # child's processes along with real elapsed time taken by a process.
         # This module has one limitation. It can only work for Linux user
         # and not for Windows.
         test.started = os.times()
+        if self._firststarttime is None: # thread racy but irrelevant
+            self._firststarttime = test.started[4]
 
     def stopTest(self, test, interrupted=False):
         super(TestResult, self).stopTest(test)
 
         test.stopped = os.times()
 
         starttime = test.started
         endtime = test.stopped
+        origin = self._firststarttime
         self.times.append((test.name,
                            endtime[2] - starttime[2], # user space CPU time
                            endtime[3] - starttime[3], # sys  space CPU time
                            endtime[4] - starttime[4], # real time
+                           starttime[4] - origin, # start date in run context
+                           endtime[4] - origin, # end date in run context
                            ))
 
         if interrupted:
             iolock.acquire()
             self.stream.writeln('INTERRUPTED: %s (after %d seconds)' % (
@@ -1544,11 +1550,13 @@ class TextTestRunner(unittest.TextTestRu
                 for res, testcases in groups:
                     for tc, __ in testcases:
                         testresult = {'result': res,
                                       'time': ('%0.3f' % timesd[tc.name][2]),
                                       'cuser': ('%0.3f' % timesd[tc.name][0]),
-                                      'csys': ('%0.3f' % timesd[tc.name][1])}
+                                      'csys': ('%0.3f' % timesd[tc.name][1]),
+                                      'start': ('%0.3f' % timesd[tc.name][3]),
+                                      'end': ('%0.3f' % timesd[tc.name][4])}
                         outcome[tc.name] = testresult
 
                 jsonout = json.dumps(outcome, sort_keys=True, indent=4)
                 fp.writelines(("testreport =", jsonout))
             finally:
diff --git a/tests/test-run-tests.t b/tests/test-run-tests.t
--- a/tests/test-run-tests.t
+++ b/tests/test-run-tests.t
@@ -471,23 +471,29 @@ test for --json
   $ cat report.json
   testreport ={
       "test-failure.t": [\{] (re)
           "csys": "\s*[\d\.]{4,5}", ? (re)
           "cuser": "\s*[\d\.]{4,5}", ? (re)
+          "end": "\s*[\d\.]{4,5}", ? (re)
           "result": "failure", ? (re)
+          "start": "\s*[\d\.]{4,5}", ? (re)
           "time": "\s*[\d\.]{4,5}" (re)
       }, ? (re)
       "test-skip.t": {
           "csys": "\s*[\d\.]{4,5}", ? (re)
           "cuser": "\s*[\d\.]{4,5}", ? (re)
+          "end": "\s*[\d\.]{4,5}", ? (re)
           "result": "skip", ? (re)
+          "start": "\s*[\d\.]{4,5}", ? (re)
           "time": "\s*[\d\.]{4,5}" (re)
       }, ? (re)
       "test-success.t": [\{] (re)
           "csys": "\s*[\d\.]{4,5}", ? (re)
           "cuser": "\s*[\d\.]{4,5}", ? (re)
+          "end": "\s*[\d\.]{4,5}", ? (re)
           "result": "success", ? (re)
+          "start": "\s*[\d\.]{4,5}", ? (re)
           "time": "\s*[\d\.]{4,5}" (re)
       }
   } (no-eol)
 
 Test that failed test accepted through interactive are properly reported:
@@ -510,23 +516,29 @@ Test that failed test accepted through i
   $ cat report.json
   testreport ={
       "test-failure.t": [\{] (re)
           "csys": "\s*[\d\.]{4,5}", ? (re)
           "cuser": "\s*[\d\.]{4,5}", ? (re)
+          "end": "\s*[\d\.]{4,5}", ? (re)
           "result": "success", ? (re)
+          "start": "\s*[\d\.]{4,5}", ? (re)
           "time": "\s*[\d\.]{4,5}" (re)
       }, ? (re)
       "test-skip.t": {
           "csys": "\s*[\d\.]{4,5}", ? (re)
           "cuser": "\s*[\d\.]{4,5}", ? (re)
+          "end": "\s*[\d\.]{4,5}", ? (re)
           "result": "skip", ? (re)
+          "start": "\s*[\d\.]{4,5}", ? (re)
           "time": "\s*[\d\.]{4,5}" (re)
       }, ? (re)
       "test-success.t": [\{] (re)
           "csys": "\s*[\d\.]{4,5}", ? (re)
           "cuser": "\s*[\d\.]{4,5}", ? (re)
+          "end": "\s*[\d\.]{4,5}", ? (re)
           "result": "success", ? (re)
+          "start": "\s*[\d\.]{4,5}", ? (re)
           "time": "\s*[\d\.]{4,5}" (re)
       }
   } (no-eol)
   $ mv backup test-failure.t
 
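For anyone wanting to exploit the new fields, a hypothetical consumer of the
--json report (not part of this patch) could sort tests by their start offset
to reconstruct the scheduling of a run. Note the "testreport =" prefix shown
above, which must be split off before parsing:

  import json

  # Hypothetical consumer of report.json. The file begins with a
  # "testreport =" prefix before the JSON payload, so strip it first.
  with open('report.json') as fp:
      payload = fp.read().split('=', 1)[1]

  tests = json.loads(payload)

  # Sort by start offset to see when each test actually ran.
  for name, data in sorted(tests.items(),
                           key=lambda kv: float(kv[1]['start'])):
      print('%s: %s -> %s (%s)' % (name, data['start'],
                                   data['end'], data['result']))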

