D6581: update: fix spurious unclean status bug shown by previous commit

valentin.gatienbaron (Valentin Gatien-Baron) phabricator at mercurial-scm.org
Thu Jun 27 11:19:13 EDT 2019


Closed by commit rHGd29db0a0c4eb: update: fix spurious unclean status bug shown by previous commit (authored by valentin.gatienbaron).
This revision was automatically updated to reflect the committed changes.
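
Context, inferred from the diff below rather than stated in the message
itself: the previous commit collected per-worker results in a
position-indexed list (retvals[i], later flattened with sum(retvals, []))
and then looked the stat data up by action index (getfiledata[i]). Since
worker.partition hands out work by striding (every Nth item goes to the
same slice), concatenating the per-slice lists does not restore the
original action order, which presumably is how a file could be paired
with another file's cached (mode, size, mtime) and show up as modified in
hg status. Keying everything by file name, as this patch does, sidesteps
the ordering question entirely. A toy illustration in plain Python; only
partition() mirrors the real helper, the other names are invented:

  # partition stripes items across slices, so concatenating per-slice
  # result lists loses the original order; a dict keyed by file name
  # does not care about that order.
  def partition(lst, nslices):
      for n in range(nslices):
          yield lst[n::nslices]          # every nslices-th item per slice

  files = ['f0', 'f1', 'f2', 'f3', 'f4']
  slices = list(partition(files, 2))     # [['f0', 'f2', 'f4'], ['f1', 'f3']]

  # old scheme: per-slice lists, concatenated back together by slice index
  aslist = sum(([f + '.stat' for f in s] for s in slices), [])
  print(aslist[1])                       # 'f2.stat', although index 1 is f1

  # new scheme: per-slice dicts, merged and looked up by file name
  asdict = {}
  for s in slices:
      asdict.update({f: f + '.stat' for f in s})
  print(asdict['f1'])                    # 'f1.stat'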

REPOSITORY
  rHG Mercurial

CHANGES SINCE LAST UPDATE
  https://phab.mercurial-scm.org/D6581?vs=15669&id=15674

CHANGES SINCE LAST ACTION
  https://phab.mercurial-scm.org/D6581/new/

REVISION DETAIL
  https://phab.mercurial-scm.org/D6581

AFFECTED FILES
  mercurial/merge.py
  mercurial/worker.py
  tests/test-simple-update.t

CHANGE DETAILS

diff --git a/tests/test-simple-update.t b/tests/test-simple-update.t
--- a/tests/test-simple-update.t
+++ b/tests/test-simple-update.t
@@ -110,24 +110,6 @@
   getting 100
   100 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg status
-  M 100
-  M 11
-  M 2
-  M 21
-  M 3
-  M 4
-  M 41
-  M 5
-  M 51
-  M 54
-  M 6
-  M 61
-  M 7
-  M 71
-  M 8
-  M 81
-  M 9
-  M 91
 
   $ cd ..
 
diff --git a/mercurial/worker.py b/mercurial/worker.py
--- a/mercurial/worker.py
+++ b/mercurial/worker.py
@@ -100,8 +100,9 @@
     workers
 
     hasretval - when True, func and the current function return an progress
-    iterator then a list (encoded as an iterator that yield many (False, ..)
-    then a (True, list)). The resulting list is in the natural order.
+    iterator then a dict (encoded as an iterator that yield many (False, ..)
+    then a (True, dict)). The dicts are joined in some arbitrary order, so
+    overlapping keys are a bad idea.
 
     threadsafe - whether work items are thread safe and can be executed using
     a thread-based worker. Should be disabled for CPU heavy tasks that don't
@@ -162,8 +163,8 @@
     ui.flush()
     parentpid = os.getpid()
     pipes = []
-    retvals = []
-    for i, pargs in enumerate(partition(args, workers)):
+    retval = {}
+    for pargs in partition(args, workers):
         # Every worker gets its own pipe to send results on, so we don't have to
         # implement atomic writes larger than PIPE_BUF. Each forked process has
         # its own pipe's descriptors in the local variables, and the parent
@@ -171,7 +172,6 @@
         # care what order they're in).
         rfd, wfd = os.pipe()
         pipes.append((rfd, wfd))
-        retvals.append(None)
         # make sure we use os._exit in all worker code paths. otherwise the
         # worker may do some clean-ups which could cause surprises like
         # deadlock. see sshpeer.cleanup for example.
@@ -192,7 +192,7 @@
                         os.close(w)
                     os.close(rfd)
                     for result in func(*(staticargs + (pargs,))):
-                        os.write(wfd, util.pickle.dumps((i, result)))
+                        os.write(wfd, util.pickle.dumps(result))
                     return 0
 
                 ret = scmutil.callcatch(ui, workerfunc)
@@ -226,9 +226,9 @@
         while openpipes > 0:
             for key, events in selector.select():
                 try:
-                    i, res = util.pickle.load(key.fileobj)
+                    res = util.pickle.load(key.fileobj)
                     if hasretval and res[0]:
-                        retvals[i] = res[1]
+                        retval.update(res[1])
                     else:
                         yield res
                 except EOFError:
@@ -249,7 +249,7 @@
             os.kill(os.getpid(), -status)
         sys.exit(status)
     if hasretval:
-        yield True, sum(retvals, [])
+        yield True, retval
 
 def _posixexitstatus(code):
     '''convert a posix exit status into the same form returned by
@@ -281,9 +281,9 @@
             try:
                 while not self._taskqueue.empty():
                     try:
-                        i, args = self._taskqueue.get_nowait()
+                        args = self._taskqueue.get_nowait()
                         for res in self._func(*self._staticargs + (args,)):
-                            self._resultqueue.put((i, res))
+                            self._resultqueue.put(res)
                             # threading doesn't provide a native way to
                             # interrupt execution. handle it manually at every
                             # iteration.
@@ -318,11 +318,10 @@
     workers = _numworkers(ui)
     resultqueue = pycompat.queue.Queue()
     taskqueue = pycompat.queue.Queue()
-    retvals = []
+    retval = {}
     # partition work to more pieces than workers to minimize the chance
     # of uneven distribution of large tasks between the workers
-    for pargs in enumerate(partition(args, workers * 20)):
-        retvals.append(None)
+    for pargs in partition(args, workers * 20):
         taskqueue.put(pargs)
     for _i in range(workers):
         t = Worker(taskqueue, resultqueue, func, staticargs)
@@ -331,9 +330,9 @@
     try:
         while len(threads) > 0:
             while not resultqueue.empty():
-                (i, res) = resultqueue.get()
+                res = resultqueue.get()
                 if hasretval and res[0]:
-                    retvals[i] = res[1]
+                    retval.update(res[1])
                 else:
                     yield res
             threads[0].join(0.05)
@@ -346,13 +345,13 @@
         trykillworkers()
         raise
     while not resultqueue.empty():
-        (i, res) = resultqueue.get()
+        res = resultqueue.get()
         if hasretval and res[0]:
-            retvals[i] = res[1]
+            retval.update(res[1])
         else:
             yield res
     if hasretval:
-        yield True, sum(retvals, [])
+        yield True, retval
 
 if pycompat.iswindows:
     _platformworker = _windowsworker
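
The docstring hunk above spells out the new contract for hasretval: func
yields progress as (False, ...) items and finishes with a single
(True, dict), and the parent merges those dicts with update() instead of
slotting lists into per-worker positions. A minimal single-process sketch
of that protocol, using invented names rather than the real
mercurial.worker API:

  def runall(chunks, func):
      # stand-in for the parent side of worker(..., hasretval=True)
      retval = {}
      for chunk in chunks:                 # one chunk per (pretend) worker
          for final, payload in func(chunk):
              if final:
                  retval.update(payload)   # join order is arbitrary
              else:
                  yield False, payload     # forward progress items unchanged
      yield True, retval

  def getter(files):
      # stand-in for batchget: progress per file, then one dict of results
      data = {}
      for f in files:
          data[f] = ('mode', 'size', 'mtime')
          yield False, (1, f)
      yield True, data

  for final, res in runall([['a', 'c'], ['b']], getter):
      if final:
          print(sorted(res))               # ['a', 'b', 'c'] whatever the chunking

Because the dicts are merged in whichever order the workers finish,
overlapping keys would silently clobber each other, hence the docstring's
warning that they are a bad idea.
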
diff --git a/mercurial/merge.py b/mercurial/merge.py
--- a/mercurial/merge.py
+++ b/mercurial/merge.py
@@ -1472,10 +1472,10 @@
 
     Yields arbitrarily many (False, tuple) for progress updates, followed by
     exactly one (True, filedata). When wantfiledata is false, filedata is an
-    empty list. When wantfiledata is true, filedata[i] is a triple (mode, size,
-    mtime) of the file written for action[i].
+    empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
+    mtime) of the file f written for each action.
     """
-    filedata = []
+    filedata = {}
     verbose = repo.ui.verbose
     fctx = mctx.filectx
     ui = repo.ui
@@ -1509,7 +1509,7 @@
                 s = wfctx.lstat()
                 mode = s.st_mode
                 mtime = s[stat.ST_MTIME]
-                filedata.append((mode, size, mtime)) # for dirstate.normal
+                filedata[f] = ((mode, size, mtime)) # for dirstate.normal
             if i == 100:
                 yield False, (i, f)
                 i = 0
@@ -1670,7 +1670,7 @@
                          actions[ACTION_GET],
                          threadsafe=threadsafe,
                          hasretval=True)
-    getfiledata = []
+    getfiledata = {}
     for final, res in prog:
         if final:
             getfiledata = res
@@ -1803,7 +1803,8 @@
             actions[k].extend(acts)
             if k == ACTION_GET and wantfiledata:
                 # no filedata until mergestate is updated to provide it
-                getfiledata.extend([None] * len(acts))
+                for a in acts:
+                    getfiledata[a[0]] = None
             # Remove these files from actions[ACTION_MERGE] as well. This is
             # important because in recordupdates, files in actions[ACTION_MERGE]
             # are processed after files in other actions, and the merge driver
@@ -1873,11 +1874,11 @@
         pass
 
     # get
-    for i, (f, args, msg) in enumerate(actions.get(ACTION_GET, [])):
+    for f, args, msg in actions.get(ACTION_GET, []):
         if branchmerge:
             repo.dirstate.otherparent(f)
         else:
-            parentfiledata = getfiledata[i] if getfiledata else None
+            parentfiledata = getfiledata[f] if getfiledata else None
             repo.dirstate.normal(f, parentfiledata=parentfiledata)
 
     # merge
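
On the consuming side (the recordupdates hunk just above), the stat data
is now looked up by file name, and the merge-driver hunk stores None for
the actions it appends, matching the None that is passed when no file
data was collected at all. A small sketch with made-up values, standing
in for the real dirstate call:

  getfiledata = {
      'a.txt': (0o100644, 12, 1561648753),  # (mode, size, mtime) from batchget
      'b.txt': None,                        # action added later, no cached stat
  }
  actions_get = [('a.txt', None, 'getting'), ('b.txt', None, 'getting')]

  for f, args, msg in actions_get:
      parentfiledata = getfiledata[f] if getfiledata else None
      # the real code calls repo.dirstate.normal(f, parentfiledata=parentfiledata)
      print(f, parentfiledata)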



To: valentin.gatienbaron, #hg-reviewers, martinvonz
Cc: martinvonz, mercurial-devel

