[PATCH 2 of 3] revlog: introduce an experimental flag to slice chunks reads when too sparse

Paul Morelle paul.morelle at octobus.net
Mon Oct 16 12:35:49 EDT 2017


# HG changeset patch
# User Paul Morelle <paul.morelle at octobus.net>
# Date 1507650627 -7200
#      Tue Oct 10 17:50:27 2017 +0200
# Node ID fd6ea10467600ccdfc9f3491ad95da5cdb5b840d
# Parent  ac3901a97e195627a2ed4e65040912326ce5d943
# EXP-Topic optimized-read
# Available At https://bitbucket.org/octobus/mercurial-devel/
#              hg pull https://bitbucket.org/octobus/mercurial-devel/ -r fd6ea1046760
revlog: introduce an experimental flag to slice chunks reads when too sparse

Delta chains can become quite sparse if there is a lot of unrelated data between
relevant pieces. Right now, revlog always reads all the data covering a delta
chain in a single read. This can lead to a lot of unrelated data being read
(see issue5482 for more details).
One can use the `experimental.maxdeltachainspan` option with a large value
(or -1) to easily produce a very sparse delta chain.
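
For example, removing the span limit entirely with a configuration like the
following (shown only to reproduce the sparse case, not as a recommendation)
makes such chains easy to observe:

    [experimental]
    maxdeltachainspan = -1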

This change introduces the ability to slice the chunk retrieval into multiple
reads, skipping large sections of unrelated data. Preliminary testing shows
interesting results. For example, the peak memory consumption when reading a
manifest on a large repository drops from 600MB to 250MB (200MB without
maxdeltachainspan). However, the slicing itself and the multiple reads can have
a negative impact on performance. This is why the new feature is hidden behind
an experimental flag.
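
To experiment with it, the feature can be enabled through configuration, for
example (a sketch using the defaults introduced by this patch):

    [experimental]
    sparse-read = yes
    sparse-read.density-threshold = 0.25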

Future changesets will add various parameters to control the slicing heuristics.
We hope to experiment with a wide variety of repositories during the 4.4 cycle
and hopefully turn the feature on by default in 4.5.
The algorithm itself is a first attempt and is likely to change significantly.
However, we wish to define the APIs and have a baseline to work from.
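
To give a sense of the heuristic implemented below (a standalone sketch with
made-up offsets, not the revlog code itself): a span whose useful data covers
only a small fraction of the bytes it forces us to read falls under the density
threshold and gets split at its largest hole.

    # (start, length) of each revision in a toy delta chain
    revs = [(0, 10), (15, 10), (1000, 10), (1020, 10)]
    startbyte = revs[0][0]
    endbyte = revs[-1][0] + revs[-1][1]             # 1030
    textlen = sum(length for _, length in revs)     # 40
    density = textlen / float(endbyte - startbyte)  # ~0.04, below 0.25
    # the read would therefore be split at the largest hole (bytes 25..1000),
    # producing two dense reads instead of one mostly-wasted one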

diff -r ac3901a97e19 -r fd6ea1046760 mercurial/configitems.py
--- a/mercurial/configitems.py	Mon Oct 09 15:13:41 2017 +0200
+++ b/mercurial/configitems.py	Tue Oct 10 17:50:27 2017 +0200
@@ -409,6 +409,12 @@
 coreconfigitem('experimental', 'spacemovesdown',
     default=False,
 )
+coreconfigitem('experimental', 'sparse-read',
+    default=False,
+)
+coreconfigitem('experimental', 'sparse-read.density-threshold',
+    default=0.25,
+)
 coreconfigitem('experimental', 'treemanifest',
     default=False,
 )
diff -r ac3901a97e19 -r fd6ea1046760 mercurial/localrepo.py
--- a/mercurial/localrepo.py	Mon Oct 09 15:13:41 2017 +0200
+++ b/mercurial/localrepo.py	Tue Oct 10 17:50:27 2017 +0200
@@ -608,6 +608,11 @@
                                                  'mmapindexthreshold')
         if mmapindexthreshold is not None:
             self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
+        withsparseread = self.ui.configbool('experimental', 'sparse-read')
+        srdensitythres = float(self.ui.config('experimental',
+                                              'sparse-read.density-threshold'))
+        self.svfs.options['with-sparse-read'] = withsparseread
+        self.svfs.options['sparse-read-density-threshold'] = srdensitythres
 
         for r in self.requirements:
             if r.startswith('exp-compression-'):
diff -r ac3901a97e19 -r fd6ea1046760 mercurial/revlog.py
--- a/mercurial/revlog.py	Mon Oct 09 15:13:41 2017 +0200
+++ b/mercurial/revlog.py	Tue Oct 10 17:50:27 2017 +0200
@@ -161,6 +161,58 @@
     s.update(text)
     return s.digest()
 
+def _slicechunk(revlog, revs):
+    """slice revs to reduce the amount of unrelated data to be read from disk.
+
+    ``revs`` is sliced into groups that should be read in a single operation.
+    Assume that revs are sorted.
+    """
+    start = revlog.start
+    length = revlog.length
+
+    chunkqueue = collections.deque()
+    chunkqueue.append((revs, 0))
+
+    while chunkqueue:
+        revs, depth = chunkqueue.popleft()
+
+        startbyte = start(revs[0])
+        endbyte = start(revs[-1]) + length(revs[-1])
+        deltachainspan = endbyte - startbyte
+
+        if len(revs) <= 1:
+            yield revs
+            continue
+
+        # Find the largest hole (this is where we would split) and sum up
+        # the lengths of useful data to compute the density of the span
+        textlen = 0
+        prevend = None
+        largesthole = 0
+        idxlargesthole = -1
+        for i, rev in enumerate(revs):
+            revstart = start(rev)
+            revlen = length(rev)
+
+            if prevend is not None:
+                hole = revstart - prevend
+                if hole > largesthole:
+                    largesthole = hole
+                    idxlargesthole = i
+
+            textlen += revlen
+            prevend = revstart + revlen
+
+        density = textlen / float(deltachainspan) if deltachainspan > 0 else 1.0
+
+        if density > revlog._srdensitythreshold:
+            yield revs
+            continue
+
+        # Add the left and right parts so that they will be sliced recursively too
+        chunkqueue.append((revs[:idxlargesthole], depth + 1))
+        chunkqueue.append((revs[idxlargesthole:], depth + 1))
+
 # index v0:
 #  4 bytes: offset
 #  4 bytes: compressed length
@@ -305,6 +357,8 @@
         self._nodepos = None
         self._compengine = 'zlib'
         self._maxdeltachainspan = -1
+        self._withsparseread = False
+        self._srdensitythreshold = 0.25
 
         mmapindexthreshold = None
         v = REVLOG_DEFAULT_VERSION
@@ -331,6 +385,9 @@
                 self._maxdeltachainspan = opts['maxdeltachainspan']
             if mmaplargeindex and 'mmapindexthreshold' in opts:
                 mmapindexthreshold = opts['mmapindexthreshold']
+            self._withsparseread = bool(opts.get('with-sparse-read', False))
+            if 'sparse-read-density-threshold' in opts:
+                self._srdensitythreshold = opts['sparse-read-density-threshold']
 
         if self._chunkcachesize <= 0:
             raise RevlogError(_('revlog chunk cache size %r is not greater '
@@ -1327,26 +1384,32 @@
         l = []
         ladd = l.append
 
-        firstrev = revs[0]
-        # Skip trailing revisions with empty diff
-        for lastrev in revs[::-1]:
-            if length(lastrev) != 0:
-                break
+        if not self._withsparseread:
+            slicedchunks = (revs,)
+        else:
+            slicedchunks = _slicechunk(self, revs)
+
+        for revschunk in slicedchunks:
+            firstrev = revschunk[0]
+            # Skip trailing revisions with empty diff
+            for lastrev in revschunk[::-1]:
+                if length(lastrev) != 0:
+                    break
 
-        try:
-            offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
-        except OverflowError:
-            # issue4215 - we can't cache a run of chunks greater than
-            # 2G on Windows
-            return [self._chunk(rev, df=df) for rev in revs]
+            try:
+                offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
+            except OverflowError:
+                # issue4215 - we can't cache a run of chunks greater than
+                # 2G on Windows
+                return [self._chunk(rev, df=df) for rev in revschunk]
 
-        decomp = self.decompress
-        for rev in revs:
-            chunkstart = start(rev)
-            if inline:
-                chunkstart += (rev + 1) * iosize
-            chunklength = length(rev)
-            ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
+            decomp = self.decompress
+            for rev in revschunk:
+                chunkstart = start(rev)
+                if inline:
+                    chunkstart += (rev + 1) * iosize
+                chunklength = length(rev)
+                ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
 
         return l
 

