[PATCH 01 of 17 V2] largefiles: rename lfutil to storeutil

liscju piotr.listkiewicz at gmail.com
Thu May 12 10:20:23 UTC 2016


# HG changeset patch
# User liscju <piotr.listkiewicz at gmail.com>
# Date 1462488595 -7200
#      Fri May 06 00:49:55 2016 +0200
# Node ID 6606554248eaeffc7dc43b62f3b9d8128b86b9b6
# Parent  c641b8dfb98c2ade6995ba3aa341fe4d7b154827
largefiles: rename lfutil to storeutil

lfutil should be used as a higher-level module by lfcommands and
overrides. storeutil should contain utilities for dealing with the
store and be a lower-level module. This separation will be helpful
in resolving cyclic dependencies.

diff -r c641b8dfb98c -r 6606554248ea hgext/largefiles/basestore.py
--- a/hgext/largefiles/basestore.py	Wed May 11 01:46:11 2016 +0000
+++ b/hgext/largefiles/basestore.py	Fri May 06 00:49:55 2016 +0200
@@ -13,7 +13,7 @@ import re
 from mercurial import util, node, hg, error
 from mercurial.i18n import _
 
-import lfutil
+import storeutil
 
 class StoreError(Exception):
     '''Raised when there is a problem getting files from or putting
@@ -87,8 +87,8 @@ class basestore(object):
         store and in the usercache.
         filename is for informational messages only.
         """
-        util.makedirs(lfutil.storepath(self.repo, ''))
-        storefilename = lfutil.storepath(self.repo, hash)
+        util.makedirs(storeutil.storepath(self.repo, ''))
+        storefilename = storeutil.storepath(self.repo, hash)
 
         tmpname = storefilename + '.tmp'
         tmpfile = util.atomictempfile(tmpname,
@@ -109,7 +109,7 @@ class basestore(object):
             return False
 
         util.rename(tmpname, storefilename)
-        lfutil.linktousercache(self.repo, hash)
+        storeutil.linktousercache(self.repo, hash)
         return True
 
     def verify(self, revs, contents=False):
@@ -126,7 +126,7 @@ class basestore(object):
             cset = "%d:%s" % (cctx.rev(), node.short(cctx.node()))
 
             for standin in cctx:
-                filename = lfutil.splitstandin(standin)
+                filename = storeutil.splitstandin(standin)
                 if filename:
                     fctx = cctx[standin]
                     key = (filename, fctx.filenode())
@@ -219,7 +219,7 @@ def _openstore(repo, remote=None, put=Fa
     for classobj in storeproviders:
         try:
             return classobj(ui, repo, remote)
-        except lfutil.storeprotonotcapable:
+        except storeutil.storeprotonotcapable:
             pass
 
     raise error.Abort(_('%s does not appear to be a largefile store') %
diff -r c641b8dfb98c -r 6606554248ea hgext/largefiles/lfcommands.py
--- a/hgext/largefiles/lfcommands.py	Wed May 11 01:46:11 2016 +0000
+++ b/hgext/largefiles/lfcommands.py	Fri May 06 00:49:55 2016 +0200
@@ -19,7 +19,7 @@ from mercurial.lock import release
 from hgext.convert import convcmd
 from hgext.convert import filemap
 
-import lfutil
+import storeutil
 import basestore
 
 # -- Commands ----------------------------------------------------------
@@ -59,7 +59,7 @@ def lfconvert(ui, src, dest, *pats, **op
         tolfile = False
     else:
         tolfile = True
-        size = lfutil.getminsize(ui, True, opts.get('size'), default=None)
+        size = storeutil.getminsize(ui, True, opts.get('size'), default=None)
 
     if not hg.islocal(src):
         raise error.Abort(_('%s is not a local Mercurial repo') % src)
@@ -90,7 +90,7 @@ def lfconvert(ui, src, dest, *pats, **op
             lfiles = set()
             normalfiles = set()
             if not pats:
-                pats = ui.configlist(lfutil.longname, 'patterns', default=[])
+                pats = ui.configlist(storeutil.longname, 'patterns', default=[])
             if pats:
                 matcher = match_.match(rsrc.root, '', list(pats))
             else:
@@ -104,8 +104,8 @@ def lfconvert(ui, src, dest, *pats, **op
                     lfiles, normalfiles, matcher, size, lfiletohash)
             ui.progress(_('converting revisions'), None)
 
-            if rdst.wvfs.exists(lfutil.shortname):
-                rdst.wvfs.rmtree(lfutil.shortname)
+            if rdst.wvfs.exists(storeutil.shortname):
+                rdst.wvfs.rmtree(storeutil.shortname)
 
             for f in lfiletohash.keys():
                 if rdst.wvfs.isfile(f):
@@ -124,19 +124,19 @@ def lfconvert(ui, src, dest, *pats, **op
             class lfsource(filemap.filemap_source):
                 def __init__(self, ui, source):
                     super(lfsource, self).__init__(ui, source, None)
-                    self.filemapper.rename[lfutil.shortname] = '.'
+                    self.filemapper.rename[storeutil.shortname] = '.'
 
                 def getfile(self, name, rev):
                     realname, realrev = rev
                     f = super(lfsource, self).getfile(name, rev)
 
-                    if (not realname.startswith(lfutil.shortnameslash)
+                    if (not realname.startswith(storeutil.shortnameslash)
                             or f[0] is None):
                         return f
 
                     # Substitute in the largefile data for the hash
                     hash = f[0].strip()
-                    path = lfutil.findfile(rsrc, hash)
+                    path = storeutil.findfile(rsrc, hash)
 
                     if path is None:
                         raise error.Abort(_("missing largefile for '%s' in %s")
@@ -201,7 +201,7 @@ def _lfconvert_addchangeset(rsrc, rdst, 
                 normalfiles.add(f)
 
         if f in lfiles:
-            dstfiles.append(lfutil.standin(f))
+            dstfiles.append(storeutil.standin(f))
             # largefile in manifest if it has not been removed/renamed
             if f in ctx.manifest():
                 fctx = ctx.filectx(f)
@@ -217,7 +217,7 @@ def _lfconvert_addchangeset(rsrc, rdst, 
                 if f not in lfiletohash or lfiletohash[f] != hash:
                     rdst.wwrite(f, ctx[f].data(), ctx[f].flags())
                     executable = 'x' in ctx[f].flags()
-                    lfutil.writestandin(rdst, lfutil.standin(f), hash,
+                    storeutil.writestandin(rdst, storeutil.standin(f), hash,
                         executable)
                     lfiletohash[f] = hash
         else:
@@ -225,10 +225,10 @@ def _lfconvert_addchangeset(rsrc, rdst, 
             dstfiles.append(f)
 
     def getfilectx(repo, memctx, f):
-        if lfutil.isstandin(f):
+        if storeutil.isstandin(f):
             # if the file isn't in the manifest then it was removed
             # or renamed, raise IOError to indicate this
-            srcfname = lfutil.splitstandin(f)
+            srcfname = storeutil.splitstandin(f)
             try:
                 fctx = ctx.filectx(srcfname)
             except error.LookupError:
@@ -237,7 +237,7 @@ def _lfconvert_addchangeset(rsrc, rdst, 
             if renamed:
                 # standin is always a largefile because largefile-ness
                 # doesn't change after rename or copy
-                renamed = lfutil.standin(renamed[0])
+                renamed = storeutil.standin(renamed[0])
 
             return context.memfilectx(repo, f, lfiletohash[srcfname] + '\n',
                                       'l' in fctx.flags(), 'x' in fctx.flags(),
@@ -252,7 +252,7 @@ def _commitcontext(rdst, parents, ctx, d
     mctx = context.memctx(rdst, parents, ctx.description(), dstfiles,
                           getfilectx, ctx.user(), ctx.date(), ctx.extra())
     ret = rdst.commitctx(mctx)
-    lfutil.copyalltostore(rdst, ret)
+    storeutil.copyalltostore(rdst, ret)
     rdst.setparents(ret)
     revmap[ctx.node()] = rdst.changelog.tip()
 
@@ -348,7 +348,7 @@ def uploadlfiles(ui, rsrc, rdst, files):
     for hash in files:
         ui.progress(_('uploading largefiles'), at, unit=_('files'),
                     total=len(files))
-        source = lfutil.findfile(rsrc, hash)
+        source = storeutil.findfile(rsrc, hash)
         if not source:
             raise error.Abort(_('largefile %s missing from store'
                                ' (needs to be uploaded)') % hash)
@@ -378,19 +378,19 @@ def cachelfiles(ui, repo, node, filelist
     returns a tuple (cached, missing).  cached is the list of files downloaded
     by this operation; missing is the list of files that were needed but could
     not be found.'''
-    lfiles = lfutil.listlfiles(repo, node)
+    lfiles = storeutil.listlfiles(repo, node)
     if filelist:
         lfiles = set(lfiles) & set(filelist)
     toget = []
 
     for lfile in lfiles:
         try:
-            expectedhash = repo[node][lfutil.standin(lfile)].data().strip()
+            expectedhash = repo[node][storeutil.standin(lfile)].data().strip()
         except IOError as err:
             if err.errno == errno.ENOENT:
                 continue # node must be None and standin wasn't found in wctx
             raise
-        if not lfutil.findfile(repo, expectedhash):
+        if not storeutil.findfile(repo, expectedhash):
             toget.append((lfile, expectedhash))
 
     if toget:
@@ -402,7 +402,7 @@ def cachelfiles(ui, repo, node, filelist
 
 def downloadlfiles(ui, repo, rev=None):
     matchfn = scmutil.match(repo[None],
-                            [repo.wjoin(lfutil.shortname)], {})
+                            [repo.wjoin(storeutil.shortname)], {})
     def prepare(ctx, fns):
         pass
     totalsuccess = 0
@@ -425,10 +425,10 @@ def updatelfiles(ui, repo, filelist=None
     If ``printmessage`` is other than ``None``, it means "print (or
     ignore, for false) message forcibly".
     '''
-    statuswriter = lfutil.getstatuswriter(ui, repo, printmessage)
+    statuswriter = storeutil.getstatuswriter(ui, repo, printmessage)
     with repo.wlock():
-        lfdirstate = lfutil.openlfdirstate(ui, repo)
-        lfiles = set(lfutil.listlfiles(repo)) | set(lfdirstate)
+        lfdirstate = storeutil.openlfdirstate(ui, repo)
+        lfiles = set(storeutil.listlfiles(repo)) | set(lfdirstate)
 
         if filelist is not None:
             filelist = set(filelist)
@@ -442,7 +442,7 @@ def updatelfiles(ui, repo, filelist=None
             rellfileorig = os.path.relpath(
                 scmutil.origpath(ui, repo, wvfs.join(rellfile)),
                 start=repo.root)
-            relstandin = lfutil.standin(lfile)
+            relstandin = storeutil.standin(lfile)
             relstandinorig = os.path.relpath(
                 scmutil.origpath(ui, repo, wvfs.join(relstandin)),
                 start=repo.root)
@@ -452,7 +452,7 @@ def updatelfiles(ui, repo, filelist=None
                     shutil.copyfile(wvfs.join(rellfile),
                                     wvfs.join(rellfileorig))
                     wvfs.unlinkpath(relstandinorig)
-                expecthash = lfutil.readstandin(repo, lfile)
+                expecthash = storeutil.readstandin(repo, lfile)
                 if expecthash != '':
                     if lfile not in repo[None]: # not switched to normal file
                         wvfs.unlinkpath(rellfile, ignoremissing=True)
@@ -483,7 +483,7 @@ def updatelfiles(ui, repo, filelist=None
 
             expecthash = update.get(lfile)
             if expecthash:
-                if not lfutil.copyfromcache(repo, expecthash, lfile):
+                if not storeutil.copyfromcache(repo, expecthash, lfile):
                     # failed ... but already removed and set to normallookup
                     continue
                 # Synchronize largefile dirstate to the last modified
@@ -494,7 +494,7 @@ def updatelfiles(ui, repo, filelist=None
             # copy the state of largefile standin from the repository's
             # dirstate to its state in the lfdirstate.
             rellfile = lfile
-            relstandin = lfutil.standin(lfile)
+            relstandin = storeutil.standin(lfile)
             if wvfs.exists(relstandin):
                 mode = wvfs.stat(relstandin).st_mode
                 if mode != wvfs.stat(rellfile).st_mode:
@@ -503,7 +503,7 @@ def updatelfiles(ui, repo, filelist=None
 
             updated += update1
 
-            lfutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
+            storeutil.synclfdirstate(repo, lfdirstate, lfile, normallookup)
 
         lfdirstate.write()
         if lfiles:
diff -r c641b8dfb98c -r 6606554248ea hgext/largefiles/lfutil.py
--- a/hgext/largefiles/lfutil.py	Wed May 11 01:46:11 2016 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,655 +0,0 @@
-# Copyright 2009-2010 Gregory P. Ward
-# Copyright 2009-2010 Intelerad Medical Systems Incorporated
-# Copyright 2010-2011 Fog Creek Software
-# Copyright 2010-2011 Unity Technologies
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-'''largefiles utility code: must not import other modules in this package.'''
-
-import os
-import platform
-import stat
-import copy
-
-from mercurial import dirstate, httpconnection, match as match_, util, scmutil
-from mercurial.i18n import _
-from mercurial import node, error
-
-shortname = '.hglf'
-shortnameslash = shortname + '/'
-longname = 'largefiles'
-
-
-# -- Private worker functions ------------------------------------------
-
-def getminsize(ui, assumelfiles, opt, default=10):
-    lfsize = opt
-    if not lfsize and assumelfiles:
-        lfsize = ui.config(longname, 'minsize', default=default)
-    if lfsize:
-        try:
-            lfsize = float(lfsize)
-        except ValueError:
-            raise error.Abort(_('largefiles: size must be number (not %s)\n')
-                             % lfsize)
-    if lfsize is None:
-        raise error.Abort(_('minimum size for largefiles must be specified'))
-    return lfsize
-
-def link(src, dest):
-    """Try to create hardlink - if that fails, efficiently make a copy."""
-    util.makedirs(os.path.dirname(dest))
-    try:
-        util.oslink(src, dest)
-    except OSError:
-        # if hardlinks fail, fallback on atomic copy
-        dst = util.atomictempfile(dest)
-        for chunk in util.filechunkiter(open(src, 'rb')):
-            dst.write(chunk)
-        dst.close()
-        os.chmod(dest, os.stat(src).st_mode)
-
-def usercachepath(ui, hash):
-    '''Return the correct location in the "global" largefiles cache for a file
-    with the given hash.
-    This cache is used for sharing of largefiles across repositories - both
-    to preserve download bandwidth and storage space.'''
-    return os.path.join(_usercachedir(ui), hash)
-
-def _usercachedir(ui):
-    '''Return the location of the "global" largefiles cache.'''
-    path = ui.configpath(longname, 'usercache', None)
-    if path:
-        return path
-    if os.name == 'nt':
-        appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
-        if appdata:
-            return os.path.join(appdata, longname)
-    elif platform.system() == 'Darwin':
-        home = os.getenv('HOME')
-        if home:
-            return os.path.join(home, 'Library', 'Caches', longname)
-    elif os.name == 'posix':
-        path = os.getenv('XDG_CACHE_HOME')
-        if path:
-            return os.path.join(path, longname)
-        home = os.getenv('HOME')
-        if home:
-            return os.path.join(home, '.cache', longname)
-    else:
-        raise error.Abort(_('unknown operating system: %s\n') % os.name)
-    raise error.Abort(_('unknown %s usercache location\n') % longname)
-
-def inusercache(ui, hash):
-    path = usercachepath(ui, hash)
-    return os.path.exists(path)
-
-def findfile(repo, hash):
-    '''Return store path of the largefile with the specified hash.
-    As a side effect, the file might be linked from user cache.
-    Return None if the file can't be found locally.'''
-    path, exists = findstorepath(repo, hash)
-    if exists:
-        repo.ui.note(_('found %s in store\n') % hash)
-        return path
-    elif inusercache(repo.ui, hash):
-        repo.ui.note(_('found %s in system cache\n') % hash)
-        path = storepath(repo, hash)
-        link(usercachepath(repo.ui, hash), path)
-        return path
-    return None
-
-class largefilesdirstate(dirstate.dirstate):
-    def __getitem__(self, key):
-        return super(largefilesdirstate, self).__getitem__(unixpath(key))
-    def normal(self, f):
-        return super(largefilesdirstate, self).normal(unixpath(f))
-    def remove(self, f):
-        return super(largefilesdirstate, self).remove(unixpath(f))
-    def add(self, f):
-        return super(largefilesdirstate, self).add(unixpath(f))
-    def drop(self, f):
-        return super(largefilesdirstate, self).drop(unixpath(f))
-    def forget(self, f):
-        return super(largefilesdirstate, self).forget(unixpath(f))
-    def normallookup(self, f):
-        return super(largefilesdirstate, self).normallookup(unixpath(f))
-    def _ignore(self, f):
-        return False
-    def write(self, tr=False):
-        # (1) disable PENDING mode always
-        #     (lfdirstate isn't yet managed as a part of the transaction)
-        # (2) avoid develwarn 'use dirstate.write with ....'
-        super(largefilesdirstate, self).write(None)
-
-def openlfdirstate(ui, repo, create=True):
-    '''
-    Return a dirstate object that tracks largefiles: i.e. its root is
-    the repo root, but it is saved in .hg/largefiles/dirstate.
-    '''
-    vfs = repo.vfs
-    lfstoredir = longname
-    opener = scmutil.opener(vfs.join(lfstoredir))
-    lfdirstate = largefilesdirstate(opener, ui, repo.root,
-                                     repo.dirstate._validate)
-
-    # If the largefiles dirstate does not exist, populate and create
-    # it. This ensures that we create it on the first meaningful
-    # largefiles operation in a new clone.
-    if create and not vfs.exists(vfs.join(lfstoredir, 'dirstate')):
-        matcher = getstandinmatcher(repo)
-        standins = repo.dirstate.walk(matcher, [], False, False)
-
-        if len(standins) > 0:
-            vfs.makedirs(lfstoredir)
-
-        for standin in standins:
-            lfile = splitstandin(standin)
-            lfdirstate.normallookup(lfile)
-    return lfdirstate
-
-def lfdirstatestatus(lfdirstate, repo):
-    wctx = repo['.']
-    match = match_.always(repo.root, repo.getcwd())
-    unsure, s = lfdirstate.status(match, [], False, False, False)
-    modified, clean = s.modified, s.clean
-    for lfile in unsure:
-        try:
-            fctx = wctx[standin(lfile)]
-        except LookupError:
-            fctx = None
-        if not fctx or fctx.data().strip() != hashfile(repo.wjoin(lfile)):
-            modified.append(lfile)
-        else:
-            clean.append(lfile)
-            lfdirstate.normal(lfile)
-    return s
-
-def listlfiles(repo, rev=None, matcher=None):
-    '''return a list of largefiles in the working copy or the
-    specified changeset'''
-
-    if matcher is None:
-        matcher = getstandinmatcher(repo)
-
-    # ignore unknown files in working directory
-    return [splitstandin(f)
-            for f in repo[rev].walk(matcher)
-            if rev is not None or repo.dirstate[f] != '?']
-
-def instore(repo, hash, forcelocal=False):
-    '''Return true if a largefile with the given hash exists in the user
-    cache.'''
-    return os.path.exists(storepath(repo, hash, forcelocal))
-
-def storepath(repo, hash, forcelocal=False):
-    '''Return the correct location in the repository largefiles cache for a
-    file with the given hash.'''
-    if not forcelocal and repo.shared():
-        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
-    return repo.join(longname, hash)
-
-def findstorepath(repo, hash):
-    '''Search through the local store path(s) to find the file for the given
-    hash.  If the file is not found, its path in the primary store is returned.
-    The return value is a tuple of (path, exists(path)).
-    '''
-    # For shared repos, the primary store is in the share source.  But for
-    # backward compatibility, force a lookup in the local store if it wasn't
-    # found in the share source.
-    path = storepath(repo, hash, False)
-
-    if instore(repo, hash):
-        return (path, True)
-    elif repo.shared() and instore(repo, hash, True):
-        return storepath(repo, hash, True)
-
-    return (path, False)
-
-def copyfromcache(repo, hash, filename):
-    '''Copy the specified largefile from the repo or system cache to
-    filename in the repository. Return true on success or false if the
-    file was not found in either cache (which should not happened:
-    this is meant to be called only after ensuring that the needed
-    largefile exists in the cache).'''
-    wvfs = repo.wvfs
-    path = findfile(repo, hash)
-    if path is None:
-        return False
-    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
-    # The write may fail before the file is fully written, but we
-    # don't use atomic writes in the working copy.
-    with open(path, 'rb') as srcfd:
-        with wvfs(filename, 'wb') as destfd:
-            gothash = copyandhash(srcfd, destfd)
-    if gothash != hash:
-        repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
-                     % (filename, path, gothash))
-        wvfs.unlink(filename)
-        return False
-    return True
-
-def copytostore(repo, rev, file, uploaded=False):
-    wvfs = repo.wvfs
-    hash = readstandin(repo, file, rev)
-    if instore(repo, hash):
-        return
-    if wvfs.exists(file):
-        copytostoreabsolute(repo, wvfs.join(file), hash)
-    else:
-        repo.ui.warn(_("%s: largefile %s not available from local store\n") %
-                     (file, hash))
-
-def copyalltostore(repo, node):
-    '''Copy all largefiles in a given revision to the store'''
-
-    ctx = repo[node]
-    for filename in ctx.files():
-        if isstandin(filename) and filename in ctx.manifest():
-            realfile = splitstandin(filename)
-            copytostore(repo, ctx.node(), realfile)
-
-
-def copytostoreabsolute(repo, file, hash):
-    if inusercache(repo.ui, hash):
-        link(usercachepath(repo.ui, hash), storepath(repo, hash))
-    else:
-        util.makedirs(os.path.dirname(storepath(repo, hash)))
-        dst = util.atomictempfile(storepath(repo, hash),
-                                  createmode=repo.store.createmode)
-        for chunk in util.filechunkiter(open(file, 'rb')):
-            dst.write(chunk)
-        dst.close()
-        linktousercache(repo, hash)
-
-def linktousercache(repo, hash):
-    '''Link / copy the largefile with the specified hash from the store
-    to the cache.'''
-    path = usercachepath(repo.ui, hash)
-    link(storepath(repo, hash), path)
-
-def getstandinmatcher(repo, rmatcher=None):
-    '''Return a match object that applies rmatcher to the standin directory'''
-    wvfs = repo.wvfs
-    standindir = shortname
-
-    # no warnings about missing files or directories
-    badfn = lambda f, msg: None
-
-    if rmatcher and not rmatcher.always():
-        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
-        if not pats:
-            pats = [wvfs.join(standindir)]
-        match = scmutil.match(repo[None], pats, badfn=badfn)
-        # if pats is empty, it would incorrectly always match, so clear _always
-        match._always = False
-    else:
-        # no patterns: relative to repo root
-        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
-    return match
-
-def composestandinmatcher(repo, rmatcher):
-    '''Return a matcher that accepts standins corresponding to the
-    files accepted by rmatcher. Pass the list of files in the matcher
-    as the paths specified by the user.'''
-    smatcher = getstandinmatcher(repo, rmatcher)
-    isstandin = smatcher.matchfn
-    def composedmatchfn(f):
-        return isstandin(f) and rmatcher.matchfn(splitstandin(f))
-    smatcher.matchfn = composedmatchfn
-
-    return smatcher
-
-def standin(filename):
-    '''Return the repo-relative path to the standin for the specified big
-    file.'''
-    # Notes:
-    # 1) Some callers want an absolute path, but for instance addlargefiles
-    #    needs it repo-relative so it can be passed to repo[None].add().  So
-    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
-    # 2) Join with '/' because that's what dirstate always uses, even on
-    #    Windows. Change existing separator to '/' first in case we are
-    #    passed filenames from an external source (like the command line).
-    return shortnameslash + util.pconvert(filename)
-
-def isstandin(filename):
-    '''Return true if filename is a big file standin. filename must be
-    in Mercurial's internal form (slash-separated).'''
-    return filename.startswith(shortnameslash)
-
-def splitstandin(filename):
-    # Split on / because that's what dirstate always uses, even on Windows.
-    # Change local separator to / first just in case we are passed filenames
-    # from an external source (like the command line).
-    bits = util.pconvert(filename).split('/', 1)
-    if len(bits) == 2 and bits[0] == shortname:
-        return bits[1]
-    else:
-        return None
-
-def updatestandin(repo, standin):
-    file = repo.wjoin(splitstandin(standin))
-    if repo.wvfs.exists(splitstandin(standin)):
-        hash = hashfile(file)
-        executable = getexecutable(file)
-        writestandin(repo, standin, hash, executable)
-    else:
-        raise error.Abort(_('%s: file not found!') % splitstandin(standin))
-
-def readstandin(repo, filename, node=None):
-    '''read hex hash from standin for filename at given node, or working
-    directory if no node is given'''
-    return repo[node][standin(filename)].data().strip()
-
-def writestandin(repo, standin, hash, executable):
-    '''write hash to <repo.root>/<standin>'''
-    repo.wwrite(standin, hash + '\n', executable and 'x' or '')
-
-def copyandhash(instream, outfile):
-    '''Read bytes from instream (iterable) and write them to outfile,
-    computing the SHA-1 hash of the data along the way. Return the hash.'''
-    hasher = util.sha1('')
-    for data in instream:
-        hasher.update(data)
-        outfile.write(data)
-    return hasher.hexdigest()
-
-def hashrepofile(repo, file):
-    return hashfile(repo.wjoin(file))
-
-def hashfile(file):
-    if not os.path.exists(file):
-        return ''
-    hasher = util.sha1('')
-    fd = open(file, 'rb')
-    for data in util.filechunkiter(fd, 128 * 1024):
-        hasher.update(data)
-    fd.close()
-    return hasher.hexdigest()
-
-def getexecutable(filename):
-    mode = os.stat(filename).st_mode
-    return ((mode & stat.S_IXUSR) and
-            (mode & stat.S_IXGRP) and
-            (mode & stat.S_IXOTH))
-
-def urljoin(first, second, *arg):
-    def join(left, right):
-        if not left.endswith('/'):
-            left += '/'
-        if right.startswith('/'):
-            right = right[1:]
-        return left + right
-
-    url = join(first, second)
-    for a in arg:
-        url = join(url, a)
-    return url
-
-def hexsha1(data):
-    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
-    object data"""
-    h = util.sha1()
-    for chunk in util.filechunkiter(data):
-        h.update(chunk)
-    return h.hexdigest()
-
-def httpsendfile(ui, filename):
-    return httpconnection.httpsendfile(ui, filename, 'rb')
-
-def unixpath(path):
-    '''Return a version of path normalized for use with the lfdirstate.'''
-    return util.pconvert(os.path.normpath(path))
-
-def islfilesrepo(repo):
-    '''Return true if the repo is a largefile repo.'''
-    if ('largefiles' in repo.requirements and
-            any(shortnameslash in f[0] for f in repo.store.datafiles())):
-        return True
-
-    return any(openlfdirstate(repo.ui, repo, False))
-
-class storeprotonotcapable(Exception):
-    def __init__(self, storetypes):
-        self.storetypes = storetypes
-
-def getstandinsstate(repo):
-    standins = []
-    matcher = getstandinmatcher(repo)
-    for standin in repo.dirstate.walk(matcher, [], False, False):
-        lfile = splitstandin(standin)
-        try:
-            hash = readstandin(repo, lfile)
-        except IOError:
-            hash = None
-        standins.append((lfile, hash))
-    return standins
-
-def synclfdirstate(repo, lfdirstate, lfile, normallookup):
-    lfstandin = standin(lfile)
-    if lfstandin in repo.dirstate:
-        stat = repo.dirstate._map[lfstandin]
-        state, mtime = stat[0], stat[3]
-    else:
-        state, mtime = '?', -1
-    if state == 'n':
-        if (normallookup or mtime < 0 or
-            not repo.wvfs.exists(lfile)):
-            # state 'n' doesn't ensure 'clean' in this case
-            lfdirstate.normallookup(lfile)
-        else:
-            lfdirstate.normal(lfile)
-    elif state == 'm':
-        lfdirstate.normallookup(lfile)
-    elif state == 'r':
-        lfdirstate.remove(lfile)
-    elif state == 'a':
-        lfdirstate.add(lfile)
-    elif state == '?':
-        lfdirstate.drop(lfile)
-
-def markcommitted(orig, ctx, node):
-    repo = ctx.repo()
-
-    orig(node)
-
-    # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
-    # because files coming from the 2nd parent are omitted in the latter.
-    #
-    # The former should be used to get targets of "synclfdirstate",
-    # because such files:
-    # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
-    # - have to be marked as "n" after commit, but
-    # - aren't listed in "repo[node].files()"
-
-    lfdirstate = openlfdirstate(repo.ui, repo)
-    for f in ctx.files():
-        if isstandin(f):
-            lfile = splitstandin(f)
-            synclfdirstate(repo, lfdirstate, lfile, False)
-    lfdirstate.write()
-
-    # As part of committing, copy all of the largefiles into the cache.
-    copyalltostore(repo, node)
-
-def getlfilestoupdate(oldstandins, newstandins):
-    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
-    filelist = []
-    for f in changedstandins:
-        if f[0] not in filelist:
-            filelist.append(f[0])
-    return filelist
-
-def getlfilestoupload(repo, missing, addfunc):
-    for i, n in enumerate(missing):
-        repo.ui.progress(_('finding outgoing largefiles'), i,
-            unit=_('revisions'), total=len(missing))
-        parents = [p for p in repo[n].parents() if p != node.nullid]
-
-        oldlfstatus = repo.lfstatus
-        repo.lfstatus = False
-        try:
-            ctx = repo[n]
-        finally:
-            repo.lfstatus = oldlfstatus
-
-        files = set(ctx.files())
-        if len(parents) == 2:
-            mc = ctx.manifest()
-            mp1 = ctx.parents()[0].manifest()
-            mp2 = ctx.parents()[1].manifest()
-            for f in mp1:
-                if f not in mc:
-                    files.add(f)
-            for f in mp2:
-                if f not in mc:
-                    files.add(f)
-            for f in mc:
-                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
-                    files.add(f)
-        for fn in files:
-            if isstandin(fn) and fn in ctx:
-                addfunc(fn, ctx[fn].data().strip())
-    repo.ui.progress(_('finding outgoing largefiles'), None)
-
-def updatestandinsbymatch(repo, match):
-    '''Update standins in the working directory according to specified match
-
-    This returns (possibly modified) ``match`` object to be used for
-    subsequent commit process.
-    '''
-
-    ui = repo.ui
-
-    # Case 1: user calls commit with no specific files or
-    # include/exclude patterns: refresh and commit all files that
-    # are "dirty".
-    if match is None or match.always():
-        # Spend a bit of time here to get a list of files we know
-        # are modified so we can compare only against those.
-        # It can cost a lot of time (several seconds)
-        # otherwise to update all standins if the largefiles are
-        # large.
-        lfdirstate = openlfdirstate(ui, repo)
-        dirtymatch = match_.always(repo.root, repo.getcwd())
-        unsure, s = lfdirstate.status(dirtymatch, [], False, False,
-                                      False)
-        modifiedfiles = unsure + s.modified + s.added + s.removed
-        lfiles = listlfiles(repo)
-        # this only loops through largefiles that exist (not
-        # removed/renamed)
-        for lfile in lfiles:
-            if lfile in modifiedfiles:
-                if repo.wvfs.exists(standin(lfile)):
-                    # this handles the case where a rebase is being
-                    # performed and the working copy is not updated
-                    # yet.
-                    if repo.wvfs.exists(lfile):
-                        updatestandin(repo,
-                            standin(lfile))
-
-        return match
-
-    lfiles = listlfiles(repo)
-    match._files = repo._subdirlfs(match.files(), lfiles)
-
-    # Case 2: user calls commit with specified patterns: refresh
-    # any matching big files.
-    smatcher = composestandinmatcher(repo, match)
-    standins = repo.dirstate.walk(smatcher, [], False, False)
-
-    # No matching big files: get out of the way and pass control to
-    # the usual commit() method.
-    if not standins:
-        return match
-
-    # Refresh all matching big files.  It's possible that the
-    # commit will end up failing, in which case the big files will
-    # stay refreshed.  No harm done: the user modified them and
-    # asked to commit them, so sooner or later we're going to
-    # refresh the standins.  Might as well leave them refreshed.
-    lfdirstate = openlfdirstate(ui, repo)
-    for fstandin in standins:
-        lfile = splitstandin(fstandin)
-        if lfdirstate[lfile] != 'r':
-            updatestandin(repo, fstandin)
-
-    # Cook up a new matcher that only matches regular files or
-    # standins corresponding to the big files requested by the
-    # user.  Have to modify _files to prevent commit() from
-    # complaining "not tracked" for big files.
-    match = copy.copy(match)
-    origmatchfn = match.matchfn
-
-    # Check both the list of largefiles and the list of
-    # standins because if a largefile was removed, it
-    # won't be in the list of largefiles at this point
-    match._files += sorted(standins)
-
-    actualfiles = []
-    for f in match._files:
-        fstandin = standin(f)
-
-        # For largefiles, only one of the normal and standin should be
-        # committed (except if one of them is a remove).  In the case of a
-        # standin removal, drop the normal file if it is unknown to dirstate.
-        # Thus, skip plain largefile names but keep the standin.
-        if f in lfiles or fstandin in standins:
-            if repo.dirstate[fstandin] != 'r':
-                if repo.dirstate[f] != 'r':
-                    continue
-            elif repo.dirstate[f] == '?':
-                continue
-
-        actualfiles.append(f)
-    match._files = actualfiles
-
-    def matchfn(f):
-        if origmatchfn(f):
-            return f not in lfiles
-        else:
-            return f in standins
-
-    match.matchfn = matchfn
-
-    return match
-
-class automatedcommithook(object):
-    '''Stateful hook to update standins at the 1st commit of resuming
-
-    For efficiency, updating standins in the working directory should
-    be avoided while automated committing (like rebase, transplant and
-    so on), because they should be updated before committing.
-
-    But the 1st commit of resuming automated committing (e.g. ``rebase
-    --continue``) should update them, because largefiles may be
-    modified manually.
-    '''
-    def __init__(self, resuming):
-        self.resuming = resuming
-
-    def __call__(self, repo, match):
-        if self.resuming:
-            self.resuming = False # avoids updating at subsequent commits
-            return updatestandinsbymatch(repo, match)
-        else:
-            return match
-
-def getstatuswriter(ui, repo, forcibly=None):
-    '''Return the function to write largefiles specific status out
-
-    If ``forcibly`` is ``None``, this returns the last element of
-    ``repo._lfstatuswriters`` as "default" writer function.
-
-    Otherwise, this returns the function to always write out (or
-    ignore if ``not forcibly``) status.
-    '''
-    if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
-        return repo._lfstatuswriters[-1]
-    else:
-        if forcibly:
-            return ui.status # forcibly WRITE OUT
-        else:
-            return lambda *msg, **opts: None # forcibly IGNORE
diff -r c641b8dfb98c -r 6606554248ea hgext/largefiles/localstore.py
--- a/hgext/largefiles/localstore.py	Wed May 11 01:46:11 2016 +0000
+++ b/hgext/largefiles/localstore.py	Fri May 06 00:49:55 2016 +0200
@@ -10,7 +10,7 @@
 
 from mercurial.i18n import _
 
-import lfutil
+import storeutil
 import basestore
 
 class localstore(basestore.basestore):
@@ -23,36 +23,37 @@ class localstore(basestore.basestore):
         super(localstore, self).__init__(ui, repo, self.remote.url())
 
     def put(self, source, hash):
-        if lfutil.instore(self.remote, hash):
+        if storeutil.instore(self.remote, hash):
             return
-        lfutil.link(source, lfutil.storepath(self.remote, hash))
+        storeutil.link(source, storeutil.storepath(self.remote, hash))
 
     def exists(self, hashes):
         retval = {}
         for hash in hashes:
-            retval[hash] = lfutil.instore(self.remote, hash)
+            retval[hash] = storeutil.instore(self.remote, hash)
         return retval
 
 
     def _getfile(self, tmpfile, filename, hash):
-        path = lfutil.findfile(self.remote, hash)
+        path = storeutil.findfile(self.remote, hash)
         if not path:
             raise basestore.StoreError(filename, hash, self.url,
                 _("can't get file locally"))
         with open(path, 'rb') as fd:
-            return lfutil.copyandhash(fd, tmpfile)
+            return storeutil.copyandhash(fd, tmpfile)
 
     def _verifyfiles(self, contents, filestocheck):
         failed = False
         for cset, filename, expectedhash in filestocheck:
-            storepath, exists = lfutil.findstorepath(self.remote, expectedhash)
+            storepath, exists = \
+                storeutil.findstorepath(self.remote, expectedhash)
             if not exists:
                 self.ui.warn(
                     _('changeset %s: %s references missing %s\n')
                     % (cset, filename, storepath))
                 failed = True
             elif contents:
-                actualhash = lfutil.hashfile(storepath)
+                actualhash = storeutil.hashfile(storepath)
                 if actualhash != expectedhash:
                     self.ui.warn(
                         _('changeset %s: %s references corrupted %s\n')
diff -r c641b8dfb98c -r 6606554248ea hgext/largefiles/overrides.py
--- a/hgext/largefiles/overrides.py	Wed May 11 01:46:11 2016 +0000
+++ b/hgext/largefiles/overrides.py	Fri May 06 00:49:55 2016 +0200
@@ -15,7 +15,7 @@ from mercurial import hg, util, cmdutil,
         archival, pathutil, registrar, revset, error
 from mercurial.i18n import _
 
-import lfutil
+import storeutil
 import lfcommands
 import basestore
 
@@ -25,7 +25,7 @@ def composelargefilematcher(match, manif
     '''create a matcher that matches only the largefiles in the original
     matcher'''
     m = copy.copy(match)
-    lfile = lambda f: lfutil.standin(f) in manifest
+    lfile = lambda f: storeutil.standin(f) in manifest
     m._files = filter(lfile, m._files)
     m._fileroots = set(m._files)
     m._always = False
@@ -39,7 +39,7 @@ def composenormalfilematcher(match, mani
         excluded.update(exclude)
 
     m = copy.copy(match)
-    notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
+    notlfile = lambda f: not (storeutil.isstandin(f) or storeutil.standin(f) in
             manifest or f in excluded)
     m._files = filter(notlfile, m._files)
     m._fileroots = set(m._files)
@@ -92,12 +92,12 @@ def restorematchandpatsfn():
 
 def addlargefiles(ui, repo, isaddremove, matcher, **opts):
     large = opts.get('large')
-    lfsize = lfutil.getminsize(
-        ui, lfutil.islfilesrepo(repo), opts.get('lfsize'))
+    lfsize = storeutil.getminsize(
+        ui, storeutil.islfilesrepo(repo), opts.get('lfsize'))
 
     lfmatcher = None
-    if lfutil.islfilesrepo(repo):
-        lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
+    if storeutil.islfilesrepo(repo):
+        lfpats = ui.configlist(storeutil.longname, 'patterns', default=[])
         if lfpats:
             lfmatcher = match_.match(repo.root, '', list(lfpats))
 
@@ -107,7 +107,7 @@ def addlargefiles(ui, repo, isaddremove,
     wctx = repo[None]
     for f in repo.walk(match_.badmatch(m, lambda x, y: None)):
         exact = m.exact(f)
-        lfile = lfutil.standin(f) in wctx
+        lfile = storeutil.standin(f) in wctx
         nfile = f in wctx
         exists = lfile or nfile
 
@@ -124,7 +124,7 @@ def addlargefiles(ui, repo, isaddremove,
                 ui.warn(_('%s already a largefile\n') % name)
             continue
 
-        if (exact or not exists) and not lfutil.isstandin(f):
+        if (exact or not exists) and not storeutil.isstandin(f):
             # In case the file was removed previously, but not committed
             # (issue3507)
             if not repo.wvfs.exists(f):
@@ -144,18 +144,18 @@ def addlargefiles(ui, repo, isaddremove,
     with repo.wlock():
         if not opts.get('dry_run'):
             standins = []
-            lfdirstate = lfutil.openlfdirstate(ui, repo)
+            lfdirstate = storeutil.openlfdirstate(ui, repo)
             for f in lfnames:
-                standinname = lfutil.standin(f)
-                lfutil.writestandin(repo, standinname, hash='',
-                    executable=lfutil.getexecutable(repo.wjoin(f)))
+                standinname = storeutil.standin(f)
+                storeutil.writestandin(repo, standinname, hash='',
+                    executable=storeutil.getexecutable(repo.wjoin(f)))
                 standins.append(standinname)
                 if lfdirstate[f] == 'r':
                     lfdirstate.normallookup(f)
                 else:
                     lfdirstate.add(f)
             lfdirstate.write()
-            bad += [lfutil.splitstandin(f)
+            bad += [storeutil.splitstandin(f)
                     for f in repo[None].add(standins)
                     if f in m.files()]
 
@@ -172,7 +172,7 @@ def removelargefiles(ui, repo, isaddremo
         repo.lfstatus = False
     manifest = repo[None].manifest()
     modified, added, deleted, clean = [[f for f in list
-                                        if lfutil.standin(f) in manifest]
+                                        if storeutil.standin(f) in manifest]
                                        for list in (s.modified, s.added,
                                                     s.deleted, s.clean)]
 
@@ -197,7 +197,7 @@ def removelargefiles(ui, repo, isaddremo
     # Need to lock because standin files are deleted then removed from the
     # repository and we could race in-between.
     with repo.wlock():
-        lfdirstate = lfutil.openlfdirstate(ui, repo)
+        lfdirstate = storeutil.openlfdirstate(ui, repo)
         for f in sorted(remove):
             if ui.verbose or not m.exact(f):
                 # addremove in core gets fancy with the name, remove doesn't
@@ -214,7 +214,7 @@ def removelargefiles(ui, repo, isaddremo
         if opts.get('dry_run'):
             return result
 
-        remove = [lfutil.standin(f) for f in remove]
+        remove = [storeutil.standin(f) for f in remove]
         # If this is being called by addremove, let the original addremove
         # function handle this.
         if not isaddremove:
@@ -223,8 +223,9 @@ def removelargefiles(ui, repo, isaddremo
         repo[None].forget(remove)
 
         for f in remove:
-            lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
-                                  False)
+            storeutil.synclfdirstate(repo, lfdirstate,
+                                     storeutil.splitstandin(f),
+                                     False)
 
         lfdirstate.write()
 
@@ -233,7 +234,7 @@ def removelargefiles(ui, repo, isaddremo
 # For overriding mercurial.hgweb.webcommands so that largefiles will
 # appear at their right place in the manifests.
 def decodepath(orig, path):
-    return lfutil.splitstandin(path) or path
+    return storeutil.splitstandin(path) or path
 
 # -- Wrappers: modify existing commands --------------------------------
 
@@ -303,7 +304,7 @@ def overridelog(orig, ui, repo, *pats, *
 
         pats = set(p)
 
-        def fixpats(pat, tostandin=lfutil.standin):
+        def fixpats(pat, tostandin=storeutil.standin):
             if pat.startswith('set:'):
                 return pat
 
@@ -314,14 +315,14 @@ def overridelog(orig, ui, repo, *pats, *
             return tostandin(kindpat[1])
 
         if m._cwd:
-            hglf = lfutil.shortname
+            hglf = storeutil.shortname
             back = util.pconvert(m.rel(hglf)[:-len(hglf)])
 
             def tostandin(f):
                 # The file may already be a standin, so truncate the back
                 # prefix and test before mangling it.  This avoids turning
                 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
-                if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
+                if f.startswith(back) and storeutil.splitstandin(f[len(back):]):
                     return f
 
                 # An absolute path is from outside the repo, so truncate the
@@ -332,21 +333,21 @@ def overridelog(orig, ui, repo, *pats, *
                     f = f[len(back):]
                 else:
                     f = m._cwd + '/' + f
-                return back + lfutil.standin(f)
+                return back + storeutil.standin(f)
 
             pats.update(fixpats(f, tostandin) for f in p)
         else:
             def tostandin(f):
-                if lfutil.splitstandin(f):
+                if storeutil.splitstandin(f):
                     return f
-                return lfutil.standin(f)
+                return storeutil.standin(f)
             pats.update(fixpats(f, tostandin) for f in p)
 
         for i in range(0, len(m._files)):
             # Don't add '.hglf' to m.files, since that is already covered by '.'
             if m._files[i] == '.':
                 continue
-            standin = lfutil.standin(m._files[i])
+            standin = storeutil.standin(m._files[i])
             # If the "standin" is a directory, append instead of replace to
             # support naming a directory on the command line with only
             # largefiles.  The original directory is kept to support normal
@@ -361,7 +362,7 @@ def overridelog(orig, ui, repo, *pats, *
         m._always = False
         origmatchfn = m.matchfn
         def lfmatchfn(f):
-            lf = lfutil.splitstandin(f)
+            lf = storeutil.splitstandin(f)
             if lf is not None and origmatchfn(lf):
                 return True
             r = origmatchfn(f)
@@ -405,7 +406,7 @@ def overridedebugstate(orig, ui, repo, *
     large = opts.pop('large', False)
     if large:
         class fakerepo(object):
-            dirstate = lfutil.openlfdirstate(ui, repo)
+            dirstate = storeutil.openlfdirstate(ui, repo)
         orig(ui, fakerepo, *pats, **opts)
     else:
         orig(ui, repo, *pats, **opts)
@@ -421,7 +422,7 @@ def overridedebugstate(orig, ui, repo, *
 # largefiles. This makes the merge proceed and we can then handle this
 # case further in the overridden calculateupdates function below.
 def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
-    if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
+    if storeutil.standin(repo.dirstate.normalize(f)) in wctx:
         return False
     return origfn(repo, wctx, mctx, f, f2)
 
@@ -463,14 +464,14 @@ def overridecalculateupdates(origfn, rep
     # Convert to dictionary with filename as key and action as value.
     lfiles = set()
     for f in actions:
-        splitstandin = lfutil.splitstandin(f)
+        splitstandin = storeutil.splitstandin(f)
         if splitstandin in p1:
             lfiles.add(splitstandin)
-        elif lfutil.standin(f) in p1:
+        elif storeutil.standin(f) in p1:
             lfiles.add(f)
 
     for lfile in sorted(lfiles):
-        standin = lfutil.standin(lfile)
+        standin = storeutil.standin(lfile)
         (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
         (sm, sargs, smsg) = actions.get(standin, (None, None, None))
         if sm in ('g', 'dc') and lm != 'r':
@@ -521,7 +522,7 @@ def overridecalculateupdates(origfn, rep
 
 def mergerecordupdates(orig, repo, actions, branchmerge):
     if 'lfmr' in actions:
-        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
+        lfdirstate = storeutil.openlfdirstate(repo.ui, repo)
         for lfile, args, msg in actions['lfmr']:
             # this should be executed before 'orig', to execute 'remove'
             # before all other actions
@@ -537,7 +538,7 @@ def mergerecordupdates(orig, repo, actio
 # largefiles. This will handle identical edits without prompting the user.
 def overridefilemerge(origfn, premerge, repo, mynode, orig, fcd, fco, fca,
                       labels=None):
-    if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
+    if not storeutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
         return origfn(premerge, repo, mynode, orig, fcd, fco, fca,
                       labels=labels)
 
@@ -551,7 +552,7 @@ def overridefilemerge(origfn, premerge, 
              _('largefile %s has a merge conflict\nancestor was %s\n'
                'keep (l)ocal %s or\ntake (o)ther %s?'
                '$$ &Local $$ &Other') %
-               (lfutil.splitstandin(orig), ahash, dhash, ohash),
+               (storeutil.splitstandin(orig), ahash, dhash, ohash),
              0) == 1)):
         repo.wwrite(fcd.path(), fco.data(), fco.flags())
     return True, 0, False
@@ -561,7 +562,7 @@ def copiespathcopies(orig, ctx1, ctx2, m
     updated = {}
 
     for k, v in copies.iteritems():
-        updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v
+        updated[storeutil.splitstandin(k) or k] = storeutil.splitstandin(v) or v
 
     return updated
 
@@ -603,7 +604,7 @@ def overridecopy(orig, ui, repo, pats, o
 
     def makestandin(relpath):
         path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
-        return repo.wvfs.join(lfutil.standin(path))
+        return repo.wvfs.join(storeutil.standin(path))
 
     fullpats = scmutil.expandpats(pats)
     dest = fullpats[-1]
@@ -626,19 +627,19 @@ def overridecopy(orig, ui, repo, pats, o
             # The patterns were previously mangled to add the standin
             # directory; we need to remove that now
             for pat in pats:
-                if match_.patkind(pat) is None and lfutil.shortname in pat:
-                    newpats.append(pat.replace(lfutil.shortname, ''))
+                if match_.patkind(pat) is None and storeutil.shortname in pat:
+                    newpats.append(pat.replace(storeutil.shortname, ''))
                 else:
                     newpats.append(pat)
             match = oldmatch(ctx, newpats, opts, globbed, default, badfn=badfn)
             m = copy.copy(match)
-            lfile = lambda f: lfutil.standin(f) in manifest
-            m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
+            lfile = lambda f: storeutil.standin(f) in manifest
+            m._files = [storeutil.standin(f) for f in m._files if lfile(f)]
             m._fileroots = set(m._files)
             origmatchfn = m.matchfn
-            m.matchfn = lambda f: (lfutil.isstandin(f) and
+            m.matchfn = lambda f: (storeutil.isstandin(f) and
                                 (f in manifest) and
-                                origmatchfn(lfutil.splitstandin(f)) or
+                                origmatchfn(storeutil.splitstandin(f)) or
                                 None)
             return m
         oldmatch = installmatchfn(overridematch)
@@ -653,9 +654,9 @@ def overridecopy(orig, ui, repo, pats, o
             origcopyfile = util.copyfile
             copiedfiles = []
             def overridecopyfile(src, dest):
-                if (lfutil.shortname in src and
-                    dest.startswith(repo.wjoin(lfutil.shortname))):
-                    destlfile = dest.replace(lfutil.shortname, '')
+                if (storeutil.shortname in src and
+                    dest.startswith(repo.wjoin(storeutil.shortname))):
+                    destlfile = dest.replace(storeutil.shortname, '')
                     if not opts['force'] and os.path.exists(destlfile):
                         raise IOError('',
                             _('destination largefile already exists'))
@@ -667,12 +668,12 @@ def overridecopy(orig, ui, repo, pats, o
         finally:
             util.copyfile = origcopyfile
 
-        lfdirstate = lfutil.openlfdirstate(ui, repo)
+        lfdirstate = storeutil.openlfdirstate(ui, repo)
         for (src, dest) in copiedfiles:
-            if (lfutil.shortname in src and
-                dest.startswith(repo.wjoin(lfutil.shortname))):
-                srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
-                destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
+            if (storeutil.shortname in src and
+                dest.startswith(repo.wjoin(storeutil.shortname))):
+                srclfile = src.replace(repo.wjoin(storeutil.standin('')), '')
+                destlfile = dest.replace(repo.wjoin(storeutil.standin('')), '')
                 destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or '.'
                 if not os.path.isdir(destlfiledir):
                     os.makedirs(destlfiledir)
@@ -717,16 +718,16 @@ def overriderevert(orig, ui, repo, ctx, 
     # and then return them to a correct state we need to lock to
     # prevent others from changing them in their incorrect state.
     with repo.wlock():
-        lfdirstate = lfutil.openlfdirstate(ui, repo)
-        s = lfutil.lfdirstatestatus(lfdirstate, repo)
+        lfdirstate = storeutil.openlfdirstate(ui, repo)
+        s = storeutil.lfdirstatestatus(lfdirstate, repo)
         lfdirstate.write()
         for lfile in s.modified:
-            lfutil.updatestandin(repo, lfutil.standin(lfile))
+            storeutil.updatestandin(repo, storeutil.standin(lfile))
         for lfile in s.deleted:
-            if (repo.wvfs.exists(lfutil.standin(lfile))):
-                repo.wvfs.unlink(lfutil.standin(lfile))
+            if (repo.wvfs.exists(storeutil.standin(lfile))):
+                repo.wvfs.unlink(storeutil.standin(lfile))
 
-        oldstandins = lfutil.getstandinsstate(repo)
+        oldstandins = storeutil.getstandinsstate(repo)
 
         def overridematch(mctx, pats=(), opts=None, globbed=False,
                 default='relpath', badfn=None):
@@ -739,11 +740,11 @@ def overriderevert(orig, ui, repo, ctx, 
             # currently doesn't work correctly in that case, this match is
             # called, so the lfdirstate above may not be the correct one for
             # this invocation of match.
-            lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
+            lfdirstate = storeutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
                                                False)
 
             def tostandin(f):
-                standin = lfutil.standin(f)
+                standin = storeutil.standin(f)
                 if standin in ctx or standin in mctx:
                     return standin
                 elif standin in repo[None] or lfdirstate[f] == 'r':
@@ -754,8 +755,8 @@ def overriderevert(orig, ui, repo, ctx, 
             m._fileroots = set(m._files)
             origmatchfn = m.matchfn
             def matchfn(f):
-                if lfutil.isstandin(f):
-                    return (origmatchfn(lfutil.splitstandin(f)) and
+                if storeutil.isstandin(f):
+                    return (origmatchfn(storeutil.splitstandin(f)) and
                             (f in ctx or f in mctx))
                 return origmatchfn(f)
             m.matchfn = matchfn
@@ -766,8 +767,8 @@ def overriderevert(orig, ui, repo, ctx, 
         finally:
             restorematchfn()
 
-        newstandins = lfutil.getstandinsstate(repo)
-        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
+        newstandins = storeutil.getstandinsstate(repo)
+        filelist = storeutil.getlfilestoupdate(oldstandins, newstandins)
         # lfdirstate should be 'normallookup'-ed for updated files,
         # because reverting doesn't touch dirstate for 'normal' files
         # when target revision is explicitly specified: in such case,
@@ -892,7 +893,7 @@ def overriderebase(orig, ui, repo, **opt
         return orig(ui, repo, **opts)
 
     resuming = opts.get('continue')
-    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
+    repo._lfcommithooks.append(storeutil.automatedcommithook(resuming))
     repo._lfstatuswriters.append(lambda *msg, **opts: None)
     try:
         return orig(ui, repo, **opts)
@@ -958,18 +959,18 @@ def overridearchive(orig, repo, dest, no
     for f in ctx:
         ff = ctx.flags(f)
         getdata = ctx[f].data
-        if lfutil.isstandin(f):
+        if storeutil.isstandin(f):
             if node is not None:
-                path = lfutil.findfile(repo, getdata().strip())
+                path = storeutil.findfile(repo, getdata().strip())
 
                 if path is None:
                     raise error.Abort(
                        _('largefile %s not found in repo store or system cache')
-                       % lfutil.splitstandin(f))
+                       % storeutil.splitstandin(f))
             else:
-                path = lfutil.splitstandin(f)
+                path = storeutil.splitstandin(f)
 
-            f = lfutil.splitstandin(f)
+            f = storeutil.splitstandin(f)
 
             getdata = lambda: util.readfile(path)
         write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
@@ -996,7 +997,7 @@ def hgsubrepoarchive(orig, repo, archive
 
     def write(name, mode, islink, getdata):
         # At this point, the standin has been replaced with the largefile name,
-        # so the normal matcher works here without the lfutil variants.
+        # so the normal matcher works here without the storeutil variants.
         if match and not match(f):
             return
         data = getdata()
@@ -1006,18 +1007,18 @@ def hgsubrepoarchive(orig, repo, archive
     for f in ctx:
         ff = ctx.flags(f)
         getdata = ctx[f].data
-        if lfutil.isstandin(f):
+        if storeutil.isstandin(f):
             if ctx.node() is not None:
-                path = lfutil.findfile(repo._repo, getdata().strip())
+                path = storeutil.findfile(repo._repo, getdata().strip())
 
                 if path is None:
                     raise error.Abort(
                        _('largefile %s not found in repo store or system cache')
-                       % lfutil.splitstandin(f))
+                       % storeutil.splitstandin(f))
             else:
-                path = lfutil.splitstandin(f)
+                path = storeutil.splitstandin(f)
 
-            f = lfutil.splitstandin(f)
+            f = storeutil.splitstandin(f)
 
             getdata = lambda: util.readfile(os.path.join(prefix, path))
 
@@ -1059,11 +1060,12 @@ def cmdutilforget(orig, ui, repo, match,
     finally:
         repo.lfstatus = False
     forget = sorted(s.modified + s.added + s.deleted + s.clean)
-    forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
+    forget = [f for f in forget
+              if storeutil.standin(f) in repo[None].manifest()]
 
     for f in forget:
-        if lfutil.standin(f) not in repo.dirstate and not \
-                repo.wvfs.isdir(lfutil.standin(f)):
+        if storeutil.standin(f) not in repo.dirstate and not \
+                repo.wvfs.isdir(storeutil.standin(f)):
             ui.warn(_('not removing %s: file is already untracked\n')
                     % m.rel(f))
             bad.append(f)
@@ -1075,14 +1077,14 @@ def cmdutilforget(orig, ui, repo, match,
     # Need to lock because standin files are deleted then removed from the
     # repository and we could race in-between.
     with repo.wlock():
-        lfdirstate = lfutil.openlfdirstate(ui, repo)
+        lfdirstate = storeutil.openlfdirstate(ui, repo)
         for f in forget:
             if lfdirstate[f] == 'a':
                 lfdirstate.drop(f)
             else:
                 lfdirstate.remove(f)
         lfdirstate.write()
-        standins = [lfutil.standin(f) for f in forget]
+        standins = [storeutil.standin(f) for f in forget]
         for f in standins:
             util.unlinkpath(repo.wjoin(f), ignoremissing=True)
         rejected = repo[None].forget(standins)
@@ -1107,7 +1109,7 @@ def _getoutgoings(repo, other, missing, 
         if k not in knowns:
             knowns.add(k)
             lfhashes.add(lfhash)
-    lfutil.getlfilestoupload(repo, missing, dedup)
+    storeutil.getlfilestoupload(repo, missing, dedup)
     if lfhashes:
         lfexists = basestore._openstore(repo, other).exists(lfhashes)
         for fn, lfhash in knowns:
@@ -1142,7 +1144,7 @@ def outgoinghook(ui, repo, other, opts, 
             ui.status(_('largefiles to upload (%d entities):\n')
                       % (len(lfhashes)))
             for file in sorted(toupload):
-                ui.status(lfutil.splitstandin(file) + '\n')
+                ui.status(storeutil.splitstandin(file) + '\n')
                 showhashes(file)
             ui.status('\n')
 
@@ -1186,10 +1188,10 @@ def scmutiladdremove(orig, repo, matcher
                      similarity=None):
     if opts is None:
         opts = {}
-    if not lfutil.islfilesrepo(repo):
+    if not storeutil.islfilesrepo(repo):
         return orig(repo, matcher, prefix, opts, dry_run, similarity)
     # Get the list of missing largefiles so we can remove them
-    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
+    lfdirstate = storeutil.openlfdirstate(repo.ui, repo)
     unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [],
                                   False, False, False)
 
@@ -1235,7 +1237,7 @@ def overridepurge(orig, ui, repo, *dirs,
                         clean=False, unknown=False, listsubrepos=False):
         r = oldstatus(node1, node2, match, ignored, clean, unknown,
                       listsubrepos)
-        lfdirstate = lfutil.openlfdirstate(ui, repo)
+        lfdirstate = storeutil.openlfdirstate(ui, repo)
         unknown = [f for f in r.unknown if lfdirstate[f] == '?']
         ignored = [f for f in r.ignored if lfdirstate[f] == '?']
         return scmutil.status(r.modified, r.added, r.removed, r.deleted,
@@ -1247,7 +1249,7 @@ def overriderollback(orig, ui, repo, **o
     with repo.wlock():
         before = repo.dirstate.parents()
         orphans = set(f for f in repo.dirstate
-                      if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
+                      if storeutil.isstandin(f) and repo.dirstate[f] != 'r')
         result = orig(ui, repo, **opts)
         after = repo.dirstate.parents()
         if before == after:
@@ -1255,7 +1257,7 @@ def overriderollback(orig, ui, repo, **o
 
         pctx = repo['.']
         for f in repo.dirstate:
-            if lfutil.isstandin(f):
+            if storeutil.isstandin(f):
                 orphans.discard(f)
                 if repo.dirstate[f] == 'r':
                     repo.wvfs.unlinkpath(f, ignoremissing=True)
@@ -1265,15 +1267,15 @@ def overriderollback(orig, ui, repo, **o
                 else:
                     # content of standin is not so important in 'a',
                     # 'm' or 'n' (coming from the 2nd parent) cases
-                    lfutil.writestandin(repo, f, '', False)
+                    storeutil.writestandin(repo, f, '', False)
         for standin in orphans:
             repo.wvfs.unlinkpath(standin, ignoremissing=True)
 
-        lfdirstate = lfutil.openlfdirstate(ui, repo)
+        lfdirstate = storeutil.openlfdirstate(ui, repo)
         orphans = set(lfdirstate)
-        lfiles = lfutil.listlfiles(repo)
+        lfiles = storeutil.listlfiles(repo)
         for file in lfiles:
-            lfutil.synclfdirstate(repo, lfdirstate, file, True)
+            storeutil.synclfdirstate(repo, lfdirstate, file, True)
             orphans.discard(file)
         for lfile in orphans:
             lfdirstate.drop(lfile)
@@ -1282,7 +1284,7 @@ def overriderollback(orig, ui, repo, **o
 
 def overridetransplant(orig, ui, repo, *revs, **opts):
     resuming = opts.get('continue')
-    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
+    repo._lfcommithooks.append(storeutil.automatedcommithook(resuming))
     repo._lfstatuswriters.append(lambda *msg, **opts: None)
     try:
         result = orig(ui, repo, *revs, **opts)
@@ -1300,7 +1302,7 @@ def overridecat(orig, ui, repo, file1, *
     def lfmatchfn(f):
         if origmatchfn(f):
             return True
-        lf = lfutil.splitstandin(f)
+        lf = storeutil.splitstandin(f)
         if lf is None:
             return False
         notbad.add(lf)
@@ -1314,12 +1316,12 @@ def overridecat(orig, ui, repo, file1, *
 
     origvisitdirfn = m.visitdir
     def lfvisitdirfn(dir):
-        if dir == lfutil.shortname:
+        if dir == storeutil.shortname:
             return True
         ret = origvisitdirfn(dir)
         if ret:
             return ret
-        lf = lfutil.splitstandin(dir)
+        lf = storeutil.splitstandin(dir)
         if lf is None:
             return False
         return origvisitdirfn(lf)
@@ -1328,7 +1330,7 @@ def overridecat(orig, ui, repo, file1, *
     for f in ctx.walk(m):
         fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
                                  pathname=f)
-        lf = lfutil.splitstandin(f)
+        lf = storeutil.splitstandin(f)
         if lf is None or origmatchfn(f):
             # duplicating unreachable code from commands.cat
             data = ctx[f].data()
@@ -1336,15 +1338,15 @@ def overridecat(orig, ui, repo, file1, *
                 data = repo.wwritedata(f, data)
             fp.write(data)
         else:
-            hash = lfutil.readstandin(repo, lf, ctx.rev())
-            if not lfutil.inusercache(repo.ui, hash):
+            hash = storeutil.readstandin(repo, lf, ctx.rev())
+            if not storeutil.inusercache(repo.ui, hash):
                 store = basestore._openstore(repo)
                 success, missing = store.get([(lf, hash)])
                 if len(success) != 1:
                     raise error.Abort(
                         _('largefile %s is not in cache and could not be '
                           'downloaded')  % lf)
-            path = lfutil.usercachepath(repo.ui, hash)
+            path = storeutil.usercachepath(repo.ui, hash)
             fpin = open(path, "rb")
             for chunk in util.filechunkiter(fpin, 128 * 1024):
                 fp.write(chunk)
@@ -1374,7 +1376,7 @@ def mergeupdate(orig, repo, node, branch
         # (*) don't care
         # (*1) deprecated, but used internally (e.g: "rebase --collapse")
 
-        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
+        lfdirstate = storeutil.openlfdirstate(repo.ui, repo)
         unsure, s = lfdirstate.status(match_.always(repo.root,
                                                     repo.getcwd()),
                                       [], False, False, False)
@@ -1383,23 +1385,23 @@ def mergeupdate(orig, repo, node, branch
             lfileabs = repo.wvfs.join(lfile)
             if not repo.wvfs.exists(lfileabs):
                 continue
-            lfhash = lfutil.hashrepofile(repo, lfile)
-            standin = lfutil.standin(lfile)
-            lfutil.writestandin(repo, standin, lfhash,
-                                lfutil.getexecutable(lfileabs))
+            lfhash = storeutil.hashrepofile(repo, lfile)
+            standin = storeutil.standin(lfile)
+            storeutil.writestandin(repo, standin, lfhash,
+                                storeutil.getexecutable(lfileabs))
             if (standin in pctx and
-                lfhash == lfutil.readstandin(repo, lfile, '.')):
+                lfhash == storeutil.readstandin(repo, lfile, '.')):
                 lfdirstate.normal(lfile)
         for lfile in s.added:
-            lfutil.updatestandin(repo, lfutil.standin(lfile))
+            storeutil.updatestandin(repo, storeutil.standin(lfile))
         lfdirstate.write()
 
-        oldstandins = lfutil.getstandinsstate(repo)
+        oldstandins = storeutil.getstandinsstate(repo)
 
         result = orig(repo, node, branchmerge, force, *args, **kwargs)
 
-        newstandins = lfutil.getstandinsstate(repo)
-        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
+        newstandins = storeutil.getstandinsstate(repo)
+        filelist = storeutil.getlfilestoupdate(oldstandins, newstandins)
         if branchmerge or force or partial:
             filelist.extend(s.deleted + s.removed)
 
@@ -1411,7 +1413,8 @@ def mergeupdate(orig, repo, node, branch
 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
     result = orig(repo, files, *args, **kwargs)
 
-    filelist = [lfutil.splitstandin(f) for f in files if lfutil.isstandin(f)]
+    filelist = [storeutil.splitstandin(f)
+                for f in files if storeutil.isstandin(f)]
     if filelist:
         lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
                                 printmessage=False, normallookup=True)
diff -r c641b8dfb98c -r 6606554248ea hgext/largefiles/proto.py
--- a/hgext/largefiles/proto.py	Wed May 11 01:46:11 2016 +0000
+++ b/hgext/largefiles/proto.py	Fri May 06 00:49:55 2016 +0200
@@ -12,7 +12,7 @@ from mercurial.i18n import _
 urlerr = util.urlerr
 urlreq = util.urlreq
 
-import lfutil
+import storeutil
 
 LARGEFILES_REQUIRED_MSG = ('\nThis repository uses the largefiles extension.'
                            '\n\nPlease enable it in your Mercurial config '
@@ -28,17 +28,17 @@ def putlfile(repo, proto, sha):
     and into the user cache.'''
     proto.redirect()
 
-    path = lfutil.storepath(repo, sha)
+    path = storeutil.storepath(repo, sha)
     util.makedirs(os.path.dirname(path))
     tmpfp = util.atomictempfile(path, createmode=repo.store.createmode)
 
     try:
         proto.getfile(tmpfp)
         tmpfp._fp.seek(0)
-        if sha != lfutil.hexsha1(tmpfp._fp):
+        if sha != storeutil.hexsha1(tmpfp._fp):
             raise IOError(0, _('largefile contents do not match hash'))
         tmpfp.close()
-        lfutil.linktousercache(repo, sha)
+        storeutil.linktousercache(repo, sha)
     except IOError as e:
         repo.ui.warn(_('largefiles: failed to put %s into store: %s\n') %
                      (sha, e.strerror))
@@ -51,7 +51,7 @@ def putlfile(repo, proto, sha):
 def getlfile(repo, proto, sha):
     '''Server command for retrieving a largefile from the repository-local
     cache or user cache.'''
-    filename = lfutil.findfile(repo, sha)
+    filename = storeutil.findfile(repo, sha)
     if not filename:
         raise error.Abort(_('requested largefile %s not present in cache')
                           % sha)
@@ -76,7 +76,7 @@ def statlfile(repo, proto, sha):
     The value 1 is reserved for mismatched checksum, but that is too expensive
     to be verified on every stat and must be caught be running 'hg verify'
     server side.'''
-    filename = lfutil.findfile(repo, sha)
+    filename = storeutil.findfile(repo, sha)
     if not filename:
         return '2\n'
     return '0\n'
@@ -159,7 +159,7 @@ def capabilities(repo, proto):
 def heads(repo, proto):
     '''Wrap server command - largefile capable clients will know to call
     lheads instead'''
-    if lfutil.islfilesrepo(repo):
+    if storeutil.islfilesrepo(repo):
         return wireproto.ooberror(LARGEFILES_REQUIRED_MSG)
     return wireproto.heads(repo, proto)
 
diff -r c641b8dfb98c -r 6606554248ea hgext/largefiles/remotestore.py
--- a/hgext/largefiles/remotestore.py	Wed May 11 01:46:11 2016 +0000
+++ b/hgext/largefiles/remotestore.py	Fri May 06 00:49:55 2016 +0200
@@ -12,7 +12,7 @@ from mercurial.i18n import _
 urlerr = util.urlerr
 urlreq = util.urlreq
 
-import lfutil
+import storeutil
 import basestore
 
 class remotestore(basestore.basestore):
@@ -37,7 +37,7 @@ class remotestore(basestore.basestore):
         self.ui.debug('remotestore: sendfile(%s, %s)\n' % (filename, hash))
         fd = None
         try:
-            fd = lfutil.httpsendfile(self.ui, filename)
+            fd = storeutil.httpsendfile(self.ui, filename)
             return self._put(hash, fd)
         except IOError as e:
             raise error.Abort(
@@ -63,7 +63,7 @@ class remotestore(basestore.basestore):
         except IOError as e:
             raise basestore.StoreError(filename, hash, self.url, str(e))
 
-        return lfutil.copyandhash(chunks, tmpfile)
+        return storeutil.copyandhash(chunks, tmpfile)
 
     def _verifyfiles(self, contents, filestocheck):
         failed = False
diff -r c641b8dfb98c -r 6606554248ea hgext/largefiles/reposetup.py
--- a/hgext/largefiles/reposetup.py	Wed May 11 01:46:11 2016 +0000
+++ b/hgext/largefiles/reposetup.py	Fri May 06 00:49:55 2016 +0200
@@ -14,7 +14,7 @@ from mercurial.i18n import _
 from mercurial import scmutil, localrepo
 
 import lfcommands
-import lfutil
+import storeutil
 
 def reposetup(ui, repo):
     # wire repositories should be given new wireproto functions
@@ -40,14 +40,15 @@ def reposetup(ui, repo):
                 class lfilesctx(ctx.__class__):
                     def files(self):
                         filenames = super(lfilesctx, self).files()
-                        return [lfutil.splitstandin(f) or f for f in filenames]
+                        return [storeutil.splitstandin(f) or f
+                                for f in filenames]
                     def manifest(self):
                         man1 = super(lfilesctx, self).manifest()
                         class lfilesmanifest(man1.__class__):
                             def __contains__(self, filename):
                                 orig = super(lfilesmanifest, self).__contains__
                                 return (orig(filename) or
-                                        orig(lfutil.standin(filename)))
+                                        orig(storeutil.standin(filename)))
                         man1.__class__ = lfilesmanifest
                         return man1
                     def filectx(self, path, fileid=None, filelog=None):
@@ -61,10 +62,10 @@ def reposetup(ui, repo):
                             # Adding a null character will cause Mercurial to
                             # identify this as a binary file.
                             if filelog is not None:
-                                result = orig(lfutil.standin(path), fileid,
+                                result = orig(storeutil.standin(path), fileid,
                                               filelog)
                             else:
-                                result = orig(lfutil.standin(path), fileid)
+                                result = orig(storeutil.standin(path), fileid)
                             olddata = result.data
                             result.data = lambda: olddata() + '\0'
                         return result
@@ -109,7 +110,7 @@ def reposetup(ui, repo):
                 # command line.  If there were, and they don't match any
                 # largefiles, we should just bail here and let super
                 # handle it -- thus gaining a big performance boost.
-                lfdirstate = lfutil.openlfdirstate(ui, self)
+                lfdirstate = storeutil.openlfdirstate(ui, self)
                 if not match.always():
                     for f in lfdirstate:
                         if match(f):
@@ -126,7 +127,7 @@ def reposetup(ui, repo):
                     newfiles = []
                     dirstate = self.dirstate
                     for f in files:
-                        sf = lfutil.standin(f)
+                        sf = storeutil.standin(f)
                         if sf in dirstate:
                             newfiles.append(sf)
                         elif sf in dirstate.dirs():
@@ -145,7 +146,7 @@ def reposetup(ui, repo):
                 if working:
 
                     def sfindirstate(f):
-                        sf = lfutil.standin(f)
+                        sf = storeutil.standin(f)
                         dirstate = self.dirstate
                         return sf in dirstate or sf in dirstate.dirs()
 
@@ -159,12 +160,12 @@ def reposetup(ui, repo):
                                                          s.removed, s.clean)
                     if parentworking:
                         for lfile in unsure:
-                            standin = lfutil.standin(lfile)
+                            standin = storeutil.standin(lfile)
                             if standin not in ctx1:
                                 # from second parent
                                 modified.append(lfile)
                             elif ctx1[standin].data().strip() \
-                                    != lfutil.hashfile(self.wjoin(lfile)):
+                                    != storeutil.hashfile(self.wjoin(lfile)):
                                 modified.append(lfile)
                             else:
                                 if listclean:
@@ -176,14 +177,14 @@ def reposetup(ui, repo):
                         checkexec = self.dirstate._checkexec
 
                         for lfile in tocheck:
-                            standin = lfutil.standin(lfile)
+                            standin = storeutil.standin(lfile)
                             if standin in ctx1:
                                 abslfile = self.wjoin(lfile)
                                 if ((ctx1[standin].data().strip() !=
-                                     lfutil.hashfile(abslfile)) or
+                                     storeutil.hashfile(abslfile)) or
                                     (checkexec and
                                      ('x' in ctx1.flags(standin)) !=
-                                     bool(lfutil.getexecutable(abslfile)))):
+                                     bool(storeutil.getexecutable(abslfile)))):
                                     modified.append(lfile)
                                 elif listclean:
                                     clean.append(lfile)
@@ -195,12 +196,12 @@ def reposetup(ui, repo):
                         # then, largefiles not managed also in the target
                         # context should be excluded from 'removed'.
                         removed = [lfile for lfile in removed
-                                   if lfutil.standin(lfile) in ctx1]
+                                   if storeutil.standin(lfile) in ctx1]
 
                     # Standins no longer found in lfdirstate has been
                     # removed
-                    for standin in ctx1.walk(lfutil.getstandinmatcher(self)):
-                        lfile = lfutil.splitstandin(standin)
+                    for standin in ctx1.walk(storeutil.getstandinmatcher(self)):
+                        lfile = storeutil.splitstandin(standin)
                         if not match(lfile):
                             continue
                         if lfile not in lfdirstate:
@@ -226,14 +227,14 @@ def reposetup(ui, repo):
                     result[5] = set(result[5]).difference(lfiles)
                     # combine normal files and largefiles
                     normals = [[fn for fn in filelist
-                                if not lfutil.isstandin(fn)]
+                                if not storeutil.isstandin(fn)]
                                for filelist in result]
                     lfstatus = (modified, added, removed, s.deleted, [], [],
                                 clean)
                     result = [sorted(list1 + list2)
                               for (list1, list2) in zip(normals, lfstatus)]
                 else: # not against working directory
-                    result = [[lfutil.splitstandin(f) or f for f in items]
+                    result = [[storeutil.splitstandin(f) or f for f in items]
                               for items in result]
 
                 if wlock:
@@ -251,7 +252,7 @@ def reposetup(ui, repo):
             class lfilesctx(ctx.__class__):
                 def markcommitted(self, node):
                     orig = super(lfilesctx, self).markcommitted
-                    return lfutil.markcommitted(orig, self, node)
+                    return storeutil.markcommitted(orig, self, node)
             ctx.__class__ = lfilesctx
             return node
 
@@ -280,8 +281,8 @@ def reposetup(ui, repo):
             return super(lfilesrepo, self).push(remote, force=force, revs=revs,
                 newbranch=newbranch)
 
-        # TODO: _subdirlfs should be moved into "lfutil.py", because
-        # it is referred only from "lfutil.updatestandinsbymatch"
+        # TODO: _subdirlfs should be moved into "storeutil.py", because
+        # it is referred only from "storeutil.updatestandinsbymatch"
         def _subdirlfs(self, files, lfiles):
             '''
             Adjust matched file list
@@ -299,7 +300,7 @@ def reposetup(ui, repo):
             regulars = []
 
             for f in files:
-                if lfutil.isstandin(f + '/'):
+                if storeutil.isstandin(f + '/'):
                     raise error.Abort(
                         _('file "%s" is a largefile standin') % f,
                         hint=('commit the largefile itself instead'))
@@ -345,7 +346,7 @@ def reposetup(ui, repo):
 
     # stack of hooks being executed before committing.
     # only last element ("_lfcommithooks[-1]") is used for each committing.
-    repo._lfcommithooks = [lfutil.updatestandinsbymatch]
+    repo._lfcommithooks = [storeutil.updatestandinsbymatch]
 
     # Stack of status writer functions taking "*msg, **opts" arguments
     # like "ui.status()". Only last element ("_lfstatuswriters[-1]")
@@ -360,14 +361,15 @@ def reposetup(ui, repo):
         if lfrevs:
             toupload = set()
             addfunc = lambda fn, lfhash: toupload.add(lfhash)
-            lfutil.getlfilestoupload(pushop.repo, lfrevs,
+            storeutil.getlfilestoupload(pushop.repo, lfrevs,
                                      addfunc)
             lfcommands.uploadlfiles(ui, pushop.repo, pushop.remote, toupload)
     repo.prepushoutgoinghooks.add("largefiles", prepushoutgoinghook)
 
     def checkrequireslfiles(ui, repo, **kwargs):
         if 'largefiles' not in repo.requirements and any(
-                lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
+                storeutil.shortname+'/' in f[0]
+                for f in repo.store.datafiles()):
             repo.requirements.add('largefiles')
             repo._writerequirements()
 
diff -r c641b8dfb98c -r 6606554248ea hgext/largefiles/storeutil.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/largefiles/storeutil.py	Fri May 06 00:49:55 2016 +0200
@@ -0,0 +1,653 @@
+# Copyright 2009-2010 Gregory P. Ward
+# Copyright 2009-2010 Intelerad Medical Systems Incorporated
+# Copyright 2010-2011 Fog Creek Software
+# Copyright 2010-2011 Unity Technologies
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''store utility code: must not import other modules in this package.'''
+
+import os
+import platform
+import stat
+import copy
+
+from mercurial import dirstate, httpconnection, match as match_, util, scmutil
+from mercurial.i18n import _
+from mercurial import node, error
+
+shortname = '.hglf'
+shortnameslash = shortname + '/'
+longname = 'largefiles'
+
+# -- Private worker functions ------------------------------------------
+
def getminsize(ui, assumelfiles, opt, default=10):
    '''Return the minimum largefile size (in MB) as a float.

    An explicit command line value *opt* wins; otherwise, when
    *assumelfiles* is set, fall back to the 'largefiles.minsize' config
    entry (or *default*).  Aborts when the configured value is not a
    number, or when no size can be determined at all.
    '''
    size = opt
    if not size and assumelfiles:
        size = ui.config(longname, 'minsize', default=default)
    if size:
        try:
            size = float(size)
        except ValueError:
            raise error.Abort(_('largefiles: size must be number (not %s)\n')
                             % size)
    if size is None:
        raise error.Abort(_('minimum size for largefiles must be specified'))
    return size
+
def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy.

    The destination's parent directories are created as needed; on the
    copy fallback the source's mode bits are propagated to dest.
    """
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        dst = util.atomictempfile(dest)
        # close the source deterministically instead of leaking the file
        # object until the garbage collector runs
        with open(src, 'rb') as srcf:
            for chunk in util.filechunkiter(srcf):
                dst.write(chunk)
        dst.close()
        os.chmod(dest, os.stat(src).st_mode)
+
def usercachepath(ui, hash):
    '''Return the path in the "global" (per-user) largefiles cache for the
    file with the given hash.

    The user cache lets multiple repositories share largefile data,
    preserving both download bandwidth and storage space.'''
    return os.path.join(_usercachedir(ui), hash)
+
def _usercachedir(ui):
    '''Locate the "global" (per-user) largefiles cache directory.

    An explicit 'largefiles.usercache' config path always wins;
    otherwise the conventional per-platform cache location is used.
    Aborts when no location can be determined.'''
    configured = ui.configpath(longname, 'usercache', None)
    if configured:
        return configured
    if os.name == 'nt':
        # prefer the local (non-roaming) application data directory
        appdata = os.getenv('LOCALAPPDATA', os.getenv('APPDATA'))
        if appdata:
            return os.path.join(appdata, longname)
    elif platform.system() == 'Darwin':
        home = os.getenv('HOME')
        if home:
            return os.path.join(home, 'Library', 'Caches', longname)
    elif os.name == 'posix':
        # honour the XDG base directory spec, falling back to ~/.cache
        xdgcache = os.getenv('XDG_CACHE_HOME')
        if xdgcache:
            return os.path.join(xdgcache, longname)
        home = os.getenv('HOME')
        if home:
            return os.path.join(home, '.cache', longname)
    else:
        raise error.Abort(_('unknown operating system: %s\n') % os.name)
    raise error.Abort(_('unknown %s usercache location\n') % longname)
+
def inusercache(ui, hash):
    '''Report whether the per-user cache already holds the given hash.'''
    return os.path.exists(usercachepath(ui, hash))
+
def findfile(repo, hash):
    '''Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally.'''
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_('found %s in store\n') % hash)
        return path
    if inusercache(repo.ui, hash):
        # promote the cached copy into the repo store before returning it
        repo.ui.note(_('found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None
+
class largefilesdirstate(dirstate.dirstate):
    '''Dirstate subclass used to track largefiles.

    Each overridden entry point passes its path argument through
    unixpath() (defined elsewhere in this module; presumably normalizes
    platform separators to '/' — TODO confirm) before delegating to the
    base dirstate.
    '''
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self, f):
        # largefiles are never ignored
        return False
    def write(self, tr=False):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)
+
def openlfdirstate(ui, repo, create=True):
    '''Return a dirstate object tracking largefiles.

    Its root is the repo root, but the state itself is saved in
    .hg/largefiles/dirstate.  When *create* is true and no dirstate file
    exists yet, seed it from the standins currently known to the repo
    dirstate — this covers the first meaningful largefiles operation in
    a new clone.
    '''
    vfs = repo.vfs
    storedir = longname
    opener = scmutil.opener(vfs.join(storedir))
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate)

    if create and not vfs.exists(vfs.join(storedir, 'dirstate')):
        standins = repo.dirstate.walk(getstandinmatcher(repo),
                                      [], False, False)
        if len(standins) > 0:
            vfs.makedirs(storedir)
        for standin in standins:
            lfdirstate.normallookup(splitstandin(standin))
    return lfdirstate
+
def lfdirstatestatus(lfdirstate, repo):
    '''Resolve 'unsure' largefiles into clean or modified by comparing
    working-copy hashes against the standins in the working context;
    return the (updated) status.'''
    wctx = repo['.']
    unsure, s = lfdirstate.status(match_.always(repo.root, repo.getcwd()),
                                  [], False, False, False)
    for lfile in unsure:
        try:
            fctx = wctx[standin(lfile)]
        except LookupError:
            fctx = None
        if fctx and fctx.data().strip() == hashfile(repo.wjoin(lfile)):
            # hash matches the standin: mark it clean in the lfdirstate too
            s.clean.append(lfile)
            lfdirstate.normal(lfile)
        else:
            s.modified.append(lfile)
    return s
+
def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''
    if matcher is None:
        matcher = getstandinmatcher(repo)

    ctx = repo[rev]
    # working-directory files in '?' (unknown) state are skipped
    return [splitstandin(f) for f in ctx.walk(matcher)
            if rev is not None or repo.dirstate[f] != '?']
+
def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the
    repository-local store (not the user cache — storepath() resolves
    inside the repository, or the share source for shared repos).

    With forcelocal=True, check this repository's own store even when it
    is a share.'''
    return os.path.exists(storepath(repo, hash, forcelocal))
+
def storepath(repo, hash, forcelocal=False):
    '''Return the correct location in the repository largefiles cache for a
    file with the given hash.

    For shared repositories the store lives in the share source, unless
    forcelocal is set.'''
    if repo.shared() and not forcelocal:
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.join(longname, hash)
+
def findstorepath(repo, hash):
    '''Search through the local store path(s) to find the file for the given
    hash.  If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    '''
    # For shared repos, the primary store is in the share source.  But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        # must return a (path, exists) tuple here as well: callers such
        # as findfile() unpack the result, and a bare string would
        # either mis-unpack or raise
        return (storepath(repo, hash, True), True)

    return (path, False)
+
def copyfromcache(repo, hash, filename):
    '''Copy the largefile with the given hash from the repo store or the
    user cache into *filename* in the working directory.

    Return True on success.  Return False when the file is found in
    neither cache (which should not happen: callers are expected to
    ensure the largefile is cached first), or when the copied data fails
    its hash check (the partial copy is then removed).'''
    wvfs = repo.wvfs
    srcpath = findfile(repo, hash)
    if srcpath is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # the write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(srcpath, 'rb') as srcfd:
        with wvfs(filename, 'wb') as destfd:
            gothash = copyandhash(srcfd, destfd)
    if gothash != hash:
        repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
                     % (filename, srcpath, gothash))
        wvfs.unlink(filename)
        return False
    return True
+
def copytostore(repo, rev, file, uploaded=False):
    '''Copy the largefile standing in for *file* at *rev* from the working
    directory into the repo store; no-op when the hash is already
    stored, warn when the working-copy file is missing.'''
    hash = readstandin(repo, file, rev)
    if instore(repo, hash):
        return
    wvfs = repo.wvfs
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), hash)
    else:
        repo.ui.warn(_("%s: largefile %s not available from local store\n") %
                     (file, hash))
+
def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''
    ctx = repo[node]
    for filename in ctx.files():
        # only standins still present in the revision's manifest count
        if isstandin(filename) and filename in ctx.manifest():
            copytostore(repo, ctx.node(), splitstandin(filename))
+
def copytostoreabsolute(repo, file, hash):
    '''Copy the largefile at the absolute path *file* into the repo store
    under *hash*.

    When the user cache already has the data, hardlink/copy from there;
    otherwise copy the file into the store atomically and then populate
    the user cache from the store.'''
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        dst = util.atomictempfile(storepath(repo, hash),
                                  createmode=repo.store.createmode)
        # close the source deterministically instead of leaking the file
        # object until the garbage collector runs
        with open(file, 'rb') as srcf:
            for chunk in util.filechunkiter(srcf):
                dst.write(chunk)
        dst.close()
        linktousercache(repo, hash)
+
def linktousercache(repo, hash):
    '''Link / copy the largefile with the specified hash from the store
    to the cache.'''
    cachedpath = usercachepath(repo.ui, hash)
    link(storepath(repo, hash), cachedpath)
+
def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    # suppress warnings about missing files or directories
    nowarn = lambda f, msg: None

    if not rmatcher or rmatcher.always():
        # no patterns: match the whole standin directory, relative to the
        # repo root
        return scmutil.match(repo[None], [wvfs.join(shortname)],
                             badfn=nowarn)

    pats = [wvfs.join(shortname, pat) for pat in rmatcher.files()]
    if not pats:
        pats = [wvfs.join(shortname)]
    match = scmutil.match(repo[None], pats, badfn=nowarn)
    # if pats is empty, it would incorrectly always match, so clear _always
    match._always = False
    return match
+
def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher)
    standinmatch = smatcher.matchfn
    def composedmatchfn(f):
        # a path matches iff it is a standin whose largefile name is
        # accepted by rmatcher
        return standinmatch(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composedmatchfn
    return smatcher
+
def standin(filename):
    '''Return the repo-relative standin path for the specified big file.

    Kept repo-relative (not absolute) because some callers — e.g.
    addlargefiles — pass the result to repo[None].add(); callers that
    want an absolute path can apply repo.wjoin() themselves.  The join
    uses '/' because that is what dirstate always uses, even on
    Windows, and any existing platform separators in *filename* are
    converted first in case it came from an external source such as the
    command line.
    '''
    return shortnameslash + util.pconvert(filename)
+
+def isstandin(filename):
+    '''Return true if filename is a big file standin. filename must be
+    in Mercurial's internal form (slash-separated).'''
+    # Purely a path-prefix test; does not consult dirstate or the store.
+    return filename.startswith(shortnameslash)
+
+def splitstandin(filename):
+    '''Return the largefile path for the given standin path, or None if
+    filename does not live under the standin directory.'''
+    # Split on / because that's what dirstate always uses, even on Windows.
+    # Change local separator to / first just in case we are passed filenames
+    # from an external source (like the command line).
+    bits = util.pconvert(filename).split('/', 1)
+    if len(bits) == 2 and bits[0] == shortname:
+        return bits[1]
+    else:
+        return None
+
+def updatestandin(repo, standin):
+    '''Re-hash the largefile backing the given standin and rewrite the
+    standin with the new hash; abort if the largefile is missing from
+    the working directory.'''
+    # NOTE(review): splitstandin() is computed three times here; it could
+    # be hoisted into a local.
+    file = repo.wjoin(splitstandin(standin))
+    if repo.wvfs.exists(splitstandin(standin)):
+        hash = hashfile(file)
+        executable = getexecutable(file)
+        writestandin(repo, standin, hash, executable)
+    else:
+        raise error.Abort(_('%s: file not found!') % splitstandin(standin))
+
+def readstandin(repo, filename, node=None):
+    '''read hex hash from standin for filename at given node, or working
+    directory if no node is given'''
+    # strip() drops the trailing newline appended by writestandin()
+    return repo[node][standin(filename)].data().strip()
+
+def writestandin(repo, standin, hash, executable):
+    '''write hash to <repo.root>/<standin>'''
+    # the 'x' flag marks the standin executable so the flag round-trips
+    # through getexecutable()/updatestandin()
+    repo.wwrite(standin, hash + '\n', executable and 'x' or '')
+
+def copyandhash(instream, outfile):
+    '''Read bytes from instream (iterable) and write them to outfile,
+    computing the SHA-1 hash of the data along the way. Return the hash.
+
+    outfile is left open; closing it is the caller's responsibility.'''
+    hasher = util.sha1('')
+    for data in instream:
+        hasher.update(data)
+        outfile.write(data)
+    return hasher.hexdigest()
+
+def hashrepofile(repo, file):
+    '''Return the SHA-1 hex digest of the repo-relative file in the
+    working directory (empty string if it does not exist).'''
+    return hashfile(repo.wjoin(file))
+
+def hashfile(file):
+    '''Return the SHA-1 hex digest of the named file's contents, or the
+    empty string if the file does not exist.'''
+    if not os.path.exists(file):
+        return ''
+    hasher = util.sha1('')
+    # NOTE(review): no try/finally, so fd leaks if reading raises
+    fd = open(file, 'rb')
+    # hash in 128k chunks to bound memory use on large files
+    for data in util.filechunkiter(fd, 128 * 1024):
+        hasher.update(data)
+    fd.close()
+    return hasher.hexdigest()
+
+def getexecutable(filename):
+    '''Return a truthy value iff filename is executable by user, group
+    AND others.
+
+    Note the result is an int bitmask (or 0), not a bool.'''
+    mode = os.stat(filename).st_mode
+    return ((mode & stat.S_IXUSR) and
+            (mode & stat.S_IXGRP) and
+            (mode & stat.S_IXOTH))
+
+def urljoin(first, second, *arg):
+    '''Join two or more URL components, ensuring exactly one slash
+    between each pair of components.'''
+    def join(left, right):
+        # normalize to a single '/' at the seam
+        if not left.endswith('/'):
+            left += '/'
+        if right.startswith('/'):
+            right = right[1:]
+        return left + right
+
+    url = join(first, second)
+    for a in arg:
+        url = join(url, a)
+    return url
+
+def hexsha1(data):
+    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
+    object data"""
+    h = util.sha1()
+    # consume in chunks rather than slurping the whole file
+    for chunk in util.filechunkiter(data):
+        h.update(chunk)
+    return h.hexdigest()
+
+def httpsendfile(ui, filename):
+    '''Return an httpsendfile object wrapping filename, opened for binary
+    reading, for uploading over HTTP.'''
+    return httpconnection.httpsendfile(ui, filename, 'rb')
+
+def unixpath(path):
+    '''Return a version of path normalized for use with the lfdirstate.'''
+    # normpath collapses '..'/'.' segments; pconvert forces '/' separators
+    return util.pconvert(os.path.normpath(path))
+
+def islfilesrepo(repo):
+    '''Return true if the repo is a largefile repo.'''
+    # fast path: the requirement is recorded and the store contains at
+    # least one standin
+    if ('largefiles' in repo.requirements and
+            any(shortnameslash in f[0] for f in repo.store.datafiles())):
+        return True
+
+    # otherwise fall back to checking the largefiles dirstate for any
+    # tracked entry
+    return any(openlfdirstate(repo.ui, repo, False))
+
+class storeprotonotcapable(Exception):
+    '''Raised when a remote peer lacks the largefiles store capabilities
+    required by the caller (see wirestore); storetypes lists whatever
+    capabilities the peer did advertise.'''
+    def __init__(self, storetypes):
+        self.storetypes = storetypes
+
+def getstandinsstate(repo):
+    '''Return a list of (lfile, hash) pairs for every standin tracked in
+    the dirstate; hash is None when the standin cannot be read.'''
+    standins = []
+    matcher = getstandinmatcher(repo)
+    for standin in repo.dirstate.walk(matcher, [], False, False):
+        lfile = splitstandin(standin)
+        try:
+            hash = readstandin(repo, lfile)
+        except IOError:
+            # standin missing/unreadable: record the file with no hash
+            hash = None
+        standins.append((lfile, hash))
+    return standins
+
+def synclfdirstate(repo, lfdirstate, lfile, normallookup):
+    '''Copy the dirstate status of lfile's standin into the largefiles
+    dirstate.
+
+    If normallookup is true, a clean ('n') standin still only gets
+    normallookup treatment, forcing a re-examination later.'''
+    lfstandin = standin(lfile)
+    if lfstandin in repo.dirstate:
+        # raw dirstate tuple: index 0 is the state char, index 3 the mtime
+        stat = repo.dirstate._map[lfstandin]
+        state, mtime = stat[0], stat[3]
+    else:
+        # standin untracked
+        state, mtime = '?', -1
+    if state == 'n':
+        if (normallookup or mtime < 0 or
+            not repo.wvfs.exists(lfile)):
+            # state 'n' doesn't ensure 'clean' in this case
+            lfdirstate.normallookup(lfile)
+        else:
+            lfdirstate.normal(lfile)
+    elif state == 'm':   # merged
+        lfdirstate.normallookup(lfile)
+    elif state == 'r':   # removed
+        lfdirstate.remove(lfile)
+    elif state == 'a':   # added
+        lfdirstate.add(lfile)
+    elif state == '?':   # untracked
+        lfdirstate.drop(lfile)
+
+def markcommitted(orig, ctx, node):
+    '''Wrapper around the original markcommitted (passed in as ``orig``):
+    after delegating, sync the largefiles dirstate for every standin
+    touched by the commit and copy the largefiles into the store.'''
+    repo = ctx.repo()
+
+    orig(node)
+
+    # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
+    # because files coming from the 2nd parent are omitted in the latter.
+    #
+    # The former should be used to get targets of "synclfdirstate",
+    # because such files:
+    # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
+    # - have to be marked as "n" after commit, but
+    # - aren't listed in "repo[node].files()"
+
+    lfdirstate = openlfdirstate(repo.ui, repo)
+    for f in ctx.files():
+        if isstandin(f):
+            lfile = splitstandin(f)
+            synclfdirstate(repo, lfdirstate, lfile, False)
+    lfdirstate.write()
+
+    # As part of committing, copy all of the largefiles into the cache.
+    copyalltostore(repo, node)
+
+def getlfilestoupdate(oldstandins, newstandins):
+    '''Given two (lfile, hash) snapshots from getstandinsstate(), return
+    the de-duplicated list of largefile names present or changed in
+    exactly one of them.'''
+    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
+    filelist = []
+    for f in changedstandins:
+        # f is an (lfile, hash) pair; collect each lfile name once
+        if f[0] not in filelist:
+            filelist.append(f[0])
+    return filelist
+
+def getlfilestoupload(repo, missing, addfunc):
+    '''For every revision in ``missing``, invoke ``addfunc(standin,
+    hash)`` for each standin changed by that revision, reporting
+    progress along the way.'''
+    for i, n in enumerate(missing):
+        repo.ui.progress(_('finding outgoing largefiles'), i,
+            unit=_('revisions'), total=len(missing))
+        parents = [p for p in repo[n].parents() if p != node.nullid]
+
+        # temporarily disable largefiles status processing while looking
+        # up the changectx, restoring the previous setting afterwards
+        oldlfstatus = repo.lfstatus
+        repo.lfstatus = False
+        try:
+            ctx = repo[n]
+        finally:
+            repo.lfstatus = oldlfstatus
+
+        files = set(ctx.files())
+        if len(parents) == 2:
+            # For merges, ctx.files() is not sufficient: also pick up
+            # files absent from the merge result or differing from
+            # either parent's version.
+            mc = ctx.manifest()
+            mp1 = ctx.parents()[0].manifest()
+            mp2 = ctx.parents()[1].manifest()
+            for f in mp1:
+                if f not in mc:
+                    files.add(f)
+            for f in mp2:
+                if f not in mc:
+                    files.add(f)
+            for f in mc:
+                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
+                    files.add(f)
+        for fn in files:
+            if isstandin(fn) and fn in ctx:
+                # standin content is the largefile's hash (plus newline)
+                addfunc(fn, ctx[fn].data().strip())
+    # a final progress() call with pos=None closes the progress topic
+    repo.ui.progress(_('finding outgoing largefiles'), None)
+
+def updatestandinsbymatch(repo, match):
+    '''Update standins in the working directory according to specified match
+
+    This returns (possibly modified) ``match`` object to be used for
+    subsequent commit process.
+    '''
+
+    ui = repo.ui
+
+    # Case 1: user calls commit with no specific files or
+    # include/exclude patterns: refresh and commit all files that
+    # are "dirty".
+    if match is None or match.always():
+        # Spend a bit of time here to get a list of files we know
+        # are modified so we can compare only against those.
+        # It can cost a lot of time (several seconds)
+        # otherwise to update all standins if the largefiles are
+        # large.
+        lfdirstate = openlfdirstate(ui, repo)
+        dirtymatch = match_.always(repo.root, repo.getcwd())
+        unsure, s = lfdirstate.status(dirtymatch, [], False, False,
+                                      False)
+        modifiedfiles = unsure + s.modified + s.added + s.removed
+        lfiles = listlfiles(repo)
+        # this only loops through largefiles that exist (not
+        # removed/renamed)
+        for lfile in lfiles:
+            if lfile in modifiedfiles:
+                if repo.wvfs.exists(standin(lfile)):
+                    # this handles the case where a rebase is being
+                    # performed and the working copy is not updated
+                    # yet.
+                    if repo.wvfs.exists(lfile):
+                        updatestandin(repo,
+                            standin(lfile))
+
+        return match
+
+    lfiles = listlfiles(repo)
+    # expand any directory patterns to the largefiles they contain
+    # (presumably what _subdirlfs does -- defined on the wrapped repo)
+    match._files = repo._subdirlfs(match.files(), lfiles)
+
+    # Case 2: user calls commit with specified patterns: refresh
+    # any matching big files.
+    smatcher = composestandinmatcher(repo, match)
+    standins = repo.dirstate.walk(smatcher, [], False, False)
+
+    # No matching big files: get out of the way and pass control to
+    # the usual commit() method.
+    if not standins:
+        return match
+
+    # Refresh all matching big files.  It's possible that the
+    # commit will end up failing, in which case the big files will
+    # stay refreshed.  No harm done: the user modified them and
+    # asked to commit them, so sooner or later we're going to
+    # refresh the standins.  Might as well leave them refreshed.
+    lfdirstate = openlfdirstate(ui, repo)
+    for fstandin in standins:
+        lfile = splitstandin(fstandin)
+        if lfdirstate[lfile] != 'r':
+            updatestandin(repo, fstandin)
+
+    # Cook up a new matcher that only matches regular files or
+    # standins corresponding to the big files requested by the
+    # user.  Have to modify _files to prevent commit() from
+    # complaining "not tracked" for big files.
+    match = copy.copy(match)
+    origmatchfn = match.matchfn
+
+    # Check both the list of largefiles and the list of
+    # standins because if a largefile was removed, it
+    # won't be in the list of largefiles at this point
+    match._files += sorted(standins)
+
+    actualfiles = []
+    for f in match._files:
+        fstandin = standin(f)
+
+        # For largefiles, only one of the normal and standin should be
+        # committed (except if one of them is a remove).  In the case of a
+        # standin removal, drop the normal file if it is unknown to dirstate.
+        # Thus, skip plain largefile names but keep the standin.
+        if f in lfiles or fstandin in standins:
+            if repo.dirstate[fstandin] != 'r':
+                if repo.dirstate[f] != 'r':
+                    continue
+            elif repo.dirstate[f] == '?':
+                continue
+
+        actualfiles.append(f)
+    match._files = actualfiles
+
+    def matchfn(f):
+        # accept normal files the original matcher accepted, plus the
+        # standins collected above; reject plain largefile names
+        if origmatchfn(f):
+            return f not in lfiles
+        else:
+            return f in standins
+
+    match.matchfn = matchfn
+
+    return match
+
+class automatedcommithook(object):
+    '''Stateful hook to update standins at the 1st commit of resuming
+
+    For efficiency, updating standins in the working directory should
+    be avoided while automated committing (like rebase, transplant and
+    so on), because they should be updated before committing.
+
+    But the 1st commit of resuming automated committing (e.g. ``rebase
+    --continue``) should update them, because largefiles may be
+    modified manually.
+    '''
+    def __init__(self, resuming):
+        # true until the first __call__ after a resume
+        self.resuming = resuming
+
+    def __call__(self, repo, match):
+        '''Return the match to use for the commit, refreshing standins
+        only on the first invocation after a resume.'''
+        if self.resuming:
+            self.resuming = False # avoids updating at subsequent commits
+            return updatestandinsbymatch(repo, match)
+        else:
+            return match
+
+def getstatuswriter(ui, repo, forcibly=None):
+    '''Return the function to write largefiles specific status out
+
+    If ``forcibly`` is ``None``, this returns the last element of
+    ``repo._lfstatuswriters`` as "default" writer function.
+
+    Otherwise, this returns the function to always write out (or
+    ignore if ``not forcibly``) status.
+    '''
+    # _largefilesenabled marks a repo the extension has set up; only such
+    # repos carry the _lfstatuswriters stack
+    if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
+        return repo._lfstatuswriters[-1]
+    else:
+        if forcibly:
+            return ui.status # forcibly WRITE OUT
+        else:
+            return lambda *msg, **opts: None # forcibly IGNORE
diff -r c641b8dfb98c -r 6606554248ea hgext/largefiles/wirestore.py
--- a/hgext/largefiles/wirestore.py	Wed May 11 01:46:11 2016 +0000
+++ b/hgext/largefiles/wirestore.py	Fri May 06 00:49:55 2016 +0200
@@ -5,17 +5,17 @@
 
 '''largefile store working over Mercurial's wire protocol'''
 
-import lfutil
+import storeutil
 import remotestore
 
 class wirestore(remotestore.remotestore):
     def __init__(self, ui, repo, remote):
         cap = remote.capable('largefiles')
         if not cap:
-            raise lfutil.storeprotonotcapable([])
+            raise storeutil.storeprotonotcapable([])
         storetypes = cap.split(',')
         if 'serve' not in storetypes:
-            raise lfutil.storeprotonotcapable(storetypes)
+            raise storeutil.storeprotonotcapable(storetypes)
         self.remote = remote
         super(wirestore, self).__init__(ui, repo, remote.url())
 
diff -r c641b8dfb98c -r 6606554248ea tests/test-check-py3-compat.t
--- a/tests/test-check-py3-compat.t	Wed May 11 01:46:11 2016 +0000
+++ b/tests/test-check-py3-compat.t	Fri May 06 00:49:55 2016 +0200
@@ -13,12 +13,12 @@
   hgext/largefiles/__init__.py not using absolute_import
   hgext/largefiles/basestore.py not using absolute_import
   hgext/largefiles/lfcommands.py not using absolute_import
-  hgext/largefiles/lfutil.py not using absolute_import
   hgext/largefiles/localstore.py not using absolute_import
   hgext/largefiles/overrides.py not using absolute_import
   hgext/largefiles/proto.py not using absolute_import
   hgext/largefiles/remotestore.py not using absolute_import
   hgext/largefiles/reposetup.py not using absolute_import
+  hgext/largefiles/storeutil.py not using absolute_import
   hgext/largefiles/uisetup.py not using absolute_import
   hgext/largefiles/wirestore.py not using absolute_import
   hgext/share.py not using absolute_import
@@ -74,14 +74,14 @@
   hgext/keyword.py: error importing: <ImportError> No module named 'BaseHTTPServer' (error at common.py:*) (glob)
   hgext/largefiles/basestore.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at bundlerepo.py:*) (glob)
   hgext/largefiles/lfcommands.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at bundlerepo.py:*) (glob)
-  hgext/largefiles/lfutil.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
-  hgext/largefiles/localstore.py: error importing module: <ImportError> No module named 'lfutil' (line *) (glob)
+  hgext/largefiles/localstore.py: error importing module: <ImportError> No module named 'storeutil' (line *) (glob)
   hgext/largefiles/overrides.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at bundlerepo.py:*) (glob)
   hgext/largefiles/proto.py: error importing: <ImportError> No module named 'httplib' (error at httppeer.py:*) (glob)
   hgext/largefiles/remotestore.py: error importing: <SyntaxError> invalid syntax (bundle*.py, line *) (error at wireproto.py:*) (glob)
   hgext/largefiles/reposetup.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
+  hgext/largefiles/storeutil.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
   hgext/largefiles/uisetup.py: error importing module: <SyntaxError> invalid syntax (archival.py, line *) (line *) (glob)
-  hgext/largefiles/wirestore.py: error importing module: <ImportError> No module named 'lfutil' (line *) (glob)
+  hgext/largefiles/wirestore.py: error importing module: <ImportError> No module named 'storeutil' (line *) (glob)
   hgext/mq.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
   hgext/notify.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
   hgext/pager.py: error importing: <AttributeError> 'dict' object has no attribute 'iteritems' (error at revset.py:*) (glob)
diff -r c641b8dfb98c -r 6606554248ea tests/test-largefiles-small-disk.t
--- a/tests/test-largefiles-small-disk.t	Wed May 11 01:46:11 2016 +0000
+++ b/tests/test-largefiles-small-disk.t	Fri May 06 00:49:55 2016 +0200
@@ -46,7 +46,7 @@ Make the commit with space on the device
 
   $ hg commit -m big
 
-Now make a clone with a full disk, and make sure lfutil.link function
+Now make a clone with a full disk, and make sure storeutil.link function
 makes copies instead of hardlinks:
 
   $ cd ..


More information about the Mercurial-devel mailing list