[PATCH evolve-ext v2] py3: broad pass for python3 compatibility
Pierre-Yves David
pierre-yves.david at ens-lyon.org
Tue Jul 2 04:42:49 EDT 2019
This patch doesn't apply anymore :-/ It is also very invasive and will be
painful for all other contributors.
Do you think we could get to a point where no manual tweaking is needed?
(It could be as simple as adding a dedicated comment to each line that
needs to be skipped.) If we have a script-only approach, we could use the
format-source extension for that.
On 7/1/19 7:39 AM, Ludovic Chabant wrote:
> # HG changeset patch
> # User Ludovic Chabant <ludovic at chabant.com>
> # Date 1561959530 0
> # Mon Jul 01 05:38:50 2019 +0000
> # Branch stable
> # Node ID 89e3ab4dcbc56ee72ce1d4d17527337e01d99467
> # Parent 90daf413dfc7a7e4762e6445f05c52b123c6188f
> py3: broad pass for python3 compatibility
>
> - ran mercurial's byteify-strings script
> - excluded some places where we use strings to pass to setattr()
> - re-decode some template functions' docstrings that were previously encoded
> (probably by the hgloader?)
>
> diff --git a/hgext3rd/evolve/__init__.py b/hgext3rd/evolve/__init__.py
> --- a/hgext3rd/evolve/__init__.py
> +++ b/hgext3rd/evolve/__init__.py
> @@ -258,7 +258,7 @@
> registrar.templatekeyword # new in hg-3.8
> except ImportError:
> from . import metadata
> - raise ImportError('evolve needs Mercurial version %s or above' %
> + raise ImportError(b'evolve needs Mercurial version %s or above' %
> min(metadata.testedwith.split()))
>
> import mercurial
> @@ -311,25 +311,25 @@
> buglink = metadata.buglink
>
> # Flags for enabling optional parts of evolve
> -commandopt = 'allnewcommands'
> +commandopt = b'allnewcommands'
>
> obsexcmsg = utility.obsexcmsg
> shorttemplate = utility.shorttemplate
>
> -colortable = {'evolve.node': 'yellow',
> - 'evolve.user': 'green',
> - 'evolve.rev': 'blue',
> - 'evolve.short_description': '',
> - 'evolve.date': 'cyan',
> - 'evolve.current_rev': 'bold',
> - 'evolve.verb': '',
> - 'evolve.operation': 'bold'
> +colortable = {b'evolve.node': b'yellow',
> + b'evolve.user': b'green',
> + b'evolve.rev': b'blue',
> + b'evolve.short_description': b'',
> + b'evolve.date': b'cyan',
> + b'evolve.current_rev': b'bold',
> + b'evolve.verb': b'',
> + b'evolve.operation': b'bold'
> }
>
> _pack = struct.pack
> _unpack = struct.unpack
>
> -aliases, entry = cmdutil.findcmd('commit', commands.table)
> +aliases, entry = cmdutil.findcmd(b'commit', commands.table)
> commitopts3 = cmdrewrite.commitopts3
> interactiveopt = cmdrewrite.interactiveopt
> rewrite = rewriteutil.rewrite
> @@ -362,13 +362,13 @@
> templatekeyword = eh.templatekeyword
>
> # Configuration
> -eh.configitem('experimental', 'evolutioncommands', [])
> -eh.configitem('experimental', 'evolution.allnewcommands', None)
> -eh.configitem('experimental', 'prunestrip', False)
> +eh.configitem(b'experimental', b'evolutioncommands', [])
> +eh.configitem(b'experimental', b'evolution.allnewcommands', None)
> +eh.configitem(b'experimental', b'prunestrip', False)
>
> # pre hg 4.0 compat
>
> -if not util.safehasattr(dirstate.dirstate, 'parentchange'):
> +if not util.safehasattr(dirstate.dirstate, b'parentchange'):
> import contextlib
>
> @contextlib.contextmanager
> @@ -397,14 +397,14 @@
> def _configureoptions(ui, repo):
> # If no capabilities are specified, enable everything.
> # This is so existing evolve users don't need to change their config.
> - evolveopts = repo.ui.configlist('experimental', 'evolution')
> + evolveopts = repo.ui.configlist(b'experimental', b'evolution')
> if not evolveopts:
> - evolveopts = ['all']
> - repo.ui.setconfig('experimental', 'evolution', evolveopts, 'evolve')
> - if obsolete.isenabled(repo, 'exchange'):
> + evolveopts = [b'all']
> + repo.ui.setconfig(b'experimental', b'evolution', evolveopts, b'evolve')
> + if obsolete.isenabled(repo, b'exchange'):
> # if no config explicitly set, disable bundle1
> - if not isinstance(repo.ui.config('server', 'bundle1'), str):
> - repo.ui.setconfig('server', 'bundle1', False)
> + if not isinstance(repo.ui.config(b'server', b'bundle1'), str):
> + repo.ui.setconfig(b'server', b'bundle1', False)
>
> class trdescrepo(repo.__class__):
>
> @@ -422,18 +422,18 @@
> # This must be in the same function as the option configuration above to
> # guarantee it happens after the above configuration, but before the
> # extsetup functions.
> - evolvecommands = ui.configlist('experimental', 'evolutioncommands')
> - evolveopts = ui.configlist('experimental', 'evolution')
> + evolvecommands = ui.configlist(b'experimental', b'evolutioncommands')
> + evolveopts = ui.configlist(b'experimental', b'evolution')
> if evolveopts and (commandopt not in evolveopts
> - and 'all' not in evolveopts):
> + and b'all' not in evolveopts):
> # We build whitelist containing the commands we want to enable
> whitelist = set()
> for cmd in evolvecommands:
> matchingevolvecommands = [e for e in cmdtable.keys() if cmd in e]
> if not matchingevolvecommands:
> - raise error.Abort(_('unknown command: %s') % cmd)
> + raise error.Abort(_(b'unknown command: %s') % cmd)
> elif len(matchingevolvecommands) > 1:
> - msg = _('ambiguous command specification: "%s" matches %r')
> + msg = _(b'ambiguous command specification: "%s" matches %r')
> raise error.Abort(msg % (cmd, matchingevolvecommands))
> else:
> whitelist.add(matchingevolvecommands[0])
> @@ -464,10 +464,10 @@
> @eh.uisetup
> def setupparentcommand(ui):
>
> - _alias, statuscmd = cmdutil.findcmd('status', commands.table)
> - pstatusopts = [o for o in statuscmd[1] if o[1] != 'rev']
> + _alias, statuscmd = cmdutil.findcmd(b'status', commands.table)
> + pstatusopts = [o for o in statuscmd[1] if o[1] != b'rev']
>
> - @eh.command('pstatus', pstatusopts)
> + @eh.command(b'pstatus', pstatusopts)
> def pstatus(ui, repo, *args, **kwargs):
> """show status combining committed and uncommited changes
>
> @@ -476,13 +476,13 @@
> match the content of the commit that a bare :hg:`amend` will creates.
>
> See :hg:`help status` for details."""
> - kwargs['rev'] = ['.^']
> + kwargs[b'rev'] = [b'.^']
> return statuscmd[0](ui, repo, *args, **kwargs)
>
> - _alias, diffcmd = cmdutil.findcmd('diff', commands.table)
> - pdiffopts = [o for o in diffcmd[1] if o[1] != 'rev']
> + _alias, diffcmd = cmdutil.findcmd(b'diff', commands.table)
> + pdiffopts = [o for o in diffcmd[1] if o[1] != b'rev']
>
> - @eh.command('pdiff', pdiffopts)
> + @eh.command(b'pdiff', pdiffopts)
> def pdiff(ui, repo, *args, **kwargs):
> """show diff combining committed and uncommited changes
>
> @@ -491,32 +491,32 @@
> match the content of the commit that a bare :hg:`amend` will creates.
>
> See :hg:`help diff` for details."""
> - kwargs['rev'] = ['.^']
> + kwargs[b'rev'] = [b'.^']
> return diffcmd[0](ui, repo, *args, **kwargs)
>
> @eh.uisetup
> def _installalias(ui):
> - if ui.config('alias', 'odiff', None) is None:
> - ui.setconfig('alias', 'odiff',
> - "diff --hidden --rev 'limit(predecessors(.),1)' --rev .",
> - 'evolve')
> + if ui.config(b'alias', b'odiff', None) is None:
> + ui.setconfig(b'alias', b'odiff',
> + b"diff --hidden --rev 'limit(predecessors(.),1)' --rev .",
> + b'evolve')
>
> ### Unstable revset symbol
>
> -@eh.revsetpredicate('unstable()')
> +@eh.revsetpredicate(b'unstable()')
> def revsetunstable(repo, subset, x):
> """Changesets with instabilities.
> """
> - revset.getargs(x, 0, 0, 'unstable takes no arguments')
> + revset.getargs(x, 0, 0, b'unstable takes no arguments')
> troubled = set()
> - troubled.update(getrevs(repo, 'orphan'))
> - troubled.update(getrevs(repo, 'phasedivergent'))
> - troubled.update(getrevs(repo, 'contentdivergent'))
> + troubled.update(getrevs(repo, b'orphan'))
> + troubled.update(getrevs(repo, b'phasedivergent'))
> + troubled.update(getrevs(repo, b'contentdivergent'))
> troubled = revset.baseset(troubled)
> troubled.sort() # set is non-ordered, enforce order
> return subset & troubled
>
> -@eh.revsetpredicate('troubled()') # legacy name
> +@eh.revsetpredicate(b'troubled()') # legacy name
> def revsettroubled(repo, subset, x):
> return revsetunstable(repo, subset, x)
>
> @@ -615,17 +615,17 @@
>
>
> ### XXX I'm not sure this revset is useful
> -@eh.revsetpredicate('suspended()')
> +@eh.revsetpredicate(b'suspended()')
> def revsetsuspended(repo, subset, x):
> """Obsolete changesets with non-obsolete descendants.
> """
> - revset.getargs(x, 0, 0, 'suspended takes no arguments')
> - suspended = revset.baseset(getrevs(repo, 'suspended'))
> + revset.getargs(x, 0, 0, b'suspended takes no arguments')
> + suspended = revset.baseset(getrevs(repo, b'suspended'))
> suspended.sort()
> return subset & suspended
>
>
> -@eh.revsetpredicate('predecessors(set)')
> +@eh.revsetpredicate(b'predecessors(set)')
> def revsetpredecessors(repo, subset, x):
> """Immediate predecessors of changesets in set.
> """
> @@ -635,12 +635,12 @@
> return subset & s
>
>
> -@eh.revsetpredicate('precursors(set)') # legacy name for predecessors
> +@eh.revsetpredicate(b'precursors(set)') # legacy name for predecessors
> def revsetprecursors(repo, subset, x):
> return revsetpredecessors(repo, subset, x)
>
>
> -@eh.revsetpredicate('allpredecessors(set)')
> +@eh.revsetpredicate(b'allpredecessors(set)')
> def revsetallpredecessors(repo, subset, x):
> """Transitive predecessors of changesets in set.
> """
> @@ -650,12 +650,12 @@
> return subset & s
>
>
> -@eh.revsetpredicate('allprecursors(set)') # legacy name for allpredecessors
> +@eh.revsetpredicate(b'allprecursors(set)') # legacy name for allpredecessors
> def revsetallprecursors(repo, subset, x):
> return revsetallpredecessors(repo, subset, x)
>
>
> -@eh.revsetpredicate('successors(set)')
> +@eh.revsetpredicate(b'successors(set)')
> def revsetsuccessors(repo, subset, x):
> """Immediate successors of changesets in set.
> """
> @@ -664,7 +664,7 @@
> s.sort()
> return subset & s
>
> -@eh.revsetpredicate('allsuccessors(set)')
> +@eh.revsetpredicate(b'allsuccessors(set)')
> def revsetallsuccessors(repo, subset, x):
> """Transitive successors of changesets in set.
> """
> @@ -681,87 +681,87 @@
> # This section take care of issue warning to the user when troubles appear
>
> def _warnobsoletewc(ui, repo, prevnode=None, wasobs=None):
> - rev = repo['.']
> + rev = repo[b'.']
>
> if not rev.obsolete():
> return
>
> if rev.node() == prevnode and wasobs:
> return
> - msg = _("working directory parent is obsolete! (%s)\n")
> + msg = _(b"working directory parent is obsolete! (%s)\n")
> shortnode = node.short(rev.node())
>
> ui.warn(msg % shortnode)
>
> # Check that evolve is activated for performance reasons
> - evolvecommandenabled = any('evolve' in e for e in cmdtable)
> + evolvecommandenabled = any(b'evolve' in e for e in cmdtable)
> if ui.quiet or not evolvecommandenabled:
> return
>
> # Show a warning for helping the user to solve the issue
> reason, successors = obshistory._getobsfateandsuccs(repo, rev.node())
>
> - if reason == 'pruned':
> - solvemsg = _("use 'hg evolve' to update to its parent successor")
> - elif reason == 'diverged':
> - debugcommand = "hg evolve --list --content-divergent"
> - basemsg = _("%s has diverged, use '%s' to resolve the issue")
> + if reason == b'pruned':
> + solvemsg = _(b"use 'hg evolve' to update to its parent successor")
> + elif reason == b'diverged':
> + debugcommand = b"hg evolve --list --content-divergent"
> + basemsg = _(b"%s has diverged, use '%s' to resolve the issue")
> solvemsg = basemsg % (shortnode, debugcommand)
> - elif reason == 'superseed':
> - msg = _("use 'hg evolve' to update to its successor: %s")
> + elif reason == b'superseed':
> + msg = _(b"use 'hg evolve' to update to its successor: %s")
> solvemsg = msg % successors[0]
> - elif reason == 'superseed_split':
> - msg = _("use 'hg evolve' to update to its tipmost successor: %s")
> + elif reason == b'superseed_split':
> + msg = _(b"use 'hg evolve' to update to its tipmost successor: %s")
>
> if len(successors) <= 2:
> - solvemsg = msg % ", ".join(successors)
> + solvemsg = msg % b", ".join(successors)
> else:
> - firstsuccessors = ", ".join(successors[:2])
> + firstsuccessors = b", ".join(successors[:2])
> remainingnumber = len(successors) - 2
> - successorsmsg = _("%s and %d more") % (firstsuccessors, remainingnumber)
> + successorsmsg = _(b"%s and %d more") % (firstsuccessors, remainingnumber)
> solvemsg = msg % successorsmsg
> else:
> raise ValueError(reason)
>
> - ui.warn("(%s)\n" % solvemsg)
> + ui.warn(b"(%s)\n" % solvemsg)
>
> -if util.safehasattr(context, '_filterederror'): # <= hg-4.5
> - @eh.wrapfunction(context, '_filterederror')
> +if util.safehasattr(context, b'_filterederror'): # <= hg-4.5
> + @eh.wrapfunction(context, b'_filterederror')
> def evolve_filtererror(original, repo, changeid):
> """build an exception to be raised about a filtered changeid
>
> This is extracted in a function to help extensions (eg: evolve) to
> experiment with various message variants."""
> - if repo.filtername.startswith('visible'):
> + if repo.filtername.startswith(b'visible'):
>
> unfilteredrepo = repo.unfiltered()
> rev = repo[scmutil.revsingle(unfilteredrepo, changeid)]
> reason, successors = obshistory._getobsfateandsuccs(unfilteredrepo, rev.node())
>
> # Be more precise in case the revision is superseed
> - if reason == 'superseed':
> - reason = _("successor: %s") % successors[0]
> - elif reason == 'superseed_split':
> + if reason == b'superseed':
> + reason = _(b"successor: %s") % successors[0]
> + elif reason == b'superseed_split':
> if len(successors) <= 2:
> - reason = _("successors: %s") % ", ".join(successors)
> + reason = _(b"successors: %s") % b", ".join(successors)
> else:
> - firstsuccessors = ", ".join(successors[:2])
> + firstsuccessors = b", ".join(successors[:2])
> remainingnumber = len(successors) - 2
> - successorsmsg = _("%s and %d more") % (firstsuccessors, remainingnumber)
> - reason = _("successors: %s") % successorsmsg
> + successorsmsg = _(b"%s and %d more") % (firstsuccessors, remainingnumber)
> + reason = _(b"successors: %s") % successorsmsg
>
> - msg = _("hidden revision '%s'") % changeid
> - hint = _('use --hidden to access hidden revisions; %s') % reason
> + msg = _(b"hidden revision '%s'") % changeid
> + hint = _(b'use --hidden to access hidden revisions; %s') % reason
> return error.FilteredRepoLookupError(msg, hint=hint)
> - msg = _("filtered revision '%s' (not in '%s' subset)")
> + msg = _(b"filtered revision '%s' (not in '%s' subset)")
> msg %= (changeid, repo.filtername)
> return error.FilteredRepoLookupError(msg)
>
> -@eh.wrapcommand("update")
> -@eh.wrapcommand("pull")
> +@eh.wrapcommand(b"update")
> +@eh.wrapcommand(b"pull")
> def wrapmayobsoletewc(origfn, ui, repo, *args, **opts):
> """Warn that the working directory parent is an obsolete changeset"""
> - ctx = repo['.']
> + ctx = repo[b'.']
> node = ctx.node()
> isobs = ctx.obsolete()
>
> @@ -776,23 +776,23 @@
> lockmod.release(wlock)
> return res
>
> -@eh.wrapcommand("parents")
> +@eh.wrapcommand(b"parents")
> def wrapparents(origfn, ui, repo, *args, **opts):
> res = origfn(ui, repo, *args, **opts)
> _warnobsoletewc(ui, repo)
> return res
>
> -@eh.wrapfunction(mercurial.exchange, 'push')
> +@eh.wrapfunction(mercurial.exchange, b'push')
> def push(orig, repo, *args, **opts):
> """Add a hint for "hg evolve" when troubles make push fails
> """
> try:
> return orig(repo, *args, **opts)
> except error.Abort as ex:
> - hint = _("use 'hg evolve' to get a stable history "
> - "or --force to ignore warnings")
> + hint = _(b"use 'hg evolve' to get a stable history "
> + b"or --force to ignore warnings")
> if (len(ex.args) >= 1
> - and ex.args[0].startswith('push includes ')
> + and ex.args[0].startswith(b'push includes ')
> and ex.hint is None):
> ex.hint = hint
> raise
> @@ -801,11 +801,11 @@
> evolvestate = state.cmdstate(repo)
> if evolvestate:
> # i18n: column positioning for "hg summary"
> - ui.status(_('evolve: (evolve --continue)\n'))
> + ui.status(_(b'evolve: (evolve --continue)\n'))
>
> @eh.extsetup
> def obssummarysetup(ui):
> - cmdutil.summaryhooks.add('evolve', summaryhook)
> + cmdutil.summaryhooks.add(b'evolve', summaryhook)
>
> #####################################################################
> ### Old Evolve extension content ###
> @@ -816,20 +816,20 @@
>
> @eh.uisetup
> def _installimportobsolete(ui):
> - entry = cmdutil.findcmd('import', commands.table)[1]
> - entry[1].append(('', 'obsolete', False,
> - _('mark the old node as obsoleted by '
> - 'the created commit')))
> + entry = cmdutil.findcmd(b'import', commands.table)[1]
> + entry[1].append((b'', b'obsolete', False,
> + _(b'mark the old node as obsoleted by '
> + b'the created commit')))
>
> def _getnodefrompatch(patch, dest):
> - patchnode = patch.get('nodeid')
> + patchnode = patch.get(b'nodeid')
> if patchnode is not None:
> - dest['node'] = node.bin(patchnode)
> + dest[b'node'] = node.bin(patchnode)
>
> -@eh.wrapfunction(mercurial.cmdutil, 'tryimportone')
> +@eh.wrapfunction(mercurial.cmdutil, b'tryimportone')
> def tryimportone(orig, ui, repo, hunk, parents, opts, *args, **kwargs):
> - expected = {'node': None}
> - if not util.safehasattr(hunk, 'get'): # hg < 4.6
> + expected = {b'node': None}
> + if not util.safehasattr(hunk, b'get'): # hg < 4.6
> oldextract = patch.extract
>
> def extract(*args, **kwargs):
> @@ -845,12 +845,12 @@
> _getnodefrompatch(hunk, expected)
> ret = orig(ui, repo, hunk, parents, opts, *args, **kwargs)
> created = ret[1]
> - if (opts['obsolete'] and None not in (created, expected['node'])
> - and created != expected['node']):
> - tr = repo.transaction('import-obs')
> + if (opts[b'obsolete'] and None not in (created, expected[b'node'])
> + and created != expected[b'node']):
> + tr = repo.transaction(b'import-obs')
> try:
> - metadata = {'user': ui.username()}
> - repo.obsstore.create(tr, expected['node'], (created,),
> + metadata = {b'user': ui.username()}
> + repo.obsstore.create(tr, expected[b'node'], (created,),
> metadata=metadata)
> tr.close()
> finally:
> @@ -878,62 +878,62 @@
> if e is entry:
> break
>
> - synopsis = '(DEPRECATED)'
> + synopsis = b'(DEPRECATED)'
> if len(entry) > 2:
> fn, opts, _syn = entry
> else:
> fn, opts, = entry
> - deprecationwarning = _('%s have been deprecated in favor of %s\n') % (
> + deprecationwarning = _(b'%s have been deprecated in favor of %s\n') % (
> oldalias, newalias)
>
> def newfn(*args, **kwargs):
> ui = args[0]
> ui.warn(deprecationwarning)
> util.checksignature(fn)(*args, **kwargs)
> - newfn.__doc__ = deprecationwarning + ' (DEPRECATED)'
> + newfn.__doc__ = deprecationwarning + b' (DEPRECATED)'
> cmdwrapper = eh.command(oldalias, opts, synopsis)
> cmdwrapper(newfn)
>
> @eh.extsetup
> def deprecatealiases(ui):
> - _deprecatealias('gup', 'next')
> - _deprecatealias('gdown', 'previous')
> + _deprecatealias(b'gup', b'next')
> + _deprecatealias(b'gdown', b'previous')
>
> def _gettopic(ctx):
> """handle topic fetching with or without the extension"""
> - return getattr(ctx, 'topic', lambda: '')()
> + return getattr(ctx, 'topic', lambda: b'')()
>
> def _gettopicidx(ctx):
> """handle topic fetching with or without the extension"""
> return getattr(ctx, 'topicidx', lambda: None)()
>
> def _getcurrenttopic(repo):
> - return getattr(repo, 'currenttopic', '')
> + return getattr(repo, 'currenttopic', b'')
>
> def _prevupdate(repo, displayer, target, bookmark, dryrun, mergeopt):
> if dryrun:
> - repo.ui.write(_('hg update %s;\n') % target)
> + repo.ui.write(_(b'hg update %s;\n') % target)
> if bookmark is not None:
> - repo.ui.write(_('hg bookmark %s -r %s;\n')
> + repo.ui.write(_(b'hg bookmark %s -r %s;\n')
> % (bookmark, target))
> else:
> updatecheck = None
> # --merge is passed, we don't need to care about commands.update.check
> # config option
> if mergeopt:
> - updatecheck = 'none'
> + updatecheck = b'none'
> try:
> ret = hg.updatetotally(repo.ui, repo, target.node(), None,
> updatecheck=updatecheck)
> except error.Abort as exc:
> # replace the hint to mention about --merge option
> - exc.hint = _('do you want --merge?')
> + exc.hint = _(b'do you want --merge?')
> raise
> if not ret:
> tr = lock = None
> try:
> lock = repo.lock()
> - tr = repo.transaction('previous')
> + tr = repo.transaction(b'previous')
> if bookmark is not None:
> bmchanges = [(bookmark, target.node())]
> repo._bookmarks.applychanges(repo, tr, bmchanges)
> @@ -961,75 +961,75 @@
>
> # issue message for the various case
> if p1.node() == node.nullid:
> - repo.ui.warn(_('already at repository root\n'))
> + repo.ui.warn(_(b'already at repository root\n'))
> elif not parents and currenttopic:
> - repo.ui.warn(_('no parent in topic "%s"\n') % currenttopic)
> - repo.ui.warn(_('(do you want --no-topic)\n'))
> + repo.ui.warn(_(b'no parent in topic "%s"\n') % currenttopic)
> + repo.ui.warn(_(b'(do you want --no-topic)\n'))
> elif len(parents) == 1:
> target = parents[0]
> bookmark = None
> if movebookmark:
> bookmark = repo._activebookmark
> else:
> - header = _("multiple parents, choose one to update:")
> + header = _(b"multiple parents, choose one to update:")
> prevs = [p.rev() for p in parents]
> choosedrev = utility.revselectionprompt(repo.ui, repo, prevs, header)
> if choosedrev is None:
> for p in parents:
> displayer.show(p)
> - repo.ui.warn(_('multiple parents, explicitly update to one\n'))
> + repo.ui.warn(_(b'multiple parents, explicitly update to one\n'))
> else:
> target = repo[choosedrev]
> return target, bookmark
>
> @eh.command(
> - 'previous',
> - [('B', 'move-bookmark', False,
> - _('move active bookmark after update')),
> - ('m', 'merge', False, _('bring uncommitted change along')),
> - ('', 'no-topic', False, _('ignore topic and move topologically')),
> - ('n', 'dry-run', False,
> - _('do not perform actions, just print what would be done'))],
> - '[OPTION]...',
> + b'previous',
> + [(b'B', b'move-bookmark', False,
> + _(b'move active bookmark after update')),
> + (b'm', b'merge', False, _(b'bring uncommitted change along')),
> + (b'', b'no-topic', False, _(b'ignore topic and move topologically')),
> + (b'n', b'dry-run', False,
> + _(b'do not perform actions, just print what would be done'))],
> + b'[OPTION]...',
> helpbasic=True)
> def cmdprevious(ui, repo, **opts):
> """update to parent revision
>
> Displays the summary line of the destination for clarity."""
> wlock = None
> - dryrunopt = opts['dry_run']
> - mergeopt = opts['merge']
> + dryrunopt = opts[b'dry_run']
> + mergeopt = opts[b'merge']
> if not dryrunopt:
> wlock = repo.wlock()
> try:
> wkctx = repo[None]
> wparents = wkctx.parents()
> if len(wparents) != 1:
> - raise error.Abort(_('merge in progress'))
> + raise error.Abort(_(b'merge in progress'))
> if not mergeopt:
> # we only skip the check if noconflict is set
> - if ui.config('commands', 'update.check') == 'noconflict':
> + if ui.config(b'commands', b'update.check') == b'noconflict':
> pass
> else:
> - cmdutil.bailifchanged(repo, hint=_('do you want --merge?'))
> + cmdutil.bailifchanged(repo, hint=_(b'do you want --merge?'))
>
> - topic = not opts.get("no_topic", False)
> + topic = not opts.get(b"no_topic", False)
> hastopic = bool(_getcurrenttopic(repo))
>
> template = shorttemplate
> if topic and hastopic:
> template = utility.stacktemplate
>
> - displayer = compat.changesetdisplayer(ui, repo, {'template': template})
> + displayer = compat.changesetdisplayer(ui, repo, {b'template': template})
>
> target, bookmark = _findprevtarget(repo, displayer,
> - opts.get('move_bookmark'), topic)
> + opts.get(b'move_bookmark'), topic)
> if target is not None:
> - backup = repo.ui.backupconfig('_internal', 'keep-topic')
> + backup = repo.ui.backupconfig(b'_internal', b'keep-topic')
> try:
> if topic and _getcurrenttopic(repo) != _gettopic(target):
> - repo.ui.setconfig('_internal', 'keep-topic', 'yes',
> - source='topic-extension')
> + repo.ui.setconfig(b'_internal', b'keep-topic', b'yes',
> + source=b'topic-extension')
> _prevupdate(repo, displayer, target, bookmark, dryrunopt,
> mergeopt)
> finally:
> @@ -1041,15 +1041,15 @@
> lockmod.release(wlock)
>
> @eh.command(
> - 'next',
> - [('B', 'move-bookmark', False,
> - _('move active bookmark after update')),
> - ('m', 'merge', False, _('bring uncommitted change along')),
> - ('', 'evolve', True, _('evolve the next changeset if necessary')),
> - ('', 'no-topic', False, _('ignore topic and move topologically')),
> - ('n', 'dry-run', False,
> - _('do not perform actions, just print what would be done'))],
> - '[OPTION]...',
> + b'next',
> + [(b'B', b'move-bookmark', False,
> + _(b'move active bookmark after update')),
> + (b'm', b'merge', False, _(b'bring uncommitted change along')),
> + (b'', b'evolve', True, _(b'evolve the next changeset if necessary')),
> + (b'', b'no-topic', False, _(b'ignore topic and move topologically')),
> + (b'n', b'dry-run', False,
> + _(b'do not perform actions, just print what would be done'))],
> + b'[OPTION]...',
> helpbasic=True)
> def cmdnext(ui, repo, **opts):
> """update to next child revision
> @@ -1060,29 +1060,29 @@
> Displays the summary line of the destination for clarity.
> """
> wlock = None
> - dryrunopt = opts['dry_run']
> + dryrunopt = opts[b'dry_run']
> if not dryrunopt:
> wlock = repo.wlock()
> try:
> wkctx = repo[None]
> wparents = wkctx.parents()
> if len(wparents) != 1:
> - raise error.Abort(_('merge in progress'))
> + raise error.Abort(_(b'merge in progress'))
>
> children = [ctx for ctx in wparents[0].children() if not ctx.obsolete()]
> topic = _getcurrenttopic(repo)
> filtered = set()
> template = shorttemplate
> - if topic and not opts.get("no_topic", False):
> + if topic and not opts.get(b"no_topic", False):
> filtered = set(ctx for ctx in children if ctx.topic() != topic)
> children = [ctx for ctx in children if ctx not in filtered]
> template = utility.stacktemplate
> - opts['stacktemplate'] = True
> - displayer = compat.changesetdisplayer(ui, repo, {'template': template})
> + opts[b'stacktemplate'] = True
> + displayer = compat.changesetdisplayer(ui, repo, {b'template': template})
>
> # check if we need to evolve while updating to the next child revision
> needevolve = False
> - aspchildren = evolvecmd._aspiringchildren(repo, [repo['.'].rev()])
> + aspchildren = evolvecmd._aspiringchildren(repo, [repo[b'.'].rev()])
> if topic:
> filtered.update(repo[c] for c in aspchildren
> if repo[c].topic() != topic)
> @@ -1101,54 +1101,54 @@
> needevolve = True
>
> # check if working directory is clean before we evolve the next cset
> - if needevolve and opts['evolve']:
> - hint = _('use `hg amend`, `hg revert` or `hg shelve`')
> + if needevolve and opts[b'evolve']:
> + hint = _(b'use `hg amend`, `hg revert` or `hg shelve`')
> cmdutil.bailifchanged(repo, hint=hint)
>
> - if not (opts['merge'] or (needevolve and opts['evolve'])):
> + if not (opts[b'merge'] or (needevolve and opts[b'evolve'])):
> # we only skip the check if noconflict is set
> - if ui.config('commands', 'update.check') == 'noconflict':
> + if ui.config(b'commands', b'update.check') == b'noconflict':
> pass
> else:
> - cmdutil.bailifchanged(repo, hint=_('do you want --merge?'))
> + cmdutil.bailifchanged(repo, hint=_(b'do you want --merge?'))
>
> if len(children) == 1:
> c = children[0]
> return _updatetonext(ui, repo, c, displayer, opts)
> elif children:
> - cheader = _("ambiguous next changeset, choose one to update:")
> + cheader = _(b"ambiguous next changeset, choose one to update:")
> crevs = [c.rev() for c in children]
> choosedrev = utility.revselectionprompt(ui, repo, crevs, cheader)
> if choosedrev is None:
> - ui.warn(_("ambiguous next changeset:\n"))
> + ui.warn(_(b"ambiguous next changeset:\n"))
> for c in children:
> displayer.show(c)
> - ui.warn(_("explicitly update to one of them\n"))
> + ui.warn(_(b"explicitly update to one of them\n"))
> return 1
> else:
> return _updatetonext(ui, repo, repo[choosedrev], displayer, opts)
> else:
> - if not opts['evolve'] or not aspchildren:
> + if not opts[b'evolve'] or not aspchildren:
> if filtered:
> - ui.warn(_('no children on topic "%s"\n') % topic)
> - ui.warn(_('do you want --no-topic\n'))
> + ui.warn(_(b'no children on topic "%s"\n') % topic)
> + ui.warn(_(b'do you want --no-topic\n'))
> else:
> - ui.warn(_('no children\n'))
> + ui.warn(_(b'no children\n'))
> if aspchildren:
> - msg = _('(%i unstable changesets to be evolved here, '
> - 'do you want --evolve?)\n')
> + msg = _(b'(%i unstable changesets to be evolved here, '
> + b'do you want --evolve?)\n')
> ui.warn(msg % len(aspchildren))
> return 1
> elif len(aspchildren) > 1:
> - cheader = _("ambiguous next (unstable) changeset, choose one to"
> - " evolve and update:")
> + cheader = _(b"ambiguous next (unstable) changeset, choose one to"
> + b" evolve and update:")
> choosedrev = utility.revselectionprompt(ui, repo,
> aspchildren, cheader)
> if choosedrev is None:
> - ui.warn(_("ambiguous next (unstable) changeset:\n"))
> + ui.warn(_(b"ambiguous next (unstable) changeset:\n"))
> for c in aspchildren:
> displayer.show(repo[c])
> - ui.warn(_("(run 'hg evolve --rev REV' on one of them)\n"))
> + ui.warn(_(b"(run 'hg evolve --rev REV' on one of them)\n"))
> return 1
> else:
> return _nextevolve(ui, repo, repo[choosedrev], opts)
> @@ -1161,51 +1161,51 @@
> """logic for hg next command to evolve and update to an aspiring children"""
>
> cmdutil.bailifchanged(repo)
> - evolvestate = state.cmdstate(repo, opts={'command': 'next',
> - 'bookmarkchanges': []})
> + evolvestate = state.cmdstate(repo, opts={b'command': b'next',
> + b'bookmarkchanges': []})
> with repo.wlock(), repo.lock():
> - tr = repo.transaction("evolve")
> + tr = repo.transaction(b"evolve")
> with util.acceptintervention(tr):
> result = evolvecmd._solveone(ui, repo, repo[aspchildren],
> - evolvestate, opts.get('dry_run'),
> + evolvestate, opts.get(b'dry_run'),
> False,
> - lambda: None, category='orphan',
> - stacktmplt=opts.get('stacktemplate',
> + lambda: None, category=b'orphan',
> + stacktmplt=opts.get(b'stacktemplate',
> False))
> # making sure a next commit is formed
> if result[0] and result[1]:
> - ui.status(_('working directory is now at %s\n')
> - % ui.label(str(repo['.']), 'evolve.node'))
> + ui.status(_(b'working directory is now at %s\n')
> + % ui.label(str(repo[b'.']), b'evolve.node'))
> return 0
>
> def _updatetonext(ui, repo, children, displayer, opts):
> """ logic for `hg next` command to update to children and move bookmarks if
> required """
> bm = repo._activebookmark
> - shouldmove = opts.get('move_bookmark') and bm is not None
> - if opts.get('dry_run'):
> - ui.write(_('hg update %s;\n') % children)
> + shouldmove = opts.get(b'move_bookmark') and bm is not None
> + if opts.get(b'dry_run'):
> + ui.write(_(b'hg update %s;\n') % children)
> if shouldmove:
> - ui.write(_('hg bookmark %s -r %s;\n') % (bm, children))
> + ui.write(_(b'hg bookmark %s -r %s;\n') % (bm, children))
> else:
> updatecheck = None
> # --merge is passed, we don't need to care about commands.update.check
> # config option
> - if opts['merge']:
> - updatecheck = 'none'
> + if opts[b'merge']:
> + updatecheck = b'none'
> try:
> ret = hg.updatetotally(ui, repo, children.node(), None,
> updatecheck=updatecheck)
> except error.Abort as exc:
> # replace the hint to mention about --merge option
> - exc.hint = _('do you want --merge?')
> + exc.hint = _(b'do you want --merge?')
> raise
>
> if not ret:
> lock = tr = None
> try:
> lock = repo.lock()
> - tr = repo.transaction('next')
> + tr = repo.transaction(b'next')
> if shouldmove:
> bmchanges = [(bm, children.node())]
> repo._bookmarks.applychanges(repo, tr, bmchanges)
> @@ -1218,28 +1218,28 @@
> displayer.show(children)
> return 0
>
> -@eh.wrapcommand('commit')
> +@eh.wrapcommand(b'commit')
> def commitwrapper(orig, ui, repo, *arg, **kwargs):
> tr = None
> - if kwargs.get('amend', False):
> + if kwargs.get(b'amend', False):
> wlock = lock = None
> else:
> wlock = repo.wlock()
> lock = repo.lock()
> try:
> - obsoleted = kwargs.get('obsolete', [])
> + obsoleted = kwargs.get(b'obsolete', [])
> if obsoleted:
> - obsoleted = repo.set('%lr', obsoleted)
> + obsoleted = repo.set(b'%lr', obsoleted)
> result = orig(ui, repo, *arg, **kwargs)
> if not result: # commit succeeded
> - new = repo['tip']
> + new = repo[b'tip']
> oldbookmarks = []
> markers = []
> for old in obsoleted:
> oldbookmarks.extend(repo.nodebookmarks(old.node()))
> markers.append((old, (new,)))
> if markers:
> - obsolete.createmarkers(repo, markers, operation="amend")
> + obsolete.createmarkers(repo, markers, operation=b"amend")
> bmchanges = []
> for book in oldbookmarks:
> bmchanges.append((book, new.node()))
> @@ -1248,70 +1248,70 @@
> wlock = repo.wlock()
> if not lock:
> lock = repo.lock()
> - tr = repo.transaction('commit')
> + tr = repo.transaction(b'commit')
> repo._bookmarks.applychanges(repo, tr, bmchanges)
> tr.close()
> return result
> finally:
> lockmod.release(tr, lock, wlock)
>
> -@eh.wrapcommand('strip', extension='strip', opts=[
> - ('', 'bundle', None, _("delete the commit entirely and move it to a "
> - "backup bundle")),
> +@eh.wrapcommand(b'strip', extension=b'strip', opts=[
> + (b'', b'bundle', None, _(b"delete the commit entirely and move it to a "
> + b"backup bundle")),
> ])
> def stripwrapper(orig, ui, repo, *revs, **kwargs):
> - if (not ui.configbool('experimental', 'prunestrip')
> - or kwargs.get('bundle', False)):
> + if (not ui.configbool(b'experimental', b'prunestrip')
> + or kwargs.get(b'bundle', False)):
> return orig(ui, repo, *revs, **kwargs)
>
> - if kwargs.get('force'):
> - ui.warn(_("warning: --force has no effect during strip with evolve "
> - "enabled\n"))
> - if kwargs.get('no_backup', False):
> - ui.warn(_("warning: --no-backup has no effect during strips with "
> - "evolve enabled\n"))
> + if kwargs.get(b'force'):
> + ui.warn(_(b"warning: --force has no effect during strip with evolve "
> + b"enabled\n"))
> + if kwargs.get(b'no_backup', False):
> + ui.warn(_(b"warning: --no-backup has no effect during strips with "
> + b"evolve enabled\n"))
>
> - revs = list(revs) + kwargs.pop('rev', [])
> + revs = list(revs) + kwargs.pop(b'rev', [])
> revs = set(scmutil.revrange(repo, revs))
> - revs = repo.revs("(%ld)::", revs)
> - kwargs['rev'] = []
> - kwargs['new'] = []
> - kwargs['succ'] = []
> - kwargs['biject'] = False
> + revs = repo.revs(b"(%ld)::", revs)
> + kwargs[b'rev'] = []
> + kwargs[b'new'] = []
> + kwargs[b'succ'] = []
> + kwargs[b'biject'] = False
> return cmdrewrite.cmdprune(ui, repo, *revs, **kwargs)
>
> @eh.extsetup
> def oldevolveextsetup(ui):
> - entry = cmdutil.findcmd('commit', commands.table)[1]
> - entry[1].append(('o', 'obsolete', [],
> - _("make commit obsolete this revision (DEPRECATED)")))
> + entry = cmdutil.findcmd(b'commit', commands.table)[1]
> + entry[1].append((b'o', b'obsolete', [],
> + _(b"make commit obsolete this revision (DEPRECATED)")))
>
> -@eh.wrapfunction(obsolete, '_checkinvalidmarkers')
> +@eh.wrapfunction(obsolete, b'_checkinvalidmarkers')
> def _checkinvalidmarkers(orig, markers):
> """search for marker with invalid data and raise error if needed
>
> Exist as a separated function to allow the evolve extension for a more
> subtle handling.
> """
> - if 'debugobsconvert' in sys.argv:
> + if b'debugobsconvert' in sys.argv:
> return
> for mark in markers:
> if node.nullid in mark[1]:
> - msg = _('bad obsolescence marker detected: invalid successors nullid')
> - hint = _('You should run `hg debugobsconvert`')
> + msg = _(b'bad obsolescence marker detected: invalid successors nullid')
> + hint = _(b'You should run `hg debugobsconvert`')
> raise error.Abort(msg, hint=hint)
>
> @eh.command(
> - 'debugobsconvert',
> - [('', 'new-format', obsexchange._bestformat, _('Destination format for markers.'))],
> - '')
> + b'debugobsconvert',
> + [(b'', b'new-format', obsexchange._bestformat, _(b'Destination format for markers.'))],
> + b'')
> def debugobsconvert(ui, repo, new_format):
> origmarkers = repo.obsstore._all # settle version
> if new_format == repo.obsstore._version:
> - msg = _('New format is the same as the old format, not upgrading!')
> + msg = _(b'New format is the same as the old format, not upgrading!')
> raise error.Abort(msg)
> with repo.lock():
> - f = repo.svfs('obsstore', 'wb', atomictemp=True)
> + f = repo.svfs(b'obsstore', b'wb', atomictemp=True)
> known = set()
> markers = []
> for m in origmarkers:
> @@ -1324,11 +1324,11 @@
> continue
> known.add(m)
> markers.append(m)
> - ui.write(_('Old store is version %d, will rewrite in version %d\n') % (
> + ui.write(_(b'Old store is version %d, will rewrite in version %d\n') % (
> repo.obsstore._version, new_format))
> map(f.write, obsolete.encodemarkers(markers, True, new_format))
> f.close()
> - ui.write(_('Done!\n'))
> + ui.write(_(b'Done!\n'))
>
>
> def _helploader(ui):
> @@ -1337,40 +1337,40 @@
> @eh.uisetup
> def _setuphelp(ui):
> for entry in help.helptable:
> - if entry[0] == "evolution":
> + if entry[0] == b"evolution":
> break
> else:
> - help.helptable.append((["evolution"], _("Safely Rewriting History"),
> - _helploader))
> + help.helptable.append(([b"evolution"], _(b"Safely Rewriting History"),
> + _helploader))
> help.helptable.sort()
>
> evolvestateversion = 0
>
> def _evolvemessage():
> - _msg = _('To continue: hg evolve --continue\n'
> - 'To abort: hg evolve --abort\n'
> - 'To stop: hg evolve --stop\n'
> - '(also see `hg help evolve.interrupted`)')
> + _msg = _(b'To continue: hg evolve --continue\n'
> + b'To abort: hg evolve --abort\n'
> + b'To stop: hg evolve --stop\n'
> + b'(also see `hg help evolve.interrupted`)')
> return cmdutil._commentlines(_msg)
>
> @eh.uisetup
> def setupevolveunfinished(ui):
> - data = ('evolvestate', False, False, _('evolve in progress'),
> - _("use 'hg evolve --continue' or 'hg evolve --abort' to abort"))
> + data = (b'evolvestate', False, False, _(b'evolve in progress'),
> + _(b"use 'hg evolve --continue' or 'hg evolve --abort' to abort"))
> cmdutil.unfinishedstates.append(data)
>
> - afterresolved = ('evolvestate', _('hg evolve --continue'))
> - pickresolved = ('pickstate', _('hg pick --continue'))
> + afterresolved = (b'evolvestate', _(b'hg evolve --continue'))
> + pickresolved = (b'pickstate', _(b'hg pick --continue'))
> cmdutil.afterresolvedstates.append(afterresolved)
> cmdutil.afterresolvedstates.append(pickresolved)
>
> - if util.safehasattr(cmdutil, 'STATES'):
> - statedata = ('evolve', cmdutil.fileexistspredicate('evolvestate'),
> + if util.safehasattr(cmdutil, b'STATES'):
> + statedata = (b'evolve', cmdutil.fileexistspredicate(b'evolvestate'),
> _evolvemessage)
> cmdutil.STATES = (statedata, ) + cmdutil.STATES
>
> -@eh.wrapfunction(hg, 'clean')
> +@eh.wrapfunction(hg, b'clean')
> def clean(orig, repo, *args, **kwargs):
> ret = orig(repo, *args, **kwargs)
> - util.unlinkpath(repo.vfs.join('evolvestate'), ignoremissing=True)
> + util.unlinkpath(repo.vfs.join(b'evolvestate'), ignoremissing=True)
> return ret
> diff --git a/hgext3rd/evolve/cmdrewrite.py b/hgext3rd/evolve/cmdrewrite.py
> --- a/hgext3rd/evolve/cmdrewrite.py
> +++ b/hgext3rd/evolve/cmdrewrite.py
> @@ -61,17 +61,17 @@
> def _checknotesize(ui, opts):
> """ make sure note is of valid format """
>
> - note = opts.get('note')
> + note = opts.get(b'note')
> if not note:
> return
>
> if not compat.isobsnotesupported():
> - ui.warn(_("current hg version does not support storing"
> - " note in obsmarker\n"))
> + ui.warn(_(b"current hg version does not support storing"
> + b" note in obsmarker\n"))
> if len(note) > 255:
> - raise error.Abort(_("cannot store a note of more than 255 bytes"))
> - if '\n' in note:
> - raise error.Abort(_("note cannot contain a newline"))
> + raise error.Abort(_(b"cannot store a note of more than 255 bytes"))
> + if b'\n' in note:
> + raise error.Abort(_(b"note cannot contain a newline"))
>
> def _resolveoptions(ui, opts):
> """modify commit options dict to handle related options
> @@ -80,34 +80,34 @@
> -d was supplied.
> """
> # N.B. this is extremely similar to setupheaderopts() in mq.py
> - if not opts.get('date') and opts.get('current_date'):
> - opts['date'] = '%d %d' % compat.makedate()
> - if not opts.get('user') and opts.get('current_user'):
> - opts['user'] = ui.username()
> + if not opts.get(b'date') and opts.get(b'current_date'):
> + opts[b'date'] = b'%d %d' % compat.makedate()
> + if not opts.get(b'user') and opts.get(b'current_user'):
> + opts[b'user'] = ui.username()
>
> commitopts3 = [
> - ('D', 'current-date', None,
> - _('record the current date as commit date')),
> - ('U', 'current-user', None,
> - _('record the current user as committer')),
> + (b'D', b'current-date', None,
> + _(b'record the current date as commit date')),
> + (b'U', b'current-user', None,
> + _(b'record the current user as committer')),
> ]
>
> -interactiveopt = [['i', 'interactive', None, _('use interactive mode')]]
> +interactiveopt = [[b'i', b'interactive', None, _(b'use interactive mode')]]
>
> @eh.command(
> - 'amend|refresh',
> - [('A', 'addremove', None,
> - _('mark new/missing files as added/removed before committing')),
> - ('a', 'all', False, _("match all files")),
> - ('e', 'edit', False, _('invoke editor on commit messages')),
> - ('', 'extract', False, _('extract changes from the commit to the working copy')),
> - ('', 'patch', False, _('make changes to wdir parent by editing patch')),
> - ('', 'close-branch', None,
> - _('mark a branch as closed, hiding it from the branch list')),
> - ('s', 'secret', None, _('use the secret phase for committing')),
> - ('n', 'note', '', _('store a note on amend'), _('TEXT')),
> - ] + walkopts + commitopts + commitopts2 + commitopts3 + interactiveopt,
> - _('[OPTION]... [FILE]...'),
> + b'amend|refresh',
> + [(b'A', b'addremove', None,
> + _(b'mark new/missing files as added/removed before committing')),
> + (b'a', b'all', False, _(b"match all files")),
> + (b'e', b'edit', False, _(b'invoke editor on commit messages')),
> + (b'', b'extract', False, _(b'extract changes from the commit to the working copy')),
> + (b'', b'patch', False, _(b'make changes to wdir parent by editing patch')),
> + (b'', b'close-branch', None,
> + _(b'mark a branch as closed, hiding it from the branch list')),
> + (b's', b'secret', None, _(b'use the secret phase for committing')),
> + (b'n', b'note', b'', _(b'store a note on amend'), _(b'TEXT')),
> + ] + walkopts + commitopts + commitopts2 + commitopts3 + interactiveopt,
> + _(b'[OPTION]... [FILE]...'),
> helpbasic=True)
> def amend(ui, repo, *pats, **opts):
> """combine a changeset with updates and replace it with a new one
> @@ -127,34 +127,34 @@
> """
> _checknotesize(ui, opts)
> opts = opts.copy()
> - if opts.get('patch'):
> + if opts.get(b'patch'):
> return amendpatch(ui, repo, *pats, **opts)
> - if opts.get('extract'):
> + if opts.get(b'extract'):
> return uncommit(ui, repo, *pats, **opts)
> else:
> - if opts.pop('all', False):
> + if opts.pop(b'all', False):
> # add an include for all
> - include = list(opts.get('include'))
> - include.append('re:.*')
> - edit = opts.pop('edit', False)
> - log = opts.get('logfile')
> - opts['amend'] = True
> + include = list(opts.get(b'include'))
> + include.append(b're:.*')
> + edit = opts.pop(b'edit', False)
> + log = opts.get(b'logfile')
> + opts[b'amend'] = True
> _resolveoptions(ui, opts)
> - _alias, commitcmd = cmdutil.findcmd('commit', commands.table)
> + _alias, commitcmd = cmdutil.findcmd(b'commit', commands.table)
> with repo.wlock(), repo.lock():
> - if not (edit or opts['message'] or log):
> - opts['message'] = repo['.'].description()
> - rewriteutil.precheck(repo, [repo['.'].rev()], action='amend')
> + if not (edit or opts[b'message'] or log):
> + opts[b'message'] = repo[b'.'].description()
> + rewriteutil.precheck(repo, [repo[b'.'].rev()], action=b'amend')
> return commitcmd[0](ui, repo, *pats, **opts)
>
> def amendpatch(ui, repo, *pats, **opts):
> """logic for --patch flag of `hg amend` command."""
> - with repo.wlock(), repo.lock(), repo.transaction('amend') as tr:
> + with repo.wlock(), repo.lock(), repo.transaction(b'amend') as tr:
> cmdutil.bailifchanged(repo)
> # first get the patch
> - old = repo['.']
> + old = repo[b'.']
> p1 = old.p1()
> - rewriteutil.precheck(repo, [old.rev()], 'amend')
> + rewriteutil.precheck(repo, [old.rev()], b'amend')
> diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
> diffopts.nodates = True
> diffopts.git = True
> @@ -167,12 +167,12 @@
> fp.write(chunk)
> newnode = _editandapply(ui, repo, pats, old, p1, fp, diffopts)
> if newnode == old.node():
> - raise error.Abort(_("nothing changed"))
> + raise error.Abort(_(b"nothing changed"))
> metadata = {}
> - if opts.get('note'):
> - metadata['note'] = opts['note']
> + if opts.get(b'note'):
> + metadata[b'note'] = opts[b'note']
> replacements = {old.node(): [newnode]}
> - scmutil.cleanupnodes(repo, replacements, operation='amend',
> + scmutil.cleanupnodes(repo, replacements, operation=b'amend',
> metadata=metadata)
> phases.retractboundary(repo, tr, old.phase(), [newnode])
> hg.updaterepo(repo, newnode, True)
> @@ -183,7 +183,7 @@
> fp.seek(0)
> previous_patch = fp.getvalue()
> if 5 <= len(ui.edit.im_func.func_defaults):
> - newpatch = ui.edit(fp.getvalue(), old.user(), action="diff")
> + newpatch = ui.edit(fp.getvalue(), old.user(), action=b"diff")
> else:
> newpatch = ui.edit(fp.getvalue(), old.user())
>
> @@ -191,7 +191,7 @@
> afp.write(newpatch)
> if pats:
> # write rest of the files in the patch
> - restmatcher = scmutil.match(old, [], opts={'exclude': pats})
> + restmatcher = scmutil.match(old, [], opts={b'exclude': pats})
> for chunk, label in patch.diffui(repo, p1.node(), old.node(),
> match=restmatcher,
> opts=diffopts):
> @@ -199,21 +199,21 @@
>
> user_patch = afp.getvalue()
> if not user_patch:
> - raise error.Abort(_("empty patch file, amend aborted"))
> + raise error.Abort(_(b"empty patch file, amend aborted"))
> if user_patch == previous_patch:
> - raise error.Abort(_("patch unchanged"))
> + raise error.Abort(_(b"patch unchanged"))
> afp.seek(0)
> # write the patch to repo and get the newnode
> try:
> newnode = _writepatch(ui, repo, old, afp)
> except patch.PatchError as err:
> - ui.write_err(_("failed to apply edited patch: %s\n") % err)
> + ui.write_err(_(b"failed to apply edited patch: %s\n") % err)
> defaultchoice = 0 # yes
> if not ui.interactive:
> defaultchoice = 1 # no
> - retrychoice = _('try to fix the patch (yn)?$$ &Yes $$ &No')
> + retrychoice = _(b'try to fix the patch (yn)?$$ &Yes $$ &No')
> if ui.promptchoice(retrychoice, default=defaultchoice):
> - raise error.Abort(_("Could not apply amended path"))
> + raise error.Abort(_(b"Could not apply amended path"))
> else:
> # consider a third choice where we restore the original patch
> fp = stringio()
> @@ -224,7 +224,7 @@
> """utility function to use filestore and patchrepo to apply a patch to the
> repository with metadata being extracted from the patch"""
> metadata = patch.extract(ui, fp)
> - if util.safehasattr(metadata, 'get'): # < hg-4.6
> + if util.safehasattr(metadata, b'get'): # < hg-4.6
> @contextlib.contextmanager
> def patchcontext():
> yield metadata
> @@ -235,20 +235,20 @@
>
> with patchcontext as metadata:
> # store the metadata from the patch to variables
> - parents = (metadata.get('p1'), metadata.get('p2'))
> - date = metadata.get('date') or old.date()
> - branch = metadata.get('branch') or old.branch()
> - user = metadata.get('user') or old.user()
> + parents = (metadata.get(b'p1'), metadata.get(b'p2'))
> + date = metadata.get(b'date') or old.date()
> + branch = metadata.get(b'branch') or old.branch()
> + user = metadata.get(b'user') or old.user()
> # XXX: we must extract extras from the patchfile too
> extra = old.extra()
> - message = metadata.get('message') or old.description()
> + message = metadata.get(b'message') or old.description()
> store = patch.filestore()
> fp.seek(0)
> try:
> files = set()
> # beware: next line may raise a PatchError to be handled by the caller
> # of this function
> - patch.patchrepo(ui, repo, pold, store, fp, 1, '',
> + patch.patchrepo(ui, repo, pold, store, fp, 1, b'',
> files=files, eolmode=None)
>
> memctx = context.memctx(repo, parents, message, files=files,
> @@ -271,23 +271,23 @@
> else:
> prev = node.nullid
>
> - fp.write("# HG changeset patch\n")
> - fp.write("# User %s\n" % ctx.user())
> - fp.write("# Date %d %d\n" % ctx.date())
> - fp.write("# %s\n" % datestr(ctx.date()))
> - if branch and branch != 'default':
> - fp.write("# Branch %s\n" % branch)
> - fp.write("# Node ID %s\n" % node.hex(nodeval))
> - fp.write("# Parent %s\n" % node.hex(prev))
> + fp.write(b"# HG changeset patch\n")
> + fp.write(b"# User %s\n" % ctx.user())
> + fp.write(b"# Date %d %d\n" % ctx.date())
> + fp.write(b"# %s\n" % datestr(ctx.date()))
> + if branch and branch != b'default':
> + fp.write(b"# Branch %s\n" % branch)
> + fp.write(b"# Node ID %s\n" % node.hex(nodeval))
> + fp.write(b"# Parent %s\n" % node.hex(prev))
> if len(parents) > 1:
> - fp.write("# Parent %s\n" % node.hex(parents[1]))
> + fp.write(b"# Parent %s\n" % node.hex(parents[1]))
>
> for headerid in cmdutil.extraexport:
> header = cmdutil.extraexportmap[headerid](1, ctx)
> if header is not None:
> - fp.write('# %s\n' % header)
> + fp.write(b'# %s\n' % header)
> fp.write(ctx.description().rstrip())
> - fp.write("\n\n")
> + fp.write(b"\n\n")
>
> def _touchedbetween(repo, source, dest, match=None):
> touched = set()
> @@ -320,7 +320,7 @@
>
> # Filter copies
> copied = copies.pathcopies(target, ctx)
> - copied = dict((dst, src) for dst, src in copied.iteritems()
> + copied = dict((dst, src) for dst, src in copied.items()
> if dst in files)
>
> def filectxfn(repo, memctx, path, contentctx=ctx, redirect=newcontent):
> @@ -356,7 +356,7 @@
> oldctx to a copy of oldctx not containing changed files matched by
> match.
> """
> - ctx = repo['.']
> + ctx = repo[b'.']
> ds = repo.dirstate
> copies = dict(ds.copies())
> if interactive:
> @@ -375,7 +375,7 @@
> # Also any modifications to a removed file will result the status as
> # added, so we have only two cases. So in either of the cases, the
> # resulting status can be modified or clean.
> - if ds[f] == 'r':
> + if ds[f] == b'r':
> # But the file is removed in the working directory, leaving that
> # as removed
> continue
> @@ -389,7 +389,7 @@
> # does not adds it back. If it's adds it back, we do a normallookup.
> # The file can't be removed in working directory, because it was
> # removed in oldctx
> - if ds[f] == 'a':
> + if ds[f] == b'a':
> ds.normallookup(f)
> continue
> ds.remove(f)
> @@ -401,7 +401,7 @@
> # would have resulted in modified status, not removed.
> # So a file added in a commit, and uncommitting that addition must
> # result in file being stated as unknown.
> - if ds[f] == 'r':
> + if ds[f] == b'r':
> # The working directory say it's removed, so lets make the file
> # unknown
> ds.drop(f)
> @@ -410,23 +410,23 @@
> else:
> m, a, r = repo.status(oldctx.p1(), oldctx, match=match)[:3]
> for f in m:
> - if ds[f] == 'r':
> + if ds[f] == b'r':
> # modified + removed -> removed
> continue
> ds.normallookup(f)
>
> for f in a:
> - if ds[f] == 'r':
> + if ds[f] == b'r':
> # added + removed -> unknown
> ds.drop(f)
> - elif ds[f] != 'a':
> + elif ds[f] != b'a':
> ds.add(f)
>
> for f in r:
> - if ds[f] == 'a':
> + if ds[f] == b'a':
> # removed + added -> normal
> ds.normallookup(f)
> - elif ds[f] != 'r':
> + elif ds[f] != b'r':
> ds.remove(f)
>
> # Merge old parent and old working dir copies
> @@ -441,22 +441,22 @@
> oldcopies[f] = src[0]
> oldcopies.update(copies)
> copies = dict((dst, oldcopies.get(src, src))
> - for dst, src in oldcopies.iteritems())
> + for dst, src in oldcopies.items())
> # Adjust the dirstate copies
> - for dst, src in copies.iteritems():
> - if (src not in ctx or dst in ctx or ds[dst] != 'a'):
> + for dst, src in copies.items():
> + if (src not in ctx or dst in ctx or ds[dst] != b'a'):
> src = None
> ds.copy(src, dst)
>
> @eh.command(
> - 'uncommit',
> - [('a', 'all', None, _('uncommit all changes when no arguments given')),
> - ('i', 'interactive', False, _('interactive mode to uncommit (EXPERIMENTAL)')),
> - ('r', 'rev', '', _('revert commit content to REV instead'), _('REV')),
> - ('', 'revert', False, _('discard working directory changes after uncommit')),
> - ('n', 'note', '', _('store a note on uncommit'), _('TEXT')),
> + b'uncommit',
> + [(b'a', b'all', None, _(b'uncommit all changes when no arguments given')),
> + (b'i', b'interactive', False, _(b'interactive mode to uncommit (EXPERIMENTAL)')),
> + (b'r', b'rev', b'', _(b'revert commit content to REV instead'), _(b'REV')),
> + (b'', b'revert', False, _(b'discard working directory changes after uncommit')),
> + (b'n', b'note', b'', _(b'store a note on uncommit'), _(b'TEXT')),
> ] + commands.walkopts + commitopts + commitopts2 + commitopts3,
> - _('[OPTION]... [NAME]'))
> + _(b'[OPTION]... [NAME]'))
> def uncommit(ui, repo, *pats, **opts):
> """move changes from parent revision to working directory
>
> @@ -482,34 +482,34 @@
>
> _checknotesize(ui, opts)
> _resolveoptions(ui, opts) # process commitopts3
> - interactive = opts.get('interactive')
> + interactive = opts.get(b'interactive')
> wlock = lock = tr = None
> try:
> wlock = repo.wlock()
> lock = repo.lock()
> wctx = repo[None]
> if len(wctx.parents()) <= 0:
> - raise error.Abort(_("cannot uncommit null changeset"))
> + raise error.Abort(_(b"cannot uncommit null changeset"))
> if len(wctx.parents()) > 1:
> - raise error.Abort(_("cannot uncommit while merging"))
> - old = repo['.']
> - rewriteutil.precheck(repo, [repo['.'].rev()], action='uncommit')
> + raise error.Abort(_(b"cannot uncommit while merging"))
> + old = repo[b'.']
> + rewriteutil.precheck(repo, [repo[b'.'].rev()], action=b'uncommit')
> if len(old.parents()) > 1:
> - raise error.Abort(_("cannot uncommit merge changeset"))
> + raise error.Abort(_(b"cannot uncommit merge changeset"))
> oldphase = old.phase()
>
> rev = None
> - if opts.get('rev'):
> - rev = scmutil.revsingle(repo, opts.get('rev'))
> + if opts.get(b'rev'):
> + rev = scmutil.revsingle(repo, opts.get(b'rev'))
> ctx = repo[None]
> if ctx.p1() == rev or ctx.p2() == rev:
> - raise error.Abort(_("cannot uncommit to parent changeset"))
> + raise error.Abort(_(b"cannot uncommit to parent changeset"))
>
> onahead = old.rev() in repo.changelog.headrevs()
> disallowunstable = not obsolete.isenabled(repo,
> obsolete.allowunstableopt)
> if disallowunstable and not onahead:
> - raise error.Abort(_("cannot uncommit in the middle of a stack"))
> + raise error.Abort(_(b"cannot uncommit in the middle of a stack"))
>
> match = scmutil.match(old, pats, opts)
>
> @@ -544,42 +544,42 @@
> % uipathfn(f), hint=hint)
>
> # Recommit the filtered changeset
> - tr = repo.transaction('uncommit')
> + tr = repo.transaction(b'uncommit')
> if interactive:
> - opts['all'] = True
> + opts[b'all'] = True
> newid = _interactiveuncommit(ui, repo, old, match)
> else:
> newid = None
> - includeorexclude = opts.get('include') or opts.get('exclude')
> - if (pats or includeorexclude or opts.get('all')):
> - if not (opts['message'] or opts['logfile']):
> - opts['message'] = old.description()
> + includeorexclude = opts.get(b'include') or opts.get(b'exclude')
> + if (pats or includeorexclude or opts.get(b'all')):
> + if not (opts[b'message'] or opts[b'logfile']):
> + opts[b'message'] = old.description()
> message = cmdutil.logmessage(ui, opts)
> newid = _commitfiltered(repo, old, match, target=rev,
> - message=message, user=opts.get('user'),
> - date=opts.get('date'))
> + message=message, user=opts.get(b'user'),
> + date=opts.get(b'date'))
> if newid is None:
> - raise error.Abort(_('nothing to uncommit'),
> - hint=_("use --all to uncommit all files"))
> + raise error.Abort(_(b'nothing to uncommit'),
> + hint=_(b"use --all to uncommit all files"))
>
> # metadata to be stored in obsmarker
> metadata = {}
> - if opts.get('note'):
> - metadata['note'] = opts['note']
> + if opts.get(b'note'):
> + metadata[b'note'] = opts[b'note']
>
> replacements = {old.node(): [newid]}
> - scmutil.cleanupnodes(repo, replacements, operation="uncommit",
> + scmutil.cleanupnodes(repo, replacements, operation=b"uncommit",
> metadata=metadata)
> phases.retractboundary(repo, tr, oldphase, [newid])
> - if opts.get('revert'):
> + if opts.get(b'revert'):
> hg.updaterepo(repo, newid, True)
> else:
> with repo.dirstate.parentchange():
> repo.dirstate.setparents(newid, node.nullid)
> _uncommitdirstate(repo, old, match, interactive)
> if not repo[newid].files():
> - ui.warn(_("new changeset is empty\n"))
> - ui.status(_("(use 'hg prune .' to remove it)\n"))
> + ui.warn(_(b"new changeset is empty\n"))
> + ui.status(_(b"(use 'hg prune .' to remove it)\n"))
> tr.close()
> finally:
> lockmod.release(tr, lock, wlock)
> @@ -606,7 +606,7 @@
> fp.seek(0)
> newnode = _patchtocommit(ui, repo, old, fp)
> # creating obs marker temp -> ()
> - obsolete.createmarkers(repo, [(repo[tempnode], ())], operation="uncommit")
> + obsolete.createmarkers(repo, [(repo[tempnode], ())], operation=b"uncommit")
> return newnode
>
> def _createtempcommit(ui, repo, old, match):
> @@ -628,20 +628,20 @@
> # to add uncommit as an operation taking care of BC.
> try:
> chunks, opts = cmdutil.recordfilter(repo.ui, originalchunks, match,
> - operation='discard')
> + operation=b'discard')
> except TypeError:
> # hg <= 4.9 (db72f9f6580e)
> chunks, opts = cmdutil.recordfilter(repo.ui, originalchunks,
> - operation='discard')
> + operation=b'discard')
> if not chunks:
> - raise error.Abort(_("nothing selected to uncommit"))
> + raise error.Abort(_(b"nothing selected to uncommit"))
> fp = stringio()
> for c in chunks:
> c.write(fp)
>
> fp.seek(0)
> oldnode = node.hex(old.node())[:12]
> - message = 'temporary commit for uncommiting %s' % oldnode
> + message = b'temporary commit for uncommiting %s' % oldnode
> tempnode = _patchtocommit(ui, repo, old, fp, message, oldnode)
> return tempnode
>
> @@ -658,14 +658,14 @@
> user = old.user()
> extra = old.extra()
> if extras:
> - extra['uncommit_source'] = extras
> + extra[b'uncommit_source'] = extras
> if not message:
> message = old.description()
> store = patch.filestore()
> try:
> files = set()
> try:
> - patch.patchrepo(ui, repo, pold, store, fp, 1, '',
> + patch.patchrepo(ui, repo, pold, store, fp, 1, b'',
> files=files, eolmode=None)
> except patch.PatchError as err:
> raise error.Abort(str(err))
> @@ -685,13 +685,13 @@
> return newcm
>
> @eh.command(
> - 'fold|squash',
> - [('r', 'rev', [], _("revision to fold"), _('REV')),
> - ('', 'exact', None, _("only fold specified revisions")),
> - ('', 'from', None, _("fold revisions linearly to working copy parent")),
> - ('n', 'note', '', _('store a note on fold'), _('TEXT')),
> - ] + commitopts + commitopts2 + commitopts3,
> - _('hg fold [OPTION]... [-r] REV'),
> + b'fold|squash',
> + [(b'r', b'rev', [], _(b"revision to fold"), _(b'REV')),
> + (b'', b'exact', None, _(b"only fold specified revisions")),
> + (b'', b'from', None, _(b"fold revisions linearly to working copy parent")),
> + (b'n', b'note', b'', _(b'store a note on fold'), _(b'TEXT')),
> + ] + commitopts + commitopts2 + commitopts3,
> + _(b'hg fold [OPTION]... [-r] REV'),
> helpbasic=True)
> def fold(ui, repo, *revs, **opts):
> """fold multiple revisions into a single one
> @@ -733,35 +733,35 @@
> _checknotesize(ui, opts)
> _resolveoptions(ui, opts)
> revs = list(revs)
> - revs.extend(opts['rev'])
> + revs.extend(opts[b'rev'])
> if not revs:
> - raise error.Abort(_('no revisions specified'))
> + raise error.Abort(_(b'no revisions specified'))
>
> revs = scmutil.revrange(repo, revs)
>
> - if opts['from'] and opts['exact']:
> - raise error.Abort(_('cannot use both --from and --exact'))
> - elif opts['from']:
> + if opts[b'from'] and opts[b'exact']:
> + raise error.Abort(_(b'cannot use both --from and --exact'))
> + elif opts[b'from']:
> # Try to extend given revision starting from the working directory
> - extrevs = repo.revs('(%ld::.) or (.::%ld)', revs, revs)
> + extrevs = repo.revs(b'(%ld::.) or (.::%ld)', revs, revs)
> discardedrevs = [r for r in revs if r not in extrevs]
> if discardedrevs:
> - msg = _("cannot fold non-linear revisions")
> - hint = _("given revisions are unrelated to parent of working"
> - " directory")
> + msg = _(b"cannot fold non-linear revisions")
> + hint = _(b"given revisions are unrelated to parent of working"
> + b" directory")
> raise error.Abort(msg, hint=hint)
> revs = extrevs
> - elif opts['exact']:
> + elif opts[b'exact']:
> # Nothing to do; "revs" is already set correctly
> pass
> else:
> - raise error.Abort(_('must specify either --from or --exact'))
> + raise error.Abort(_(b'must specify either --from or --exact'))
>
> if not revs:
> - raise error.Abort(_('specified revisions evaluate to an empty set'),
> - hint=_('use different revision arguments'))
> + raise error.Abort(_(b'specified revisions evaluate to an empty set'),
> + hint=_(b'use different revision arguments'))
> elif len(revs) == 1:
> - ui.write_err(_('single revision specified, nothing to fold\n'))
> + ui.write_err(_(b'single revision specified, nothing to fold\n'))
> return 1
>
> # Sort so combined commit message of `hg fold --exact -r . -r .^` is
> @@ -775,24 +775,24 @@
>
> root, head = rewriteutil.foldcheck(repo, revs)
>
> - tr = repo.transaction('fold')
> + tr = repo.transaction(b'fold')
> try:
> commitopts = opts.copy()
> allctx = [repo[r] for r in revs]
> targetphase = max(c.phase() for c in allctx)
>
> - if commitopts.get('message') or commitopts.get('logfile'):
> - commitopts['edit'] = False
> + if commitopts.get(b'message') or commitopts.get(b'logfile'):
> + commitopts[b'edit'] = False
> else:
> - msgs = ["HG: This is a fold of %d changesets." % len(allctx)]
> - msgs += ["HG: Commit message of changeset %s.\n\n%s\n" %
> + msgs = [b"HG: This is a fold of %d changesets." % len(allctx)]
> + msgs += [b"HG: Commit message of changeset %s.\n\n%s\n" %
> (c.rev(), c.description()) for c in allctx]
> - commitopts['message'] = "\n".join(msgs)
> - commitopts['edit'] = True
> + commitopts[b'message'] = b"\n".join(msgs)
> + commitopts[b'edit'] = True
>
> metadata = {}
> - if opts.get('note'):
> - metadata['note'] = opts['note']
> + if opts.get(b'note'):
> + metadata[b'note'] = opts[b'note']
>
> newid, unusedvariable = rewriteutil.rewrite(repo, root, allctx,
> head,
> @@ -801,24 +801,24 @@
> commitopts=commitopts)
> phases.retractboundary(repo, tr, targetphase, [newid])
> replacements = {ctx.node(): [newid] for ctx in allctx}
> - scmutil.cleanupnodes(repo, replacements, operation="fold",
> + scmutil.cleanupnodes(repo, replacements, operation=b"fold",
> metadata=metadata)
> tr.close()
> finally:
> tr.release()
> - ui.status('%i changesets folded\n' % len(revs))
> - if repo['.'].rev() in revs:
> + ui.status(b'%i changesets folded\n' % len(revs))
> + if repo[b'.'].rev() in revs:
> hg.update(repo, newid)
> finally:
> lockmod.release(lock, wlock)
>
> @eh.command(
> - 'metaedit',
> - [('r', 'rev', [], _("revision to edit"), _('REV')),
> - ('', 'fold', None, _("also fold specified revisions into one")),
> - ('n', 'note', '', _('store a note on metaedit'), _('TEXT')),
> - ] + commitopts + commitopts2 + commitopts3,
> - _('hg metaedit [OPTION]... [-r] [REV]'))
> + b'metaedit',
> + [(b'r', b'rev', [], _(b"revision to edit"), _(b'REV')),
> + (b'', b'fold', None, _(b"also fold specified revisions into one")),
> + (b'n', b'note', b'', _(b'store a note on metaedit'), _(b'TEXT')),
> + ] + commitopts + commitopts2 + commitopts3,
> + _(b'hg metaedit [OPTION]... [-r] [REV]'))
> def metaedit(ui, repo, *revs, **opts):
> """edit commit information
>
> @@ -851,15 +851,15 @@
> _checknotesize(ui, opts)
> _resolveoptions(ui, opts)
> revs = list(revs)
> - revs.extend(opts['rev'])
> + revs.extend(opts[b'rev'])
> if not revs:
> - if opts['fold']:
> - raise error.Abort(_('revisions must be specified with --fold'))
> - revs = ['.']
> + if opts[b'fold']:
> + raise error.Abort(_(b'revisions must be specified with --fold'))
> + revs = [b'.']
>
> with repo.wlock(), repo.lock():
> revs = scmutil.revrange(repo, revs)
> - if not opts['fold'] and len(revs) > 1:
> + if not opts[b'fold'] and len(revs) > 1:
> # TODO: handle multiple revisions. This is somewhat tricky because
> # if we want to edit a series of commits:
> #
> @@ -868,45 +868,45 @@
> # we need to rewrite a first, then directly rewrite b on top of the
> # new a, then rewrite c on top of the new b. So we need to handle
> # revisions in topological order.
> - raise error.Abort(_('editing multiple revisions without --fold is '
> - 'not currently supported'))
> + raise error.Abort(_(b'editing multiple revisions without --fold is '
> + b'not currently supported'))
>
> - if opts['fold']:
> + if opts[b'fold']:
> root, head = rewriteutil.foldcheck(repo, revs)
> else:
> - if repo.revs("%ld and public()", revs):
> - raise error.Abort(_('cannot edit commit information for public '
> - 'revisions'))
> + if repo.revs(b"%ld and public()", revs):
> + raise error.Abort(_(b'cannot edit commit information for public '
> + b'revisions'))
> newunstable = rewriteutil.disallowednewunstable(repo, revs)
> if newunstable:
> - msg = _('cannot edit commit information in the middle'
> - ' of a stack')
> - hint = _('%s will become unstable and new unstable changes'
> - ' are not allowed')
> + msg = _(b'cannot edit commit information in the middle'
> + b' of a stack')
> + hint = _(b'%s will become unstable and new unstable changes'
> + b' are not allowed')
> hint %= repo[newunstable.first()]
> raise error.Abort(msg, hint=hint)
> root = head = repo[revs.first()]
>
> wctx = repo[None]
> p1 = wctx.p1()
> - tr = repo.transaction('metaedit')
> + tr = repo.transaction(b'metaedit')
> newp1 = None
> try:
> commitopts = opts.copy()
> allctx = [repo[r] for r in revs]
> targetphase = max(c.phase() for c in allctx)
>
> - if commitopts.get('message') or commitopts.get('logfile'):
> - commitopts['edit'] = False
> + if commitopts.get(b'message') or commitopts.get(b'logfile'):
> + commitopts[b'edit'] = False
> else:
> - if opts['fold']:
> - msgs = ["HG: This is a fold of %d changesets." % len(allctx)]
> - msgs += ["HG: Commit message of changeset %s.\n\n%s\n" %
> + if opts[b'fold']:
> + msgs = [b"HG: This is a fold of %d changesets." % len(allctx)]
> + msgs += [b"HG: Commit message of changeset %s.\n\n%s\n" %
> (c.rev(), c.description()) for c in allctx]
> else:
> msgs = [head.description()]
> - commitopts['message'] = "\n".join(msgs)
> - commitopts['edit'] = True
> + commitopts[b'message'] = b"\n".join(msgs)
> + commitopts[b'edit'] = True
>
> # TODO: if the author and message are the same, don't create a new
> # hash. Right now we create a new hash because the date can be
> @@ -920,58 +920,58 @@
> newp1 = newid
> # metadata to be stored on obsmarker
> metadata = {}
> - if opts.get('note'):
> - metadata['note'] = opts['note']
> + if opts.get(b'note'):
> + metadata[b'note'] = opts[b'note']
>
> phases.retractboundary(repo, tr, targetphase, [newid])
> obsolete.createmarkers(repo, [(ctx, (repo[newid],))
> for ctx in allctx],
> - metadata=metadata, operation="metaedit")
> + metadata=metadata, operation=b"metaedit")
> else:
> - ui.status(_("nothing changed\n"))
> + ui.status(_(b"nothing changed\n"))
> tr.close()
> finally:
> tr.release()
>
> - if opts['fold']:
> - ui.status('%i changesets folded\n' % len(revs))
> + if opts[b'fold']:
> + ui.status(b'%i changesets folded\n' % len(revs))
> if newp1 is not None:
> hg.update(repo, newp1)
>
> metadataopts = [
> - ('d', 'date', '',
> - _('record the specified date in metadata'), _('DATE')),
> - ('u', 'user', '',
> - _('record the specified user in metadata'), _('USER')),
> + (b'd', b'date', b'',
> + _(b'record the specified date in metadata'), _(b'DATE')),
> + (b'u', b'user', b'',
> + _(b'record the specified user in metadata'), _(b'USER')),
> ]
>
> def _getmetadata(**opts):
> metadata = {}
> - date = opts.get('date')
> - user = opts.get('user')
> + date = opts.get(b'date')
> + user = opts.get(b'user')
> if date:
> - metadata['date'] = '%i %i' % compat.parsedate(date)
> + metadata[b'date'] = b'%i %i' % compat.parsedate(date)
> if user:
> - metadata['user'] = user
> + metadata[b'user'] = user
> return metadata
>
> @eh.command(
> - 'prune|obsolete',
> - [('n', 'new', [], _("successor changeset (DEPRECATED)")),
> - ('s', 'succ', [], _("successor changeset"), _('REV')),
> - ('r', 'rev', [], _("revisions to prune"), _('REV')),
> - ('k', 'keep', None, _("does not modify working copy during prune")),
> - ('n', 'note', '', _('store a note on prune'), _('TEXT')),
> - ('', 'pair', False, _("record a pairing, such as a rebase or divergence resolution "
> - "(pairing multiple precursors to multiple successors)")),
> - ('', 'biject', False, _("alias to --pair (DEPRECATED)")),
> - ('', 'fold', False,
> - _("record a fold (multiple precursors, one successors)")),
> - ('', 'split', False,
> - _("record a split (on precursor, multiple successors)")),
> - ('B', 'bookmark', [], _("remove revs only reachable from given"
> - " bookmark"), _('BOOKMARK'))] + metadataopts,
> - _('[OPTION] [-r] REV...'),
> + b'prune|obsolete',
> + [(b'n', b'new', [], _(b"successor changeset (DEPRECATED)")),
> + (b's', b'succ', [], _(b"successor changeset"), _(b'REV')),
> + (b'r', b'rev', [], _(b"revisions to prune"), _(b'REV')),
> + (b'k', b'keep', None, _(b"does not modify working copy during prune")),
> + (b'n', b'note', b'', _(b'store a note on prune'), _(b'TEXT')),
> + (b'', b'pair', False, _(b"record a pairing, such as a rebase or divergence resolution "
> + b"(pairing multiple precursors to multiple successors)")),
> + (b'', b'biject', False, _(b"alias to --pair (DEPRECATED)")),
> + (b'', b'fold', False,
> + _(b"record a fold (multiple precursors, one successors)")),
> + (b'', b'split', False,
> + _(b"record a split (on precursor, multiple successors)")),
> + (b'B', b'bookmark', [], _(b"remove revs only reachable from given"
> + b" bookmark"), _(b'BOOKMARK'))] + metadataopts,
> + _(b'[OPTION] [-r] REV...'),
> helpbasic=True)
> # XXX -U --noupdate option to prevent wc update and or bookmarks update ?
> def cmdprune(ui, repo, *revs, **opts):
> @@ -1000,17 +1000,17 @@
> else does edits history without obsolescence enabled.
> """
> _checknotesize(ui, opts)
> - revs = scmutil.revrange(repo, list(revs) + opts.get('rev'))
> - succs = opts['new'] + opts['succ']
> - bookmarks = set(opts.get('bookmark'))
> + revs = scmutil.revrange(repo, list(revs) + opts.get(b'rev'))
> + succs = opts[b'new'] + opts[b'succ']
> + bookmarks = set(opts.get(b'bookmark'))
> metadata = _getmetadata(**opts)
> - biject = opts.get('pair') or opts.get('biject')
> - fold = opts.get('fold')
> - split = opts.get('split')
> + biject = opts.get(b'pair') or opts.get(b'biject')
> + fold = opts.get(b'fold')
> + split = opts.get(b'split')
>
> - options = [o for o in ('pair', 'fold', 'split') if opts.get(o)]
> + options = [o for o in (b'pair', b'fold', b'split') if opts.get(o)]
> if 1 < len(options):
> - raise error.Abort(_("can only specify one of %s") % ', '.join(options))
> + raise error.Abort(_(b"can only specify one of %s") % b', '.join(options))
>
> if bookmarks:
> reachablefrombookmark = rewriteutil.reachablefrombookmark
> @@ -1020,14 +1020,14 @@
> rewriteutil.deletebookmark(repo, repomarks, bookmarks)
>
> if not revs:
> - raise error.Abort(_('nothing to prune'))
> + raise error.Abort(_(b'nothing to prune'))
>
> wlock = lock = tr = None
> try:
> wlock = repo.wlock()
> lock = repo.lock()
> - rewriteutil.precheck(repo, revs, 'prune')
> - tr = repo.transaction('prune')
> + rewriteutil.precheck(repo, revs, b'prune')
> + tr = repo.transaction(b'prune')
> # defines pruned changesets
> precs = []
> revs.sort()
> @@ -1035,33 +1035,33 @@
> cp = repo[p]
> precs.append(cp)
> if not precs:
> - raise error.Abort('nothing to prune')
> + raise error.Abort(b'nothing to prune')
>
> # defines successors changesets
> sucs = scmutil.revrange(repo, succs)
> sucs.sort()
> sucs = tuple(repo[n] for n in sucs)
> if not biject and len(sucs) > 1 and len(precs) > 1:
> - msg = "Can't use multiple successors for multiple precursors"
> - hint = _("use --pair to mark a series as a replacement"
> - " for another")
> + msg = b"Can't use multiple successors for multiple precursors"
> + hint = _(b"use --pair to mark a series as a replacement"
> + b" for another")
> raise error.Abort(msg, hint=hint)
> elif biject and len(sucs) != len(precs):
> - msg = "Can't use %d successors for %d precursors" \
> + msg = b"Can't use %d successors for %d precursors"\
> % (len(sucs), len(precs))
> raise error.Abort(msg)
> elif (len(precs) == 1 and len(sucs) > 1) and not split:
> - msg = "please add --split if you want to do a split"
> + msg = b"please add --split if you want to do a split"
> raise error.Abort(msg)
> elif len(sucs) == 1 and len(precs) > 1 and not fold:
> - msg = "please add --fold if you want to do a fold"
> + msg = b"please add --fold if you want to do a fold"
> raise error.Abort(msg)
> elif biject:
> replacements = {p.node(): [s.node()] for p, s in zip(precs, sucs)}
> else:
> replacements = {p.node(): [s.node() for s in sucs] for p in precs}
>
> - wdp = repo['.']
> + wdp = repo[b'.']
>
> if len(sucs) == 1 and len(precs) == 1 and wdp in precs:
> # '.' killed, so update to the successor
> @@ -1074,14 +1074,14 @@
> newnode = newnode.parents()[0]
>
> if newnode.node() != wdp.node():
> - if opts.get('keep', False):
> + if opts.get(b'keep', False):
> # This is largely the same as the implementation in
> # strip.stripcmd(). We might want to refactor this somewhere
> # common at some point.
>
> # only reset the dirstate for files that would actually change
> # between the working context and uctx
> - descendantrevs = repo.revs("%d::." % newnode.rev())
> + descendantrevs = repo.revs(b"%d::." % newnode.rev())
> changedfiles = []
> for rev in descendantrevs:
> # blindly reset the files, regardless of what actually
> @@ -1090,7 +1090,7 @@
>
> # reset files that only changed in the dirstate too
> dirstate = repo.dirstate
> - dirchanges = [f for f in dirstate if dirstate[f] != 'n']
> + dirchanges = [f for f in dirstate if dirstate[f] != b'n']
> changedfiles.extend(dirchanges)
> repo.dirstate.rebuild(newnode.node(), newnode.manifest(),
> changedfiles)
> @@ -1106,8 +1106,8 @@
> bmchanges = [(bookactive, newnode.node())]
> repo._bookmarks.applychanges(repo, tr, bmchanges)
> commands.update(ui, repo, newnode.hex())
> - ui.status(_('working directory is now at %s\n')
> - % ui.label(str(newnode), 'evolve.node'))
> + ui.status(_(b'working directory is now at %s\n')
> + % ui.label(str(newnode), b'evolve.node'))
> if movebookmark:
> bookmarksmod.activate(repo, bookactive)
>
> @@ -1116,12 +1116,12 @@
> rewriteutil.deletebookmark(repo, repomarks, bookmarks)
>
> # store note in metadata
> - if opts.get('note'):
> - metadata['note'] = opts['note']
> + if opts.get(b'note'):
> + metadata[b'note'] = opts[b'note']
>
> precrevs = (precursor.rev() for precursor in precs)
> moves = {}
> - for ctx in repo.unfiltered().set('bookmark() and %ld', precrevs):
> + for ctx in repo.unfiltered().set(b'bookmark() and %ld', precrevs):
> # used to be:
> #
> # ldest = list(repo.set('max((::%d) - obsolete())', ctx))
> @@ -1134,23 +1134,23 @@
> if not dest.obsolete() and dest.node() not in replacements:
> moves[ctx.node()] = dest.node()
> break
> - scmutil.cleanupnodes(repo, replacements, operation="prune", moves=moves,
> + scmutil.cleanupnodes(repo, replacements, operation=b"prune", moves=moves,
> metadata=metadata)
>
> # informs that changeset have been pruned
> - ui.status(_('%i changesets pruned\n') % len(precs))
> + ui.status(_(b'%i changesets pruned\n') % len(precs))
>
> tr.close()
> finally:
> lockmod.release(tr, lock, wlock)
>
> @eh.command(
> - 'split',
> - [('i', 'interactive', True, _('use interactive mode')),
> - ('r', 'rev', [], _("revision to split"), _('REV')),
> - ('n', 'note', '', _("store a note on split"), _('TEXT')),
> - ] + commitopts + commitopts2 + commitopts3,
> - _('hg split [OPTION] [-r REV] [FILES]'),
> + b'split',
> + [(b'i', b'interactive', True, _(b'use interactive mode')),
> + (b'r', b'rev', [], _(b"revision to split"), _(b'REV')),
> + (b'n', b'note', b'', _(b"store a note on split"), _(b'TEXT')),
> + ] + commitopts + commitopts2 + commitopts3,
> + _(b'hg split [OPTION] [-r REV] [FILES]'),
> helpbasic=True)
> def cmdsplit(ui, repo, *pats, **opts):
> """split a changeset into smaller changesets
> @@ -1168,17 +1168,17 @@
> _resolveoptions(ui, opts)
> tr = wlock = lock = None
> newcommits = []
> - iselect = opts.pop('interactive')
> + iselect = opts.pop(b'interactive')
>
> - revs = opts.get('rev') or '.'
> + revs = opts.get(b'rev') or b'.'
> if not revs:
> - revarg = '.'
> + revarg = b'.'
> elif len(revs) == 1:
> revarg = revs[0]
> else:
> # XXX --rev often accept multiple value, it seems safer to explicitly
> # complains here instead of just taking the last value.
> - raise error.Abort(_('more than one revset is given'))
> + raise error.Abort(_(b'more than one revset is given'))
>
> # Save the current branch to restore it in the end
> savedbranch = repo.dirstate.branch()
> @@ -1189,18 +1189,18 @@
> ctx = scmutil.revsingle(repo, revarg)
> rev = ctx.rev()
> cmdutil.bailifchanged(repo)
> - rewriteutil.precheck(repo, [rev], action='split')
> - tr = repo.transaction('split')
> + rewriteutil.precheck(repo, [rev], action=b'split')
> + tr = repo.transaction(b'split')
> # make sure we respect the phase while splitting
> - overrides = {('phases', 'new-commit'): ctx.phase()}
> + overrides = {(b'phases', b'new-commit'): ctx.phase()}
>
> if len(ctx.parents()) > 1:
> - raise error.Abort(_("cannot split merge commits"))
> + raise error.Abort(_(b"cannot split merge commits"))
> prev = ctx.p1()
> bmupdate = rewriteutil.bookmarksupdater(repo, ctx.node(), tr)
> bookactive = repo._activebookmark
> if bookactive is not None:
> - repo.ui.status(_("(leaving bookmark %s)\n") % repo._activebookmark)
> + repo.ui.status(_(b"(leaving bookmark %s)\n") % repo._activebookmark)
> bookmarksmod.deactivate(repo)
>
> # Prepare the working directory
> @@ -1209,13 +1209,13 @@
> def haschanges(matcher=None):
> modified, added, removed, deleted = repo.status(match=matcher)[:4]
> return modified or added or removed or deleted
> - msg = ("HG: This is the original pre-split commit message. "
> - "Edit it as appropriate.\n\n")
> + msg = (b"HG: This is the original pre-split commit message. "
> + b"Edit it as appropriate.\n\n")
> msg += ctx.description()
> - opts['message'] = msg
> - opts['edit'] = True
> - if not opts['user']:
> - opts['user'] = ctx.user()
> + opts[b'message'] = msg
> + opts[b'edit'] = True
> + if not opts[b'user']:
> + opts[b'user'] = ctx.user()
>
> # Set the right branch
> # XXX-TODO: Find a way to set the branch without altering the dirstate
> @@ -1231,21 +1231,21 @@
>
> if haschanges(matcher):
> if iselect:
> - with repo.ui.configoverride(overrides, 'split'):
> - cmdutil.dorecord(ui, repo, commands.commit, 'commit',
> + with repo.ui.configoverride(overrides, b'split'):
> + cmdutil.dorecord(ui, repo, commands.commit, b'commit',
> False, cmdutil.recordfilter, *pats,
> **opts)
> # TODO: Does no seem like the best way to do this
> # We should make dorecord return the newly created commit
> - newcommits.append(repo['.'])
> + newcommits.append(repo[b'.'])
> elif not pats:
> - msg = _("no files of directories specified")
> - hint = _("do you want --interactive")
> + msg = _(b"no files of directories specified")
> + hint = _(b"do you want --interactive")
> raise error.Abort(msg, hint=hint)
> else:
> - with repo.ui.configoverride(overrides, 'split'):
> + with repo.ui.configoverride(overrides, b'split'):
> commands.commit(ui, repo, *pats, **opts)
> - newcommits.append(repo['.'])
> + newcommits.append(repo[b'.'])
> if pats:
> # refresh the wctx used for the matcher
> matcher = scmutil.match(repo[None], pats)
> @@ -1255,20 +1255,20 @@
> if haschanges(matcher):
> nextaction = None
> while nextaction is None:
> - nextaction = ui.prompt('continue splitting? [Ycdq?]', default='y')
> - if nextaction == 'c':
> - with repo.ui.configoverride(overrides, 'split'):
> + nextaction = ui.prompt(b'continue splitting? [Ycdq?]', default=b'y')
> + if nextaction == b'c':
> + with repo.ui.configoverride(overrides, b'split'):
> commands.commit(ui, repo, **opts)
> - newcommits.append(repo['.'])
> + newcommits.append(repo[b'.'])
> break
> - elif nextaction == 'q':
> - raise error.Abort(_('user quit'))
> - elif nextaction == 'd':
> + elif nextaction == b'q':
> + raise error.Abort(_(b'user quit'))
> + elif nextaction == b'd':
> # TODO: We should offer a way for the user to confirm
> # what is the remaining changes, either via a separate
> # diff action or by showing the remaining and
> # prompting for confirmation
> - ui.status(_('discarding remaining changes\n'))
> + ui.status(_(b'discarding remaining changes\n'))
> target = newcommits[0]
> if pats:
> status = repo.status(match=matcher)[:4]
> @@ -1281,34 +1281,34 @@
> else:
> cmdutil.revert(ui, repo, repo[target],
> (target, node.nullid), all=True)
> - elif nextaction == '?':
> + elif nextaction == b'?':
> nextaction = None
> - ui.write(_("y - yes, continue selection\n"))
> - ui.write(_("c - commit, select all remaining changes\n"))
> - ui.write(_("d - discard, discard remaining changes\n"))
> - ui.write(_("q - quit, abort the split\n"))
> - ui.write(_("? - ?, display help\n"))
> + ui.write(_(b"y - yes, continue selection\n"))
> + ui.write(_(b"c - commit, select all remaining changes\n"))
> + ui.write(_(b"d - discard, discard remaining changes\n"))
> + ui.write(_(b"q - quit, abort the split\n"))
> + ui.write(_(b"? - ?, display help\n"))
> else:
> continue
> break # propagate the previous break
> else:
> - ui.status(_("no more change to split\n"))
> + ui.status(_(b"no more change to split\n"))
> if haschanges():
> # XXX: Should we show a message for informing the user
> # that we create another commit with remaining changes?
> - with repo.ui.configoverride(overrides, 'split'):
> + with repo.ui.configoverride(overrides, b'split'):
> commands.commit(ui, repo, **opts)
> - newcommits.append(repo['.'])
> + newcommits.append(repo[b'.'])
> if newcommits:
> tip = repo[newcommits[-1]]
> bmupdate(tip.node())
> if bookactive is not None:
> bookmarksmod.activate(repo, bookactive)
> metadata = {}
> - if opts.get('note'):
> - metadata['note'] = opts['note']
> + if opts.get(b'note'):
> + metadata[b'note'] = opts[b'note']
> obsolete.createmarkers(repo, [(repo[rev], newcommits)],
> - metadata=metadata, operation="split")
> + metadata=metadata, operation=b"split")
> tr.close()
> finally:
> # Restore the old branch
> @@ -1317,16 +1317,16 @@
> lockmod.release(tr, lock, wlock)
>
> @eh.command(
> - 'touch',
> - [('r', 'rev', [], _('revision to update'), _('REV')),
> - ('n', 'note', '', _('store a note on touch'), _('TEXT')),
> - ('D', 'duplicate', False,
> - 'do not mark the new revision as successor of the old one'),
> - ('A', 'allowdivergence', False,
> - 'mark the new revision as successor of the old one potentially creating '
> - 'divergence')],
> + b'touch',
> + [(b'r', b'rev', [], _(b'revision to update'), _(b'REV')),
> + (b'n', b'note', b'', _(b'store a note on touch'), _(b'TEXT')),
> + (b'D', b'duplicate', False,
> + b'do not mark the new revision as successor of the old one'),
> + (b'A', b'allowdivergence', False,
> + b'mark the new revision as successor of the old one potentially creating '
> + b'divergence')],
> # allow to choose the seed ?
> - _('[-r] revs'))
> + _(b'[-r] revs'))
> def touch(ui, repo, *revs, **opts):
> """create successors identical to their predecessors but the changeset ID
>
> @@ -1334,33 +1334,33 @@
> """
> _checknotesize(ui, opts)
> revs = list(revs)
> - revs.extend(opts['rev'])
> + revs.extend(opts[b'rev'])
> if not revs:
> - revs = ['.']
> + revs = [b'.']
> revs = scmutil.revrange(repo, revs)
> if not revs:
> - ui.write_err('no revision to touch\n')
> + ui.write_err(b'no revision to touch\n')
> return 1
>
> - duplicate = opts['duplicate']
> + duplicate = opts[b'duplicate']
> if not duplicate:
> - rewriteutil.precheck(repo, revs, 'touch')
> + rewriteutil.precheck(repo, revs, b'touch')
> tmpl = utility.shorttemplate
> - displayer = compat.changesetdisplayer(ui, repo, {'template': tmpl})
> + displayer = compat.changesetdisplayer(ui, repo, {b'template': tmpl})
> with repo.wlock(), repo.lock():
> - tr = repo.transaction('touch')
> + tr = repo.transaction(b'touch')
> with util.acceptintervention(tr):
> touchnodes(ui, repo, revs, displayer, **opts)
>
> def touchnodes(ui, repo, revs, displayer, **opts):
> - duplicate = opts['duplicate']
> - allowdivergence = opts['allowdivergence']
> + duplicate = opts[b'duplicate']
> + allowdivergence = opts[b'allowdivergence']
> revs.sort() # ensure parent are run first
> newmapping = {}
> for r in revs:
> ctx = repo[r]
> extra = ctx.extra().copy()
> - extra['__touch-noise__'] = random.randint(0, 0xffffffff)
> + extra[b'__touch-noise__'] = random.randint(0, 0xffffffff)
> # search for touched parent
> p1 = ctx.p1().node()
> p2 = ctx.p2().node()
> @@ -1381,17 +1381,17 @@
> else:
> displayer.show(ctx)
> index = ui.promptchoice(
> - _("reviving this changeset will create divergence"
> - " unless you make a duplicate.\n(a)llow divergence or"
> - " (d)uplicate the changeset? $$ &Allowdivergence $$ "
> - "&Duplicate"), 0)
> - choice = ['allowdivergence', 'duplicate'][index]
> - if choice == 'allowdivergence':
> + _(b"reviving this changeset will create divergence"
> + b" unless you make a duplicate.\n(a)llow divergence or"
> + b" (d)uplicate the changeset? $$ &Allowdivergence $$ "
> + b"&Duplicate"), 0)
> + choice = [b'allowdivergence', b'duplicate'][index]
> + if choice == b'allowdivergence':
> duplicate = False
> else:
> duplicate = True
>
> - extradict = {'extra': extra}
> + extradict = {b'extra': extra}
> new, unusedvariable = rewriteutil.rewrite(repo, ctx, [], ctx,
> [p1, p2],
> commitopts=extradict)
> @@ -1400,10 +1400,10 @@
>
> if not duplicate:
> metadata = {}
> - if opts.get('note'):
> - metadata['note'] = opts['note']
> + if opts.get(b'note'):
> + metadata[b'note'] = opts[b'note']
> obsolete.createmarkers(repo, [(ctx, (repo[new],))],
> - metadata=metadata, operation="touch")
> + metadata=metadata, operation=b"touch")
> tr = repo.currenttransaction()
> phases.retractboundary(repo, tr, ctx.phase(), [new])
> if ctx in repo[None].parents():
> @@ -1411,80 +1411,80 @@
> repo.dirstate.setparents(new, node.nullid)
>
> @eh.command(
> - 'pick|grab',
> - [('r', 'rev', '', _('revision to pick'), _('REV')),
> - ('c', 'continue', False, 'continue interrupted pick'),
> - ('a', 'abort', False, 'abort interrupted pick'),
> - ] + mergetoolopts,
> - _('[-r] rev'))
> + b'pick|grab',
> + [(b'r', b'rev', b'', _(b'revision to pick'), _(b'REV')),
> + (b'c', b'continue', False, b'continue interrupted pick'),
> + (b'a', b'abort', False, b'abort interrupted pick'),
> + ] + mergetoolopts,
> + _(b'[-r] rev'))
> def cmdpick(ui, repo, *revs, **opts):
> """move a commit on the top of working directory parent and updates to it."""
>
> - cont = opts.get('continue')
> - abort = opts.get('abort')
> + cont = opts.get(b'continue')
> + abort = opts.get(b'abort')
>
> if cont and abort:
> - raise error.Abort(_("cannot specify both --continue and --abort"))
> + raise error.Abort(_(b"cannot specify both --continue and --abort"))
>
> revs = list(revs)
> - if opts.get('rev'):
> - revs.append(opts['rev'])
> + if opts.get(b'rev'):
> + revs.append(opts[b'rev'])
>
> - overrides = {('ui', 'forcemerge'): opts.get('tool', '')}
> - with repo.wlock(), repo.lock(), repo.transaction('pick'), ui.configoverride(overrides, 'pick'):
> - pickstate = state.cmdstate(repo, path='pickstate')
> - pctx = repo['.']
> + overrides = {(b'ui', b'forcemerge'): opts.get(b'tool', b'')}
> + with repo.wlock(), repo.lock(), repo.transaction(b'pick'), ui.configoverride(overrides, b'pick'):
> + pickstate = state.cmdstate(repo, path=b'pickstate')
> + pctx = repo[b'.']
>
> if not cont and not abort:
> cmdutil.bailifchanged(repo)
> revs = scmutil.revrange(repo, revs)
> if len(revs) > 1:
> - raise error.Abort(_("specify just one revision"))
> + raise error.Abort(_(b"specify just one revision"))
> elif not revs:
> - raise error.Abort(_("empty revision set"))
> + raise error.Abort(_(b"empty revision set"))
>
> origctx = repo[revs.first()]
>
> if origctx in pctx.ancestors() or origctx.node() == pctx.node():
> - raise error.Abort(_("cannot pick an ancestor revision"))
> + raise error.Abort(_(b"cannot pick an ancestor revision"))
>
> - rewriteutil.precheck(repo, [origctx.rev()], 'pick')
> + rewriteutil.precheck(repo, [origctx.rev()], b'pick')
>
> - ui.status(_('picking %d:%s "%s"\n') %
> + ui.status(_(b'picking %d:%s "%s"\n') %
> (origctx.rev(), origctx,
> - origctx.description().split("\n", 1)[0]))
> - stats = merge.graft(repo, origctx, origctx.p1(), ['local',
> - 'destination'])
> + origctx.description().split(b"\n", 1)[0]))
> + stats = merge.graft(repo, origctx, origctx.p1(), [b'local',
> + b'destination'])
> if compat.hasconflict(stats):
> - pickstate.addopts({'orignode': origctx.node(),
> - 'oldpctx': pctx.node()})
> + pickstate.addopts({b'orignode': origctx.node(),
> + b'oldpctx': pctx.node()})
> pickstate.save()
> - raise error.InterventionRequired(_("unresolved merge conflicts"
> - " (see hg help resolve)"))
> + raise error.InterventionRequired(_(b"unresolved merge conflicts"
> + b" (see hg help resolve)"))
>
> elif abort:
> if not pickstate:
> - raise error.Abort(_("no interrupted pick state exists"))
> + raise error.Abort(_(b"no interrupted pick state exists"))
> pickstate.load()
> - pctxnode = pickstate['oldpctx']
> - ui.status(_("aborting pick, updating to %s\n") %
> + pctxnode = pickstate[b'oldpctx']
> + ui.status(_(b"aborting pick, updating to %s\n") %
> node.hex(pctxnode)[:12])
> hg.updaterepo(repo, pctxnode, True)
> return 0
>
> else:
> if revs:
> - raise error.Abort(_("cannot specify both --continue and "
> - "revision"))
> + raise error.Abort(_(b"cannot specify both --continue and "
> + b"revision"))
> if not pickstate:
> - raise error.Abort(_("no interrupted pick state exists"))
> + raise error.Abort(_(b"no interrupted pick state exists"))
>
> pickstate.load()
> - orignode = pickstate['orignode']
> + orignode = pickstate[b'orignode']
> origctx = repo[orignode]
>
> - overrides = {('phases', 'new-commit'): origctx.phase()}
> - with repo.ui.configoverride(overrides, 'pick'):
> + overrides = {(b'phases', b'new-commit'): origctx.phase()}
> + with repo.ui.configoverride(overrides, b'pick'):
> newnode = repo.commit(text=origctx.description(),
> user=origctx.user(),
> date=origctx.date(), extra=origctx.extra())
> @@ -1494,10 +1494,10 @@
> pickstate.delete()
> newctx = repo[newnode] if newnode else pctx
> replacements = {origctx.node(): [newctx.node()]}
> - scmutil.cleanupnodes(repo, replacements, operation="pick")
> + scmutil.cleanupnodes(repo, replacements, operation=b"pick")
>
> if newnode is None:
> - ui.warn(_("note: picking %d:%s created no changes to commit\n") %
> + ui.warn(_(b"note: picking %d:%s created no changes to commit\n") %
> (origctx.rev(), origctx))
> return 0
>
> diff --git a/hgext3rd/evolve/compat.py b/hgext3rd/evolve/compat.py
> --- a/hgext3rd/evolve/compat.py
> +++ b/hgext3rd/evolve/compat.py
> @@ -48,29 +48,29 @@
> # check
> if not obsutil:
> return False
> - return util.safehasattr(obsutil, 'obsfateprinter')
> + return util.safehasattr(obsutil, b'obsfateprinter')
>
> # Evolution renaming compat
>
> TROUBLES = {
> - 'ORPHAN': 'orphan',
> - 'CONTENTDIVERGENT': 'content-divergent',
> - 'PHASEDIVERGENT': 'phase-divergent',
> + b'ORPHAN': b'orphan',
> + b'CONTENTDIVERGENT': b'content-divergent',
> + b'PHASEDIVERGENT': b'phase-divergent',
> }
>
> -if util.safehasattr(uimod.ui, 'makeprogress'):
> - def progress(ui, topic, pos, item="", unit="", total=None):
> +if util.safehasattr(uimod.ui, b'makeprogress'):
> + def progress(ui, topic, pos, item=b"", unit=b"", total=None):
> progress = ui.makeprogress(topic, unit, total)
> if pos is not None:
> progress.update(pos, item=item)
> else:
> progress.complete()
> else:
> - def progress(ui, topic, pos, item="", unit="", total=None):
> + def progress(ui, topic, pos, item=b"", unit=b"", total=None):
> ui.progress(topic, pos, item, unit, total)
>
> # XXX: Better detection of property cache
> -if 'predecessors' not in dir(obsolete.obsstore):
> +if b'predecessors' not in dir(obsolete.obsstore):
> @property
> def predecessors(self):
> return self.precursors
> @@ -81,33 +81,33 @@
> # XXX Would it be better at the module level?
> varnames = context.memfilectx.__init__.__code__.co_varnames
>
> - if "copysource" in varnames:
> + if b"copysource" in varnames:
> mctx = context.memfilectx(repo, ctx, fctx.path(), fctx.data(),
> - islink='l' in flags,
> - isexec='x' in flags,
> + islink=b'l' in flags,
> + isexec=b'x' in flags,
> copysource=copied.get(path))
> # compat with hg <- 4.9
> - elif varnames[2] == "changectx":
> + elif varnames[2] == b"changectx":
> mctx = context.memfilectx(repo, ctx, fctx.path(), fctx.data(),
> - islink='l' in flags,
> - isexec='x' in flags,
> + islink=b'l' in flags,
> + isexec=b'x' in flags,
> copied=copied.get(path))
> else:
> mctx = context.memfilectx(repo, fctx.path(), fctx.data(),
> - islink='l' in flags,
> - isexec='x' in flags,
> + islink=b'l' in flags,
> + isexec=b'x' in flags,
> copied=copied.get(path))
> return mctx
>
> def strdiff(a, b, fn1, fn2):
> """ A version of mdiff.unidiff for comparing two strings
> """
> - args = [a, '', b, '', fn1, fn2]
> + args = [a, b'', b, b'', fn1, fn2]
>
> # hg < 4.6 compat 8b6dd3922f70
> argspec = inspect.getargspec(mdiff.unidiff)
>
> - if 'binary' in argspec.args:
> + if b'binary' in argspec.args:
> args.append(False)
>
> return mdiff.unidiff(*args)
> @@ -123,7 +123,7 @@
> makedate = mercurial.util.makedate
> parsedate = mercurial.util.parsedate
>
> -def wireprotocommand(exthelper, name, args='', permission='pull'):
> +def wireprotocommand(exthelper, name, args=b'', permission=b'pull'):
> try:
> # Since b4d85bc1
> from mercurial.wireprotov1server import wireprotocommand
> @@ -161,7 +161,7 @@
> return bool(upres[-1])
> return bool(upres.unresolvedcount)
>
> -hg48 = util.safehasattr(copies, 'stringutil')
> +hg48 = util.safehasattr(copies, b'stringutil')
> # code imported from Mercurial core at ae17555ef93f + patch
> def fixedcopytracing(repo, c1, c2, base):
> """A complete copy-patse of copies._fullcopytrace with a one line fix to
> @@ -185,7 +185,7 @@
> # an endpoint is "dirty" if it isn't a descendant of the merge base
> # if we have a dirty endpoint, we need to trigger graft logic, and also
> # keep track of which endpoint is dirty
> - if util.safehasattr(base, 'isancestorof'):
> + if util.safehasattr(base, b'isancestorof'):
> dirtyc1 = not base.isancestorof(_c1)
> dirtyc2 = not base.isancestorof(_c2)
> else: # hg <= 4.6
> @@ -204,7 +204,7 @@
> if limit is None:
> # no common ancestor, no copies
> return {}, {}, {}, {}, {}
> - repo.ui.debug(" searching for copies back to rev %d\n" % limit)
> + repo.ui.debug(b" searching for copies back to rev %d\n" % limit)
>
> m1 = c1.manifest()
> m2 = c2.manifest()
> @@ -218,18 +218,18 @@
> # - incompletediverge = record divergent partial copies here
> diverge = {} # divergence data is shared
> incompletediverge = {}
> - data1 = {'copy': {},
> - 'fullcopy': {},
> - 'incomplete': {},
> - 'diverge': diverge,
> - 'incompletediverge': incompletediverge,
> - }
> - data2 = {'copy': {},
> - 'fullcopy': {},
> - 'incomplete': {},
> - 'diverge': diverge,
> - 'incompletediverge': incompletediverge,
> - }
> + data1 = {b'copy': {},
> + b'fullcopy': {},
> + b'incomplete': {},
> + b'diverge': diverge,
> + b'incompletediverge': incompletediverge,
> + }
> + data2 = {b'copy': {},
> + b'fullcopy': {},
> + b'incomplete': {},
> + b'diverge': diverge,
> + b'incompletediverge': incompletediverge,
> + }
>
> # find interesting file sets from manifests
> if hg48:
> @@ -246,20 +246,20 @@
> else:
> # unmatched file from base (DAG rotation in the graft case)
> u1r, u2r = copies._computenonoverlap(repo, c1, c2, addedinm1, addedinm2,
> - baselabel='base')
> + baselabel=b'base')
> # unmatched file from topological common ancestors (no DAG rotation)
> # need to recompute this for directory move handling when grafting
> mta = tca.manifest()
> if hg48:
> m1f = m1.filesnotin(mta, repo.narrowmatch())
> m2f = m2.filesnotin(mta, repo.narrowmatch())
> - baselabel = 'topological common ancestor'
> + baselabel = b'topological common ancestor'
> u1u, u2u = copies._computenonoverlap(repo, c1, c2, m1f, m2f,
> baselabel=baselabel)
> else:
> u1u, u2u = copies._computenonoverlap(repo, c1, c2, m1.filesnotin(mta),
> m2.filesnotin(mta),
> - baselabel='topological common ancestor')
> + baselabel=b'topological common ancestor')
>
> for f in u1u:
> copies._checkcopies(c1, c2, f, base, tca, dirtyc1, limit, data1)
> @@ -267,16 +267,16 @@
> for f in u2u:
> copies._checkcopies(c2, c1, f, base, tca, dirtyc2, limit, data2)
>
> - copy = dict(data1['copy'])
> - copy.update(data2['copy'])
> - fullcopy = dict(data1['fullcopy'])
> - fullcopy.update(data2['fullcopy'])
> + copy = dict(data1[b'copy'])
> + copy.update(data2[b'copy'])
> + fullcopy = dict(data1[b'fullcopy'])
> + fullcopy.update(data2[b'fullcopy'])
>
> if dirtyc1:
> - copies._combinecopies(data2['incomplete'], data1['incomplete'], copy, diverge,
> + copies._combinecopies(data2[b'incomplete'], data1[b'incomplete'], copy, diverge,
> incompletediverge)
> else:
> - copies._combinecopies(data1['incomplete'], data2['incomplete'], copy, diverge,
> + copies._combinecopies(data1[b'incomplete'], data2[b'incomplete'], copy, diverge,
> incompletediverge)
>
> renamedelete = {}
> @@ -294,23 +294,23 @@
> divergeset.update(fl) # reverse map for below
>
> if bothnew:
> - repo.ui.debug(" unmatched files new in both:\n %s\n"
> - % "\n ".join(bothnew))
> + repo.ui.debug(b" unmatched files new in both:\n %s\n"
> + % b"\n ".join(bothnew))
> bothdiverge = {}
> bothincompletediverge = {}
> remainder = {}
> - both1 = {'copy': {},
> - 'fullcopy': {},
> - 'incomplete': {},
> - 'diverge': bothdiverge,
> - 'incompletediverge': bothincompletediverge
> - }
> - both2 = {'copy': {},
> - 'fullcopy': {},
> - 'incomplete': {},
> - 'diverge': bothdiverge,
> - 'incompletediverge': bothincompletediverge
> - }
> + both1 = {b'copy': {},
> + b'fullcopy': {},
> + b'incomplete': {},
> + b'diverge': bothdiverge,
> + b'incompletediverge': bothincompletediverge
> + }
> + both2 = {b'copy': {},
> + b'fullcopy': {},
> + b'incomplete': {},
> + b'diverge': bothdiverge,
> + b'incompletediverge': bothincompletediverge
> + }
> for f in bothnew:
> copies._checkcopies(c1, c2, f, base, tca, dirtyc1, limit, both1)
> copies._checkcopies(c2, c1, f, base, tca, dirtyc2, limit, both2)
> @@ -319,17 +319,17 @@
> pass
> elif dirtyc1:
> # incomplete copies may only be found on the "dirty" side for bothnew
> - assert not both2['incomplete']
> - remainder = copies._combinecopies({}, both1['incomplete'], copy, bothdiverge,
> + assert not both2[b'incomplete']
> + remainder = copies._combinecopies({}, both1[b'incomplete'], copy, bothdiverge,
> bothincompletediverge)
> elif dirtyc2:
> - assert not both1['incomplete']
> - remainder = copies._combinecopies({}, both2['incomplete'], copy, bothdiverge,
> + assert not both1[b'incomplete']
> + remainder = copies._combinecopies({}, both2[b'incomplete'], copy, bothdiverge,
> bothincompletediverge)
> else:
> # incomplete copies and divergences can't happen outside grafts
> - assert not both1['incomplete']
> - assert not both2['incomplete']
> + assert not both1[b'incomplete']
> + assert not both2[b'incomplete']
> assert not bothincompletediverge
> for f in remainder:
> assert f not in bothdiverge
> @@ -342,52 +342,52 @@
> copy[fl[0]] = of # not actually divergent, just matching renames
>
> if fullcopy and repo.ui.debugflag:
> - repo.ui.debug(" all copies found (* = to merge, ! = divergent, "
> - "% = renamed and deleted):\n")
> + repo.ui.debug(b" all copies found (* = to merge, ! = divergent, "
> + b"% = renamed and deleted):\n")
> for f in sorted(fullcopy):
> - note = ""
> + note = b""
> if f in copy:
> - note += "*"
> + note += b"*"
> if f in divergeset:
> - note += "!"
> + note += b"!"
> if f in renamedeleteset:
> - note += "%"
> - repo.ui.debug(" src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f,
> - note))
> + note += b"%"
> + repo.ui.debug(b" src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f,
> + note))
> del divergeset
>
> if not fullcopy:
> return copy, {}, diverge, renamedelete, {}
>
> - repo.ui.debug(" checking for directory renames\n")
> + repo.ui.debug(b" checking for directory renames\n")
>
> # generate a directory move map
> d1, d2 = c1.dirs(), c2.dirs()
> # Hack for adding '', which is not otherwise added, to d1 and d2
> - d1.addpath('/')
> - d2.addpath('/')
> + d1.addpath(b'/')
> + d2.addpath(b'/')
> invalid = set()
> dirmove = {}
>
> # examine each file copy for a potential directory move, which is
> # when all the files in a directory are moved to a new directory
> - for dst, src in fullcopy.iteritems():
> + for dst, src in fullcopy.items():
> dsrc, ddst = pathutil.dirname(src), pathutil.dirname(dst)
> if dsrc in invalid:
> # already seen to be uninteresting
> continue
> elif dsrc in d1 and ddst in d1:
> # directory wasn't entirely moved locally
> - invalid.add(dsrc + "/")
> + invalid.add(dsrc + b"/")
> elif dsrc in d2 and ddst in d2:
> # directory wasn't entirely moved remotely
> - invalid.add(dsrc + "/")
> - elif dsrc + "/" in dirmove and dirmove[dsrc + "/"] != ddst + "/":
> + invalid.add(dsrc + b"/")
> + elif dsrc + b"/" in dirmove and dirmove[dsrc + b"/"] != ddst + b"/":
> # files from the same directory moved to two different places
> - invalid.add(dsrc + "/")
> + invalid.add(dsrc + b"/")
> else:
> # looks good so far
> - dirmove[dsrc + "/"] = ddst + "/"
> + dirmove[dsrc + b"/"] = ddst + b"/"
>
> for i in invalid:
> if i in dirmove:
> @@ -398,7 +398,7 @@
> return copy, {}, diverge, renamedelete, {}
>
> for d in dirmove:
> - repo.ui.debug(" discovered dir src: '%s' -> dst: '%s'\n" %
> + repo.ui.debug(b" discovered dir src: '%s' -> dst: '%s'\n" %
> (d, dirmove[d]))
>
> movewithdir = {}
> @@ -411,18 +411,18 @@
> df = dirmove[d] + f[len(d):]
> if df not in copy:
> movewithdir[f] = df
> - repo.ui.debug((" pending file src: '%s' -> "
> - "dst: '%s'\n") % (f, df))
> + repo.ui.debug((b" pending file src: '%s' -> "
> + b"dst: '%s'\n") % (f, df))
> break
>
> return copy, movewithdir, diverge, renamedelete, dirmove
>
> # hg <= 4.9 compat (7694b685bb10)
> -fixupstreamed = util.safehasattr(scmutil, 'movedirstate')
> +fixupstreamed = util.safehasattr(scmutil, b'movedirstate')
> if not fixupstreamed:
> copies._fullcopytracing = fixedcopytracing
>
> -if not util.safehasattr(obsutil, "_succs"):
> +if not util.safehasattr(obsutil, b"_succs"):
> class _succs(list):
> """small class to represent a successors with some metadata about it"""
>
> @@ -450,12 +450,12 @@
> _succs instance
> """
>
> - if not util.safehasattr(succs, "markers"):
> + if not util.safehasattr(succs, b"markers"):
> return _succs(succs)
> else:
> return succs
>
> -if not util.safehasattr(obsutil, "markersdates"):
> +if not util.safehasattr(obsutil, b"markersdates"):
> MARKERS_DATE_COMPAT = True
> else:
> MARKERS_DATE_COMPAT = False
> @@ -468,7 +468,7 @@
>
> return [m[4] for m in markers]
>
> -if not util.safehasattr(obsutil, "markersusers"):
> +if not util.safehasattr(obsutil, b"markersusers"):
> MARKERS_USERS_COMPAT = True
> else:
> MARKERS_USERS_COMPAT = False
> @@ -480,12 +480,12 @@
> return obsutil.markersusers(markers)
>
> markersmeta = [dict(m[3]) for m in markers]
> - users = set(encoding.tolocal(meta['user']) for meta in markersmeta
> - if meta.get('user'))
> + users = set(encoding.tolocal(meta[b'user']) for meta in markersmeta
> + if meta.get(b'user'))
>
> return sorted(users)
>
> -if not util.safehasattr(obsutil, "markersoperations"):
> +if not util.safehasattr(obsutil, b"markersoperations"):
> MARKERS_OPERATIONS_COMPAT = True
> else:
> MARKERS_OPERATIONS_COMPAT = False
> @@ -497,7 +497,7 @@
> return obsutil.markersoperations(markers)
>
> markersmeta = [dict(m[3]) for m in markers]
> - operations = set(meta.get('operation') for meta in markersmeta
> - if meta.get('operation'))
> + operations = set(meta.get(b'operation') for meta in markersmeta
> + if meta.get(b'operation'))
>
> return sorted(operations)
> diff --git a/hgext3rd/evolve/dagutil.py b/hgext3rd/evolve/dagutil.py
> --- a/hgext3rd/evolve/dagutil.py
> +++ b/hgext3rd/evolve/dagutil.py
> @@ -140,7 +140,7 @@
> def _internalize(self, id):
> ix = self._revlog.rev(id)
> if ix == nullrev:
> - raise LookupError(id, self._revlog.indexfile, _('nullid'))
> + raise LookupError(id, self._revlog.indexfile, _(b'nullid'))
> return ix
>
> def _internalizeall(self, ids, filterunknown):
> diff --git a/hgext3rd/evolve/debugcmd.py b/hgext3rd/evolve/debugcmd.py
> --- a/hgext3rd/evolve/debugcmd.py
> +++ b/hgext3rd/evolve/debugcmd.py
> @@ -21,7 +21,7 @@
>
> eh = exthelper.exthelper()
>
> -@eh.command('debugobsstorestat', [], '')
> +@eh.command(b'debugobsstorestat', [], b'')
> def cmddebugobsstorestat(ui, repo):
> """print statistics about obsolescence markers in the repo"""
> def _updateclustermap(nodes, mark, clustersmap):
> @@ -45,7 +45,7 @@
> unfi = repo.unfiltered()
> nm = unfi.changelog.nodemap
> nbmarkers = len(store._all)
> - ui.write(_('markers total: %9i\n') % nbmarkers)
> + ui.write(_(b'markers total: %9i\n') % nbmarkers)
> sucscount = [0, 0, 0, 0]
> known = 0
> parentsdata = 0
> @@ -67,7 +67,7 @@
> metakeys.setdefault(key, 0)
> metakeys[key] += 1
> meta = dict(meta)
> - parents = [meta.get('p1'), meta.get('p2')]
> + parents = [meta.get(b'p1'), meta.get(b'p2')]
> parents = [node.bin(p) for p in parents if p is not None]
> if parents:
> parentsdata += 1
> @@ -91,71 +91,71 @@
> fc = (frozenset(c[0]), frozenset(c[1]))
> for n in fc[0]:
> pclustersmap[n] = fc
> - numobs = len(unfi.revs('obsolete()'))
> + numobs = len(unfi.revs(b'obsolete()'))
> numtotal = len(unfi)
> - ui.write((' for known precursors: %9i' % known))
> - ui.write((' (%i/%i obsolete changesets)\n' % (numobs, numtotal)))
> - ui.write((' with parents data: %9i\n' % parentsdata))
> + ui.write((b' for known precursors: %9i' % known))
> + ui.write((b' (%i/%i obsolete changesets)\n' % (numobs, numtotal)))
> + ui.write((b' with parents data: %9i\n' % parentsdata))
> # successors data
> - ui.write(('markers with no successors: %9i\n' % sucscount[0]))
> - ui.write((' 1 successors: %9i\n' % sucscount[1]))
> - ui.write((' 2 successors: %9i\n' % sucscount[2]))
> - ui.write((' more than 2 successors: %9i\n' % sucscount[3]))
> + ui.write((b'markers with no successors: %9i\n' % sucscount[0]))
> + ui.write((b' 1 successors: %9i\n' % sucscount[1]))
> + ui.write((b' 2 successors: %9i\n' % sucscount[2]))
> + ui.write((b' more than 2 successors: %9i\n' % sucscount[3]))
> # meta data info
> - ui.write((' available keys:\n'))
> + ui.write((b' available keys:\n'))
> for key in sorted(metakeys):
> - ui.write((' %15s: %9i\n' % (key, metakeys[key])))
> + ui.write((b' %15s: %9i\n' % (key, metakeys[key])))
>
> size_v0.sort()
> size_v1.sort()
> if size_v0:
> - ui.write('marker size:\n')
> + ui.write(b'marker size:\n')
> # format v1
> - ui.write(' format v1:\n')
> - ui.write((' smallest length: %9i\n' % size_v1[0]))
> - ui.write((' longer length: %9i\n' % size_v1[-1]))
> + ui.write(b' format v1:\n')
> + ui.write((b' smallest length: %9i\n' % size_v1[0]))
> + ui.write((b' longer length: %9i\n' % size_v1[-1]))
> median = size_v1[nbmarkers // 2]
> - ui.write((' median length: %9i\n' % median))
> + ui.write((b' median length: %9i\n' % median))
> mean = sum(size_v1) // nbmarkers
> - ui.write((' mean length: %9i\n' % mean))
> + ui.write((b' mean length: %9i\n' % mean))
> # format v0
> - ui.write(' format v0:\n')
> - ui.write((' smallest length: %9i\n' % size_v0[0]))
> - ui.write((' longer length: %9i\n' % size_v0[-1]))
> + ui.write(b' format v0:\n')
> + ui.write((b' smallest length: %9i\n' % size_v0[0]))
> + ui.write((b' longer length: %9i\n' % size_v0[-1]))
> median = size_v0[nbmarkers // 2]
> - ui.write((' median length: %9i\n' % median))
> + ui.write((b' median length: %9i\n' % median))
> mean = sum(size_v0) // nbmarkers
> - ui.write((' mean length: %9i\n' % mean))
> + ui.write((b' mean length: %9i\n' % mean))
>
> allclusters = list(set(clustersmap.values()))
> allclusters.sort(key=lambda x: len(x[1]))
> - ui.write(('disconnected clusters: %9i\n' % len(allclusters)))
> + ui.write((b'disconnected clusters: %9i\n' % len(allclusters)))
>
> - ui.write(' any known node: %9i\n'
> + ui.write(b' any known node: %9i\n'
> % len([c for c in allclusters
> if [n for n in c[0] if nm.get(n) is not None]]))
> if allclusters:
> nbcluster = len(allclusters)
> - ui.write((' smallest length: %9i\n' % len(allclusters[0][1])))
> - ui.write((' longer length: %9i\n'
> - % len(allclusters[-1][1])))
> + ui.write((b' smallest length: %9i\n' % len(allclusters[0][1])))
> + ui.write((b' longer length: %9i\n'
> + % len(allclusters[-1][1])))
> median = len(allclusters[nbcluster // 2][1])
> - ui.write((' median length: %9i\n' % median))
> + ui.write((b' median length: %9i\n' % median))
> mean = sum(len(x[1]) for x in allclusters) // nbcluster
> - ui.write((' mean length: %9i\n' % mean))
> + ui.write((b' mean length: %9i\n' % mean))
> allpclusters = list(set(pclustersmap.values()))
> allpclusters.sort(key=lambda x: len(x[1]))
> - ui.write((' using parents data: %9i\n' % len(allpclusters)))
> - ui.write(' any known node: %9i\n'
> + ui.write((b' using parents data: %9i\n' % len(allpclusters)))
> + ui.write(b' any known node: %9i\n'
> % len([c for c in allclusters
> if [n for n in c[0] if nm.get(n) is not None]]))
> if allpclusters:
> nbcluster = len(allpclusters)
> - ui.write((' smallest length: %9i\n'
> - % len(allpclusters[0][1])))
> - ui.write((' longer length: %9i\n'
> - % len(allpclusters[-1][1])))
> + ui.write((b' smallest length: %9i\n'
> + % len(allpclusters[0][1])))
> + ui.write((b' longer length: %9i\n'
> + % len(allpclusters[-1][1])))
> median = len(allpclusters[nbcluster // 2][1])
> - ui.write((' median length: %9i\n' % median))
> + ui.write((b' median length: %9i\n' % median))
> mean = sum(len(x[1]) for x in allpclusters) // nbcluster
> - ui.write((' mean length: %9i\n' % mean))
> + ui.write((b' mean length: %9i\n' % mean))
> diff --git a/hgext3rd/evolve/depthcache.py b/hgext3rd/evolve/depthcache.py
> --- a/hgext3rd/evolve/depthcache.py
> +++ b/hgext3rd/evolve/depthcache.py
> @@ -32,39 +32,39 @@
>
> def simpledepth(repo, rev):
> """simple but obviously right implementation of depth"""
> - return len(repo.revs('::%d', rev))
> + return len(repo.revs(b'::%d', rev))
>
> @eh.command(
> - 'debugdepth',
> + b'debugdepth',
> [
> - ('r', 'rev', [], 'revs to print depth for'),
> - ('', 'method', 'cached', "one of 'simple', 'cached', 'compare'"),
> - ],
> - _('REVS'))
> + (b'r', b'rev', [], b'revs to print depth for'),
> + (b'', b'method', b'cached', b"one of 'simple', 'cached', 'compare'"),
> + ],
> + _(b'REVS'))
> def debugdepth(ui, repo, **opts):
> """display depth of REVS
> """
> - revs = scmutil.revrange(repo, opts['rev'])
> - method = opts['method']
> - if method in ('cached', 'compare'):
> + revs = scmutil.revrange(repo, opts[b'rev'])
> + method = opts[b'method']
> + if method in (b'cached', b'compare'):
> cache = repo.depthcache
> cache.save(repo)
> for r in revs:
> ctx = repo[r]
> - if method == 'simple':
> + if method == b'simple':
> depth = simpledepth(repo, r)
> - elif method == 'cached':
> + elif method == b'cached':
> depth = cache.get(r)
> - elif method == 'compare':
> + elif method == b'compare':
> simple = simpledepth(repo, r)
> cached = cache.get(r)
> if simple != cached:
> - raise error.Abort('depth differ for revision %s: %d != %d'
> + raise error.Abort(b'depth differ for revision %s: %d != %d'
> % (ctx, simple, cached))
> depth = simple
> else:
> - raise error.Abort('unknown method "%s"' % method)
> - ui.write('%s %d\n' % (ctx, depth))
> + raise error.Abort(b'unknown method "%s"' % method)
> + ui.write(b'%s %d\n' % (ctx, depth))
>
> @eh.reposetup
> def setupcache(ui, repo):
> @@ -79,7 +79,7 @@
>
> @localrepo.unfilteredmethod
> def destroyed(self):
> - if 'depthcache' in vars(self):
> + if b'depthcache' in vars(self):
> self.depthcache.clear()
> super(depthcacherepo, self).destroyed()
>
> @@ -94,16 +94,16 @@
>
> class depthcache(genericcaches.changelogsourcebase):
>
> - _filepath = 'evoext-depthcache-00'
> - _cachename = 'evo-ext-depthcache'
> + _filepath = b'evoext-depthcache-00'
> + _cachename = b'evo-ext-depthcache'
>
> def __init__(self):
> super(depthcache, self).__init__()
> - self._data = array.array('l')
> + self._data = array.array(b'l')
>
> def get(self, rev):
> if len(self._data) <= rev:
> - raise error.ProgrammingError('depthcache must be warmed before use')
> + raise error.ProgrammingError(b'depthcache must be warmed before use')
> return self._data[rev]
>
> def _updatefrom(self, repo, data):
> @@ -113,15 +113,15 @@
> total = len(data)
>
> def progress(pos, rev):
> - compat.progress(repo.ui, 'updating depth cache',
> - pos, 'rev %s' % rev, unit='revision', total=total)
> - progress(0, '')
> + compat.progress(repo.ui, b'updating depth cache',
> + pos, b'rev %s' % rev, unit=b'revision', total=total)
> + progress(0, b'')
> for idx, rev in enumerate(data, 1):
> assert rev == len(self._data), (rev, len(self._data))
> self._data.append(self._depth(cl, rev))
> if not (idx % 10000): # progress as a too high performance impact
> progress(idx, rev)
> - progress(None, '')
> + progress(None, b'')
>
> def _depth(self, changelog, rev):
> cl = changelog
> @@ -170,7 +170,7 @@
> Subclasses MUST overide this method to actually affect the cache data.
> """
> super(depthcache, self).clear()
> - self._data = array.array('l')
> + self._data = array.array(b'l')
>
> # crude version of a cache, to show the kind of information we have to store
>
> @@ -179,7 +179,7 @@
> assert repo.filtername is None
>
> data = repo.cachevfs.tryread(self._filepath)
> - self._data = array.array('l')
> + self._data = array.array(b'l')
> if not data:
> self._cachekey = self.emptykey
> else:
> @@ -198,12 +198,12 @@
> return
>
> try:
> - cachefile = repo.cachevfs(self._filepath, 'w', atomictemp=True)
> + cachefile = repo.cachevfs(self._filepath, b'w', atomictemp=True)
> headerdata = self._serializecachekey()
> cachefile.write(headerdata)
> cachefile.write(self._data.tostring())
> cachefile.close()
> self._ondiskkey = self._cachekey
> except (IOError, OSError) as exc:
> - repo.ui.log('depthcache', 'could not write update %s\n' % exc)
> - repo.ui.debug('depthcache: could not write update %s\n' % exc)
> + repo.ui.log(b'depthcache', b'could not write update %s\n' % exc)
> + repo.ui.debug(b'depthcache: could not write update %s\n' % exc)
> diff --git a/hgext3rd/evolve/evolvecmd.py b/hgext3rd/evolve/evolvecmd.py
> --- a/hgext3rd/evolve/evolvecmd.py
> +++ b/hgext3rd/evolve/evolvecmd.py
> @@ -52,7 +52,7 @@
> eh = exthelper.exthelper()
> mergetoolopts = commands.mergetoolopts
>
> -abortmessage = _("see `hg help evolve.interrupted`\n")
> +abortmessage = _(b"see `hg help evolve.interrupted`\n")
>
> def _solveone(ui, repo, ctx, evolvestate, dryrun, confirm,
> progresscb, category, lastsolved=None, stacktmplt=False):
> @@ -69,23 +69,23 @@
> displayer = None
> if stacktmplt:
> displayer = compat.changesetdisplayer(ui, repo,
> - {'template': stacktemplate})
> + {b'template': stacktemplate})
> else:
> displayer = compat.changesetdisplayer(ui, repo,
> - {'template': shorttemplate})
> - if 'orphan' == category:
> + {b'template': shorttemplate})
> + if b'orphan' == category:
> result = _solveunstable(ui, repo, ctx, evolvestate, displayer,
> dryrun, confirm, progresscb,
> lastsolved=lastsolved)
> - elif 'phasedivergent' == category:
> + elif b'phasedivergent' == category:
> result = _solvephasedivergence(ui, repo, ctx, evolvestate,
> displayer, dryrun, confirm,
> progresscb)
> - elif 'contentdivergent' == category:
> + elif b'contentdivergent' == category:
> result = _solvedivergent(ui, repo, ctx, evolvestate, displayer,
> dryrun, confirm, progresscb)
> else:
> - assert False, "unknown trouble category: %s" % (category)
> + assert False, b"unknown trouble category: %s" % (category)
> return result
>
> def _solveunstable(ui, repo, orig, evolvestate, displayer, dryrun=False,
> @@ -111,7 +111,7 @@
> else:
> # store that we are resolving an orphan merge with both parents
> # obsolete and proceed with first parent
> - evolvestate['orphanmerge'] = True
> + evolvestate[b'orphanmerge'] = True
> # we should process the second parent first, so that in case of
> # no-conflicts the first parent is processed later and preserved as
> # first parent
> @@ -119,40 +119,40 @@
> keepbranch = orig.p2().branch() != orig.branch()
>
> if not pctx.obsolete():
> - ui.warn(_("cannot solve instability of %s, skipping\n") % orig)
> - return (False, ".")
> + ui.warn(_(b"cannot solve instability of %s, skipping\n") % orig)
> + return (False, b".")
> obs = pctx
> newer = obsutil.successorssets(repo, obs.node())
> # search of a parent which is not killed
> while not newer or newer == [()]:
> - ui.debug("stabilize target %s is plain dead,"
> - " trying to stabilize on its parent\n" %
> + ui.debug(b"stabilize target %s is plain dead,"
> + b" trying to stabilize on its parent\n" %
> obs)
> obs = obs.parents()[0]
> newer = obsutil.successorssets(repo, obs.node())
> if len(newer) > 1:
> - msg = _("skipping %s: divergent rewriting. can't choose "
> - "destination\n") % obs
> + msg = _(b"skipping %s: divergent rewriting. can't choose "
> + b"destination\n") % obs
> ui.write_err(msg)
> - return (False, ".")
> + return (False, b".")
> targets = newer[0]
> assert targets
> if len(targets) > 1:
> # split target, figure out which one to pick, are they all in line?
> targetrevs = [repo[r].rev() for r in targets]
> - roots = repo.revs('roots(%ld)', targetrevs)
> - heads = repo.revs('heads(%ld)', targetrevs)
> + roots = repo.revs(b'roots(%ld)', targetrevs)
> + heads = repo.revs(b'heads(%ld)', targetrevs)
> if len(roots) > 1 or len(heads) > 1:
> - cheader = _("ancestor '%s' split over multiple topological"
> - " branches.\nchoose an evolve destination:") % orig
> + cheader = _(b"ancestor '%s' split over multiple topological"
> + b" branches.\nchoose an evolve destination:") % orig
> selectedrev = utility.revselectionprompt(ui, repo, list(heads),
> cheader)
> if selectedrev is None:
> - msg = _("could not solve instability, "
> - "ambiguous destination: "
> - "parent split across two branches\n")
> + msg = _(b"could not solve instability, "
> + b"ambiguous destination: "
> + b"parent split across two branches\n")
> ui.write_err(msg)
> - return (False, ".")
> + return (False, b".")
> target = repo[selectedrev]
> else:
> target = repo[heads.first()]
> @@ -160,29 +160,29 @@
> target = targets[0]
> target = repo[target]
> if not ui.quiet or confirm:
> - repo.ui.write(_('move:'), label='evolve.operation')
> + repo.ui.write(_(b'move:'), label=b'evolve.operation')
> displayer.show(orig)
> if lastsolved is None or target != repo[lastsolved]:
> - repo.ui.write(_('atop:'))
> + repo.ui.write(_(b'atop:'))
> displayer.show(target)
> - if confirm and ui.prompt('perform evolve? [Ny]', 'n') != 'y':
> - raise error.Abort(_('evolve aborted by user'))
> - todo = 'hg rebase -r %s -d %s\n' % (orig, target)
> + if confirm and ui.prompt(b'perform evolve? [Ny]', b'n') != b'y':
> + raise error.Abort(_(b'evolve aborted by user'))
> + todo = b'hg rebase -r %s -d %s\n' % (orig, target)
> if dryrun:
> if progresscb:
> progresscb()
> repo.ui.write(todo)
> - return (False, ".")
> + return (False, b".")
> else:
> repo.ui.note(todo)
> if progresscb:
> progresscb()
> try:
> newid = relocate(repo, orig, target, evolvestate, pctx,
> - keepbranch, 'orphan')
> + keepbranch, b'orphan')
> return (True, newid)
> except error.InterventionRequired:
> - ops = {'current': orig.node()}
> + ops = {b'current': orig.node()}
> evolvestate.addopts(ops)
> evolvestate.save()
> raise
> @@ -201,32 +201,32 @@
> bumped = repo[bumped.rev()]
> # For now we deny bumped merge
> if len(bumped.parents()) > 1:
> - msg = _('skipping %s : we do not handle merge yet\n') % bumped
> + msg = _(b'skipping %s : we do not handle merge yet\n') % bumped
> ui.write_err(msg)
> - return (False, ".")
> - prec = repo.set('last(allpredecessors(%d) and public())', bumped.rev()).next()
> + return (False, b".")
> + prec = repo.set(b'last(allpredecessors(%d) and public())', bumped.rev()).next()
> # For now we deny target merge
> if len(prec.parents()) > 1:
> - msg = _('skipping: %s: public version is a merge, '
> - 'this is not handled yet\n') % prec
> + msg = _(b'skipping: %s: public version is a merge, '
> + b'this is not handled yet\n') % prec
> ui.write_err(msg)
> - return (False, ".")
> + return (False, b".")
>
> if not ui.quiet or confirm:
> - repo.ui.write(_('recreate:'), label='evolve.operation')
> + repo.ui.write(_(b'recreate:'), label=b'evolve.operation')
> displayer.show(bumped)
> - repo.ui.write(_('atop:'))
> + repo.ui.write(_(b'atop:'))
> displayer.show(prec)
> - if confirm and ui.prompt('perform evolve? [Ny]', 'n') != 'y':
> - raise error.Abort(_('evolve aborted by user'))
> + if confirm and ui.prompt(b'perform evolve? [Ny]', b'n') != b'y':
> + raise error.Abort(_(b'evolve aborted by user'))
> if dryrun:
> - todo = 'hg rebase --rev %s --dest %s;\n' % (bumped, prec.p1())
> + todo = b'hg rebase --rev %s --dest %s;\n' % (bumped, prec.p1())
> repo.ui.write(todo)
> - repo.ui.write(('hg update %s;\n' % prec))
> - repo.ui.write(('hg revert --all --rev %s;\n' % bumped))
> - repo.ui.write(('hg commit --msg "%s update to %s"\n' %
> - (TROUBLES['PHASEDIVERGENT'], bumped)))
> - return (False, ".")
> + repo.ui.write((b'hg update %s;\n' % prec))
> + repo.ui.write((b'hg revert --all --rev %s;\n' % bumped))
> + repo.ui.write((b'hg commit --msg "%s update to %s"\n' %
> + (TROUBLES[b'PHASEDIVERGENT'], bumped)))
> + return (False, b".")
> if progresscb:
> progresscb()
>
> @@ -236,22 +236,22 @@
> # evolved or any other operation which can change parent. In such cases,
> # when parents are not same, we first rebase the divergent changeset onto
> # parent or precursor and then perform later steps
> - if not list(repo.set('parents(%d) and parents(%d)', bumped.rev(), prec.rev())):
> + if not list(repo.set(b'parents(%d) and parents(%d)', bumped.rev(), prec.rev())):
> # Need to rebase the changeset at the right place
> repo.ui.status(
> - _('rebasing to destination parent: %s\n') % prec.p1())
> + _(b'rebasing to destination parent: %s\n') % prec.p1())
> try:
> newnode = relocate(repo, bumped, prec.p1(), evolvestate,
> - category='phasedivergent')
> + category=b'phasedivergent')
> if newnode is not None:
> new = repo[newnode]
> obsolete.createmarkers(repo, [(bumped, (new,))],
> - operation='evolve')
> + operation=b'evolve')
> bumped = new
> - evolvestate['temprevs'].append(newnode)
> + evolvestate[b'temprevs'].append(newnode)
> except error.InterventionRequired:
> - evolvestate['current'] = bumped.hex()
> - evolvestate['precursor'] = prec.hex()
> + evolvestate[b'current'] = bumped.hex()
> + evolvestate[b'precursor'] = prec.hex()
> evolvestate.save()
> raise
>
> @@ -283,7 +283,7 @@
> merge.update(repo, bumped.node(), ancestor=prec, mergeancestor=True,
> branchmerge=True, force=False, wc=wctx)
> if not wctx.isempty():
> - text = '%s update to %s:\n\n' % (TROUBLES['PHASEDIVERGENT'], prec)
> + text = b'%s update to %s:\n\n' % (TROUBLES[b'PHASEDIVERGENT'], prec)
> text += bumped.description()
> memctx = wctx.tomemctx(text,
> parents=(prec.node(), nodemod.nullid),
> @@ -293,14 +293,14 @@
> newid = repo.commitctx(memctx)
> replacementnode = newid
> if newid is None:
> - repo.ui.status(_('no changes to commit\n'))
> - obsolete.createmarkers(repo, [(bumped, ())], operation='evolve')
> + repo.ui.status(_(b'no changes to commit\n'))
> + obsolete.createmarkers(repo, [(bumped, ())], operation=b'evolve')
> newid = prec.node()
> else:
> - repo.ui.status(_('committed as %s\n') % nodemod.short(newid))
> + repo.ui.status(_(b'committed as %s\n') % nodemod.short(newid))
> phases.retractboundary(repo, tr, bumped.phase(), [newid])
> obsolete.createmarkers(repo, [(bumped, (repo[newid],))],
> - flag=obsolete.bumpedfix, operation='evolve')
> + flag=obsolete.bumpedfix, operation=b'evolve')
> bmupdate(newid)
> # reroute the working copy parent to the new changeset
> with repo.dirstate.parentchange():
> @@ -319,45 +319,45 @@
> """
> repo = repo.unfiltered()
> divergent = repo[divergent.rev()]
> - evolvestate['divergent'] = divergent.node()
> - evolvestate['orig-divergent'] = divergent.node()
> + evolvestate[b'divergent'] = divergent.node()
> + evolvestate[b'orig-divergent'] = divergent.node()
> # sometimes we will relocate a node in case of different parents and we can
> # encounter conflicts after relocation is done while solving
> # content-divergence and if the user calls `hg evolve --stop`, we need to
> # strip that relocated commit. However if `--all` is passed, we need to
> # reset this value for each content-divergence resolution which we are doing
> # below.
> - evolvestate['relocated'] = None
> - evolvestate['relocating'] = False
> + evolvestate[b'relocated'] = None
> + evolvestate[b'relocating'] = False
> # in case or relocation we get a new other node, we need to store the old
> # other for purposes like `--abort` or `--stop`
> - evolvestate['old-other'] = None
> + evolvestate[b'old-other'] = None
> base, others = divergentdata(divergent)
>
> # we don't handle split in content-divergence yet
> if len(others) > 1:
> - othersstr = "[%s]" % (','.join([str(i) for i in others]))
> - msg = _("skipping %s: %s with a changeset that got split"
> - " into multiple ones:\n"
> - "|[%s]\n"
> - "| This is not handled by automatic evolution yet\n"
> - "| You have to fallback to manual handling with commands "
> - "such as:\n"
> - "| - hg touch -D\n"
> - "| - hg prune\n"
> - "| \n"
> - "| You should contact your local evolution Guru for help.\n"
> - ) % (divergent, TROUBLES['CONTENTDIVERGENT'], othersstr)
> + othersstr = b"[%s]" % (b','.join([str(i) for i in others]))
> + msg = _(b"skipping %s: %s with a changeset that got split"
> + b" into multiple ones:\n"
> + b"|[%s]\n"
> + b"| This is not handled by automatic evolution yet\n"
> + b"| You have to fallback to manual handling with commands "
> + b"such as:\n"
> + b"| - hg touch -D\n"
> + b"| - hg prune\n"
> + b"| \n"
> + b"| You should contact your local evolution Guru for help.\n"
> + ) % (divergent, TROUBLES[b'CONTENTDIVERGENT'], othersstr)
> ui.write_err(msg)
> - return (False, ".")
> + return (False, b".")
> other = others[0]
> - evolvestate['other-divergent'] = other.node()
> - evolvestate['base'] = base.node()
> + evolvestate[b'other-divergent'] = other.node()
> + evolvestate[b'base'] = base.node()
>
> def swapnodes(div, other):
> div, other = other, div
> - evolvestate['divergent'] = div.node()
> - evolvestate['other-divergent'] = other.node()
> + evolvestate[b'divergent'] = div.node()
> + evolvestate[b'other-divergent'] = other.node()
> return div, other
> # haspubdiv: to keep track if we are solving public content-divergence
> haspubdiv = False
> @@ -370,17 +370,17 @@
> divergent, other = swapnodes(divergent, other)
> else:
> publicdiv = divergent
> - evolvestate['public-divergent'] = publicdiv.node()
> + evolvestate[b'public-divergent'] = publicdiv.node()
> # we don't handle merge content-divergent changesets yet
> if len(other.parents()) > 1:
> - msg = _("skipping %s: %s changeset can't be "
> - "a merge (yet)\n") % (divergent, TROUBLES['CONTENTDIVERGENT'])
> + msg = _(b"skipping %s: %s changeset can't be "
> + b"a merge (yet)\n") % (divergent, TROUBLES[b'CONTENTDIVERGENT'])
> ui.write_err(msg)
> - hint = _("You have to fallback to solving this by hand...\n"
> - "| This probably means redoing the merge and using \n"
> - "| `hg prune` to kill older version.\n")
> + hint = _(b"You have to fallback to solving this by hand...\n"
> + b"| This probably means redoing the merge and using \n"
> + b"| `hg prune` to kill older version.\n")
> ui.write_err(hint)
> - return (False, ".")
> + return (False, b".")
>
> otherp1 = other.p1().rev()
> divp1 = divergent.p1().rev()
> @@ -399,15 +399,15 @@
> # the changeset on which resolution changeset will be based on
> resolutionparent = repo[divp1].node()
>
> - gca = repo.revs("ancestor(%d, %d)" % (otherp1, divp1))
> + gca = repo.revs(b"ancestor(%d, %d)" % (otherp1, divp1))
> # divonly: non-obsolete csets which are topological ancestor of "divergent"
> # but not "other"
> - divonly = repo.revs("only(%d, %d) - obsolete()" % (divergent.rev(),
> - other.rev()))
> + divonly = repo.revs(b"only(%d, %d) - obsolete()" % (divergent.rev(),
> + other.rev()))
> # otheronly: non-obsolete csets which are topological ancestor of "other"
> # but not "div"
> - otheronly = repo.revs("only(%d, %d) - obsolete()" % (other.rev(),
> - divergent.rev()))
> + otheronly = repo.revs(b"only(%d, %d) - obsolete()" % (other.rev(),
> + divergent.rev()))
> # make it exclusive set
> divonly = set(divonly) - {divergent.rev()}
> otheronly = set(otheronly) - {other.rev()}
> @@ -467,62 +467,62 @@
> divergent, other = swapnodes(divergent, other)
> resolutionparent = divergent.p1().node()
> else:
> - msg = _("skipping %s: have a different parent than %s "
> - "(not handled yet)\n") % (divergent, other)
> - hint = _("| %(d)s, %(o)s are not based on the same changeset.\n"
> - "| With the current state of its implementation, \n"
> - "| evolve does not work in that case.\n"
> - "| rebase one of them next to the other and run \n"
> - "| this command again.\n"
> - "| - either: hg rebase --dest 'p1(%(d)s)' -r %(o)s\n"
> - "| - or: hg rebase --dest 'p1(%(o)s)' -r %(d)s\n"
> - ) % {'d': divergent, 'o': other}
> + msg = _(b"skipping %s: have a different parent than %s "
> + b"(not handled yet)\n") % (divergent, other)
> + hint = _(b"| %(d)s, %(o)s are not based on the same changeset.\n"
> + b"| With the current state of its implementation, \n"
> + b"| evolve does not work in that case.\n"
> + b"| rebase one of them next to the other and run \n"
> + b"| this command again.\n"
> + b"| - either: hg rebase --dest 'p1(%(d)s)' -r %(o)s\n"
> + b"| - or: hg rebase --dest 'p1(%(o)s)' -r %(d)s\n"
> + ) % {b'd': divergent, b'o': other}
> ui.write_err(msg)
> ui.write_err(hint)
> - return (False, ".")
> + return (False, b".")
>
> if not ui.quiet or confirm:
> - ui.write(_('merge:'), label='evolve.operation')
> + ui.write(_(b'merge:'), label=b'evolve.operation')
> displayer.show(divergent)
> - ui.write(_('with: '))
> + ui.write(_(b'with: '))
> displayer.show(other)
> - ui.write(_('base: '))
> + ui.write(_(b'base: '))
> displayer.show(base)
> - if confirm and ui.prompt(_('perform evolve? [Ny]'), 'n') != 'y':
> - raise error.Abort(_('evolve aborted by user'))
> + if confirm and ui.prompt(_(b'perform evolve? [Ny]'), b'n') != b'y':
> + raise error.Abort(_(b'evolve aborted by user'))
> if dryrun:
> - ui.write(('hg update -c %s &&\n' % divergent))
> - ui.write(('hg merge %s &&\n' % other))
> - ui.write(('hg commit -m "auto merge resolving conflict between '
> - '%s and %s"&&\n' % (divergent, other)))
> - ui.write(('hg up -C %s &&\n' % base))
> - ui.write(('hg revert --all --rev tip &&\n'))
> - ui.write(('hg commit -m "`hg log -r %s --template={desc}`";\n'
> - % divergent))
> - return (False, ".")
> + ui.write((b'hg update -c %s &&\n' % divergent))
> + ui.write((b'hg merge %s &&\n' % other))
> + ui.write((b'hg commit -m "auto merge resolving conflict between '
> + b'%s and %s"&&\n' % (divergent, other)))
> + ui.write((b'hg up -C %s &&\n' % base))
> + ui.write((b'hg revert --all --rev tip &&\n'))
> + ui.write((b'hg commit -m "`hg log -r %s --template={desc}`";\n'
> + % divergent))
> + return (False, b".")
>
> - evolvestate['resolutionparent'] = resolutionparent
> + evolvestate[b'resolutionparent'] = resolutionparent
> # relocate the other divergent if required
> if relocatereq:
> # relocating will help us understand during the time of conflicts that
> # whether conflicts occur at reloacting or they occured at merging
> # content divergent changesets
> - evolvestate['relocating'] = True
> - ui.status(_('rebasing "other" content-divergent changeset %s on'
> - ' %s\n' % (other, divergent.p1())))
> + evolvestate[b'relocating'] = True
> + ui.status(_(b'rebasing "other" content-divergent changeset %s on'
> + b' %s\n' % (other, divergent.p1())))
> try:
> newother = relocate(repo, other, divergent.p1(), evolvestate,
> keepbranch=True)
> except error.InterventionRequired:
> - evolvestate['current'] = other.node()
> + evolvestate[b'current'] = other.node()
> evolvestate.save()
> raise
> - evolvestate['old-other'] = other.node()
> + evolvestate[b'old-other'] = other.node()
> other = repo[newother]
> - evolvestate['relocating'] = False
> - evolvestate['relocated'] = other.node()
> - evolvestate['temprevs'].append(other.node())
> - evolvestate['other-divergent'] = other.node()
> + evolvestate[b'relocating'] = False
> + evolvestate[b'relocated'] = other.node()
> + evolvestate[b'temprevs'].append(other.node())
> + evolvestate[b'other-divergent'] = other.node()
>
> _mergecontentdivergents(repo, progresscb, divergent, other, base,
> evolvestate)
> @@ -544,9 +544,9 @@
> # case 2)
> pubstr = str(publicdiv)
> othstr = str(other)
> - msg = _('content divergence resolution between %s '
> - '(public) and %s has same content as %s, '
> - 'discarding %s\n')
> + msg = _(b'content divergence resolution between %s '
> + b'(public) and %s has same content as %s, '
> + b'discarding %s\n')
> msg %= (pubstr, othstr, pubstr, othstr)
> repo.ui.status(msg)
> return (res, newnode)
> @@ -558,12 +558,12 @@
> def _mergecontentdivergents(repo, progresscb, divergent, other, base,
> evolvestate):
> if divergent not in repo[None].parents():
> - repo.ui.note(_("updating to \"local\" side of the conflict: %s\n") %
> + repo.ui.note(_(b"updating to \"local\" side of the conflict: %s\n") %
> divergent.hex()[:12])
> hg.updaterepo(repo, divergent.node(), False)
> # merging the two content-divergent changesets
> - repo.ui.note(_("merging \"other\" %s changeset '%s'\n") %
> - (TROUBLES['CONTENTDIVERGENT'], other.hex()[:12]))
> + repo.ui.note(_(b"merging \"other\" %s changeset '%s'\n") %
> + (TROUBLES[b'CONTENTDIVERGENT'], other.hex()[:12]))
> if progresscb:
> progresscb()
> mergeancestor = repo.changelog.isancestor(divergent.node(), other.node())
> @@ -578,8 +578,8 @@
> # conflicts while merging content-divergent changesets
> if compat.hasconflict(stats):
> evolvestate.save()
> - raise error.InterventionRequired(_("fix conflicts and see `hg help "
> - "evolve.interrupted`"))
> + raise error.InterventionRequired(_(b"fix conflicts and see `hg help "
> + b"evolve.interrupted`"))
>
> def _completecontentdivergent(ui, repo, progresscb, divergent, other,
> base, evolvestate):
> @@ -588,20 +588,20 @@
> # resume resolution
> if progresscb:
> progresscb()
> - emtpycommitallowed = repo.ui.backupconfig('ui', 'allowemptycommit')
> + emtpycommitallowed = repo.ui.backupconfig(b'ui', b'allowemptycommit')
> tr = repo.currenttransaction()
> assert tr is not None
> # whether to store the obsmarker in the evolvestate
> storemarker = False
> - resparent = evolvestate['resolutionparent']
> + resparent = evolvestate[b'resolutionparent']
>
> # whether we are solving public divergence
> haspubdiv = False
> - if evolvestate.get('public-divergent'):
> + if evolvestate.get(b'public-divergent'):
> haspubdiv = True
> - publicnode = evolvestate['public-divergent']
> + publicnode = evolvestate[b'public-divergent']
> publicdiv = repo[publicnode]
> - othernode = evolvestate['other-divergent']
> + othernode = evolvestate[b'other-divergent']
> otherdiv = repo[othernode]
>
> with repo.dirstate.parentchange():
> @@ -614,7 +614,7 @@
> warnmetadataloss(repo, publicdiv, otherdiv)
> # no changes, create markers to resolve divergence
> obsolete.createmarkers(repo, [(otherdiv, (publicdiv,))],
> - operation='evolve')
> + operation=b'evolve')
> return (True, publicnode)
> try:
> with repo.dirstate.parentchange():
> @@ -640,29 +640,29 @@
> # no changes
> new = divergent
> storemarker = True
> - repo.ui.status(_("nothing changed\n"))
> + repo.ui.status(_(b"nothing changed\n"))
> hg.updaterepo(repo, divergent.rev(), False)
> else:
> new = repo[newnode]
> newnode = new.node()
> hg.updaterepo(repo, new.rev(), False)
> if haspubdiv and publicdiv == divergent:
> - bypassphase(repo, (divergent, new), operation='evolve')
> + bypassphase(repo, (divergent, new), operation=b'evolve')
> else:
> obsolete.createmarkers(repo, [(divergent, (new,))],
> - operation='evolve')
> + operation=b'evolve')
>
> # creating markers and moving phases post-resolution
> if haspubdiv and publicdiv == other:
> - bypassphase(repo, (other, new), operation='evolve')
> + bypassphase(repo, (other, new), operation=b'evolve')
> else:
> - obsolete.createmarkers(repo, [(other, (new,))], operation='evolve')
> + obsolete.createmarkers(repo, [(other, (new,))], operation=b'evolve')
> if storemarker:
> # storing the marker in the evolvestate
> # we just store the precursors and successor pair for now, we might
> # want to store more data and serialize obsmarker in a better way in
> # future
> - evolvestate['obsmarkers'].append((other.node(), new.node()))
> + evolvestate[b'obsmarkers'].append((other.node(), new.node()))
>
> phases.retractboundary(repo, tr, other.phase(), [new.node()])
> return (True, newnode)
> @@ -674,7 +674,7 @@
> public content-divergence"""
>
> # needtowarn: aspects where we need to warn user
> - needtowarn = ['branch', 'topic', 'close']
> + needtowarn = [b'branch', b'topic', b'close']
> aspects = set()
> localextra = local.extra()
> otherextra = other.extra()
> @@ -686,48 +686,48 @@
> aspects.add(asp)
>
> if other.description() != local.description():
> - aspects.add('description')
> + aspects.add(b'description')
>
> if aspects:
> # warn user
> locstr = str(local)
> othstr = str(other)
> - if 'close' in aspects:
> - filteredasp = aspects - {'close'}
> + if b'close' in aspects:
> + filteredasp = aspects - {b'close'}
> if filteredasp:
> - msg = _('other divergent changeset %s is a closed branch head '
> - 'and differs from local %s by "%s" only,' %
> - (othstr, locstr, ', '.join(sorted(filteredasp))))
> + msg = _(b'other divergent changeset %s is a closed branch head '
> + b'and differs from local %s by "%s" only,' %
> + (othstr, locstr, b', '.join(sorted(filteredasp))))
> else:
> - msg = _('other divergent changeset %s is a closed branch head '
> - 'and has same content as local %s,' % (othstr, locstr))
> + msg = _(b'other divergent changeset %s is a closed branch head '
> + b'and has same content as local %s,' % (othstr, locstr))
> else:
> - msg = _('other divergent changeset %s has same content as local %s'
> - ' and differs by "%s" only,' %
> - (othstr, locstr, ', '.join(sorted(aspects))))
> - msg += _(' discarding %s\n' % othstr)
> + msg = _(b'other divergent changeset %s has same content as local %s'
> + b' and differs by "%s" only,' %
> + (othstr, locstr, b', '.join(sorted(aspects))))
> + msg += _(b' discarding %s\n' % othstr)
> repo.ui.warn(msg)
>
> -def bypassphase(repo, relation, flag=0, metadata=None, operation='evolve'):
> +def bypassphase(repo, relation, flag=0, metadata=None, operation=b'evolve'):
> """function to create a single obsmarker relation even for public csets
> where relation should be a single pair (prec, succ)"""
>
> # prepare metadata
> if metadata is None:
> metadata = {}
> - if 'user' not in metadata:
> - luser = repo.ui.config('devel', 'user.obsmarker') or repo.ui.username()
> - metadata['user'] = encoding.fromlocal(luser)
> + if b'user' not in metadata:
> + luser = repo.ui.config(b'devel', b'user.obsmarker') or repo.ui.username()
> + metadata[b'user'] = encoding.fromlocal(luser)
> # Operation metadata handling
> - useoperation = repo.ui.configbool('experimental',
> - 'evolution.track-operation')
> + useoperation = repo.ui.configbool(b'experimental',
> + b'evolution.track-operation')
> if useoperation and operation:
> - metadata['operation'] = operation
> + metadata[b'operation'] = operation
>
> # Effect flag metadata handling
> - saveeffectflag = repo.ui.configbool('experimental',
> - 'evolution.effect-flags')
> - with repo.transaction('add-obsolescence-marker') as tr:
> + saveeffectflag = repo.ui.configbool(b'experimental',
> + b'evolution.effect-flags')
> + with repo.transaction(b'add-obsolescence-marker') as tr:
> prec, succ = relation
> nprec = prec.node()
> npare = None
> @@ -735,7 +735,7 @@
> if not nsucs:
> npare = tuple(p.node() for p in prec.parents())
> if nprec in nsucs:
> - raise error.Abort(_("changeset %s cannot obsolete itself") % prec)
> + raise error.Abort(_(b"changeset %s cannot obsolete itself") % prec)
>
> if saveeffectflag:
> # The effect flag is saved in a versioned field name for
> @@ -745,7 +745,7 @@
> except TypeError:
> # hg <= 4.7
> effectflag = obsutil.geteffectflag((prec, (succ,)))
> - metadata[obsutil.EFFECTFLAGFIELD] = "%d" % effectflag
> + metadata[obsutil.EFFECTFLAGFIELD] = b"%d" % effectflag
>
> # create markers
> repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
> @@ -847,11 +847,11 @@
> repo.dirstate.setbranch(othbranch)
> else:
> # all the three branches are different
> - index = repo.ui.promptchoice(_("content divergent changesets on "
> - "different branches.\nchoose branch"
> - " for the resolution changeset. (a) "
> - "%s or (b) %s or (c) %s? $$ &a $$ &b"
> - " $$ &c") %
> + index = repo.ui.promptchoice(_(b"content divergent changesets on "
> + b"different branches.\nchoose branch"
> + b" for the resolution changeset. (a) "
> + b"%s or (b) %s or (c) %s? $$ &a $$ &b"
> + b" $$ &c") %
> (basebranch, divbranch, othbranch), 0)
>
> if index == 0:
> @@ -868,20 +868,20 @@
> merger = simplemerge.Merge3Text(basedesc, divdesc, othdesc)
> mdesc = []
> kwargs = {}
> - kwargs['name_base'] = 'base'
> - kwargs['base_marker'] = '|||||||'
> - for line in merger.merge_lines(name_a='divergent', name_b='other',
> + kwargs['name_base'] = b'base'
> + kwargs['base_marker'] = b'|||||||'
> + for line in merger.merge_lines(name_a=b'divergent', name_b=b'other',
> **kwargs):
> mdesc.append(line)
>
> - desc = ''.join(mdesc)
> + desc = b''.join(mdesc)
> if merger.conflicts:
>
> - prefixes = ("HG: Conflicts while merging changeset description of"
> - " content-divergent changesets.\nHG: Resolve conflicts"
> - " in commit messages to continue.\n\n")
> + prefixes = (b"HG: Conflicts while merging changeset description of"
> + b" content-divergent changesets.\nHG: Resolve conflicts"
> + b" in commit messages to continue.\n\n")
>
> - resolveddesc = ui.edit(prefixes + desc, ui.username(), action='desc')
> + resolveddesc = ui.edit(prefixes + desc, ui.username(), action=b'desc')
> # make sure we remove the prefixes part from final commit message
> if prefixes in resolveddesc:
> # hack, we should find something better
> @@ -932,17 +932,17 @@
> returns the node of new commit which is formed
> """
> if orig.rev() == dest.rev():
> - msg = _('tried to relocate a node on top of itself')
> - hint = _("This shouldn't happen. If you still need to move changesets, "
> - "please do so manually with nothing to rebase - working "
> - "directory parent is also destination")
> + msg = _(b'tried to relocate a node on top of itself')
> + hint = _(b"This shouldn't happen. If you still need to move changesets, "
> + b"please do so manually with nothing to rebase - working "
> + b"directory parent is also destination")
> raise error.ProgrammingError(msg, hint=hint)
>
> if pctx is None:
> if len(orig.parents()) == 2:
> - msg = _("tried to relocate a merge commit without specifying which "
> - "parent should be moved")
> - hint = _("Specify the parent by passing in pctx")
> + msg = _(b"tried to relocate a merge commit without specifying which "
> + b"parent should be moved")
> + hint = _(b"Specify the parent by passing in pctx")
> raise error.ProgrammingError(msg, hint)
> pctx = orig.p1()
>
> @@ -952,7 +952,7 @@
> sha1s = re.findall(sha1re, commitmsg)
> unfi = repo.unfiltered()
> for sha1 in sha1s:
> - if util.safehasattr(scmutil, 'resolvehexnodeidprefix'): # > hg-4.6
> + if util.safehasattr(scmutil, 'resolvehexnodeidprefix'): # > hg-4.6
> fullnode = scmutil.resolvehexnodeidprefix(unfi, sha1)
> else:
> fullnode = unfi.changelog.index.partialmatch(sha1)
> @@ -970,8 +970,8 @@
> newsha1 = nodemod.hex(successors[0][0])
> commitmsg = commitmsg.replace(sha1, newsha1[:len(sha1)])
> else:
> - repo.ui.note(_('The stale commit message reference to %s could '
> - 'not be updated\n') % sha1)
> + repo.ui.note(_(b'The stale commit message reference to %s could '
> + b'not be updated\n') % sha1)
>
> tr = repo.currenttransaction()
> assert tr is not None
> @@ -984,8 +984,8 @@
> copies.duplicatecopies(repo, repo[None], dest.rev(),
> orig.p1().rev())
> dirstatedance(repo, dest, orig.node(), None)
> - raise error.InterventionRequired(_("fix conflicts and see `hg help "
> - "evolve.interrupted`"))
> + raise error.InterventionRequired(_(b"fix conflicts and see `hg help "
> + b"evolve.interrupted`"))
> nodenew = _relocatecommit(repo, orig, commitmsg)
> _finalizerelocate(repo, orig, dest, nodenew, tr, category, evolvestate)
> return nodenew
> @@ -994,14 +994,14 @@
> if commitmsg is None:
> commitmsg = orig.description()
> extra = dict(orig.extra())
> - if 'branch' in extra:
> - del extra['branch']
> - extra['rebase_source'] = orig.hex()
> + if b'branch' in extra:
> + del extra[b'branch']
> + extra[b'rebase_source'] = orig.hex()
>
> - backup = repo.ui.backupconfig('phases', 'new-commit')
> + backup = repo.ui.backupconfig(b'phases', b'new-commit')
> try:
> targetphase = max(orig.phase(), phases.draft)
> - repo.ui.setconfig('phases', 'new-commit', targetphase, 'evolve')
> + repo.ui.setconfig(b'phases', b'new-commit', targetphase, b'evolve')
> # Commit might fail if unresolved files exist
> nodenew = repo.commit(text=commitmsg, user=orig.user(),
> date=orig.date(), extra=extra)
> @@ -1017,18 +1017,18 @@
>
> if nodenew is not None:
> obsolete.createmarkers(repo, [(repo[nodesrc], (repo[nodenew],))],
> - operation='evolve')
> + operation=b'evolve')
> for book in oldbookmarks:
> bmchanges.append((book, nodenew))
> - evolvestate['bookmarkchanges'].append((book, nodesrc))
> + evolvestate[b'bookmarkchanges'].append((book, nodesrc))
> else:
> - if category == 'orphan':
> - repo.ui.status(_("evolution of %d:%s created no changes "
> - "to commit\n") % (orig.rev(), orig))
> - obsolete.createmarkers(repo, [(repo[nodesrc], ())], operation='evolve')
> + if category == b'orphan':
> + repo.ui.status(_(b"evolution of %d:%s created no changes "
> + b"to commit\n") % (orig.rev(), orig))
> + obsolete.createmarkers(repo, [(repo[nodesrc], ())], operation=b'evolve')
> # Behave like rebase, move bookmarks to dest
> for book in oldbookmarks:
> - evolvestate['bookmarkchanges'].append((book, nodesrc))
> + evolvestate[b'bookmarkchanges'].append((book, nodesrc))
> bmchanges.append((book, dest.node()))
> for book in destbookmarks: # restore bookmark that rebase move
> bmchanges.append((book, dest.node()))
> @@ -1038,61 +1038,61 @@
> def _evolvemerge(repo, orig, dest, pctx, keepbranch):
> """Used by the evolve function to merge dest on top of pctx.
> return the same tuple as merge.graft"""
> - if repo['.'].rev() != dest.rev():
> + if repo[b'.'].rev() != dest.rev():
> merge.update(repo,
> dest,
> branchmerge=False,
> force=True)
> if repo._activebookmark:
> - repo.ui.status(_("(leaving bookmark %s)\n") % repo._activebookmark)
> + repo.ui.status(_(b"(leaving bookmark %s)\n") % repo._activebookmark)
> bookmarksmod.deactivate(repo)
> if keepbranch:
> repo.dirstate.setbranch(orig.branch())
> - if util.safehasattr(repo, 'currenttopic'):
> + if util.safehasattr(repo, 'currenttopic'):
> # uurrgs
> # there no other topic setter yet
> - if not orig.topic() and repo.vfs.exists('topic'):
> - repo.vfs.unlink('topic')
> + if not orig.topic() and repo.vfs.exists(b'topic'):
> + repo.vfs.unlink(b'topic')
> else:
> - with repo.vfs.open('topic', 'w') as f:
> + with repo.vfs.open(b'topic', b'w') as f:
> f.write(orig.topic())
>
> - return merge.graft(repo, orig, pctx, ['destination', 'evolving'], True)
> + return merge.graft(repo, orig, pctx, [b'destination', b'evolving'], True)
>
> instabilities_map = {
> - 'contentdivergent': "content-divergent",
> - 'phasedivergent': "phase-divergent"
> + b'contentdivergent': b"content-divergent",
> + b'phasedivergent': b"phase-divergent"
> }
>
> def _selectrevs(repo, allopt, revopt, anyopt, targetcat):
> """select troubles in repo matching according to given options"""
> revs = set()
> if allopt or revopt:
> - revs = repo.revs("%s()" % targetcat)
> + revs = repo.revs(b"%s()" % targetcat)
> if revopt:
> revs = scmutil.revrange(repo, revopt) & revs
> elif not anyopt:
> - topic = getattr(repo, 'currenttopic', '')
> + topic = getattr(repo, 'currenttopic', b'')
> if topic:
> - revs = repo.revs('topic(%s)', topic) & revs
> - elif targetcat == 'orphan':
> + revs = repo.revs(b'topic(%s)', topic) & revs
> + elif targetcat == b'orphan':
> revs = _aspiringdescendant(repo,
> - repo.revs('(.::) - obsolete()::'))
> + repo.revs(b'(.::) - obsolete()::'))
> revs = set(revs)
> - if targetcat == 'contentdivergent':
> + if targetcat == b'contentdivergent':
> # Pick one divergent per group of divergents
> revs = _dedupedivergents(repo, revs)
> elif anyopt:
> - revs = repo.revs('first(%s())' % (targetcat))
> - elif targetcat == 'orphan':
> - revs = set(_aspiringchildren(repo, repo.revs('(.::) - obsolete()::')))
> + revs = repo.revs(b'first(%s())' % (targetcat))
> + elif targetcat == b'orphan':
> + revs = set(_aspiringchildren(repo, repo.revs(b'(.::) - obsolete()::')))
> if 1 < len(revs):
> - msg = "multiple evolve candidates"
> - hint = (_("select one of %s with --rev")
> - % ', '.join([str(repo[r]) for r in sorted(revs)]))
> + msg = b"multiple evolve candidates"
> + hint = (_(b"select one of %s with --rev")
> + % b', '.join([bytes(repo[r]) for r in sorted(revs)]))
> raise error.Abort(msg, hint=hint)
> - elif instabilities_map.get(targetcat, targetcat) in repo['.'].instabilities():
> - revs = set([repo['.'].rev()])
> + elif instabilities_map.get(targetcat, targetcat) in repo[b'.'].instabilities():
> + revs = set([repo[b'.'].rev()])
> return revs
>
> def _dedupedivergents(repo, revs):
> @@ -1121,14 +1121,14 @@
> XXX this woobly function won't survive XXX
> """
> repo = ctx._repo.unfiltered()
> - for base in repo.set('reverse(allpredecessors(%d))', ctx.rev()):
> + for base in repo.set(b'reverse(allpredecessors(%d))', ctx.rev()):
> newer = obsutil.successorssets(ctx._repo, base.node())
> # drop filter and solution including the original ctx
> newer = [n for n in newer if n and ctx.node() not in n]
> if newer:
> return base, tuple(ctx._repo[o] for o in newer[0])
> - raise error.Abort(_("base of divergent changeset %s not found") % ctx,
> - hint=_('this case is not yet handled'))
> + raise error.Abort(_(b"base of divergent changeset %s not found") % ctx,
> + hint=_(b'this case is not yet handled'))
>
> def _aspiringdescendant(repo, revs):
> """Return a list of changectx which can be stabilized on top of pctx or
> @@ -1136,7 +1136,7 @@
> target = set(revs)
> result = set(target)
> paths = collections.defaultdict(set)
> - for r in repo.revs('orphan() - %ld', revs):
> + for r in repo.revs(b'orphan() - %ld', revs):
> for d in _possibledestination(repo, r):
> paths[d].add(r)
>
> @@ -1155,7 +1155,7 @@
> one of its descendants. Empty list if none can be found."""
> target = set(revs)
> result = []
> - for r in repo.revs('orphan() - %ld', revs):
> + for r in repo.revs(b'orphan() - %ld', revs):
> dest = _possibledestination(repo, r)
> if target & dest:
> result.append(r)
> @@ -1188,104 +1188,104 @@
> def _handlenotrouble(ui, repo, allopt, revopt, anyopt, targetcat):
> """Used by the evolve function to display an error message when
> no troubles can be resolved"""
> - troublecategories = ['phasedivergent', 'contentdivergent', 'orphan']
> + troublecategories = [b'phasedivergent', b'contentdivergent', b'orphan']
> unselectedcategories = [c for c in troublecategories if c != targetcat]
> msg = None
> hint = None
> retoverride = None
>
> troubled = {
> - "orphan": repo.revs("orphan()"),
> - "contentdivergent": repo.revs("contentdivergent()"),
> - "phasedivergent": repo.revs("phasedivergent()"),
> - "all": repo.revs("unstable()"),
> + b"orphan": repo.revs(b"orphan()"),
> + b"contentdivergent": repo.revs(b"contentdivergent()"),
> + b"phasedivergent": repo.revs(b"phasedivergent()"),
> + b"all": repo.revs(b"unstable()"),
> }
>
> hintmap = {
> - 'phasedivergent': _("do you want to use --phase-divergent"),
> - 'phasedivergent+contentdivergent': _("do you want to use "
> - "--phase-divergent or"
> - " --content-divergent"),
> - 'phasedivergent+orphan': _("do you want to use --phase-divergent"
> - " or --orphan"),
> - 'contentdivergent': _("do you want to use --content-divergent"),
> - 'contentdivergent+orphan': _("do you want to use --content-divergent"
> - " or --orphan"),
> - 'orphan': _("do you want to use --orphan"),
> - 'any+phasedivergent': _("do you want to use --any (or --rev) and"
> - " --phase-divergent"),
> - 'any+phasedivergent+contentdivergent': _("do you want to use --any"
> - " (or --rev) and"
> - " --phase-divergent or"
> - " --content-divergent"),
> - 'any+phasedivergent+orphan': _("do you want to use --any (or --rev)"
> - " and --phase-divergent or --orphan"),
> - 'any+contentdivergent': _("do you want to use --any (or --rev) and"
> - " --content-divergent"),
> - 'any+contentdivergent+orphan': _("do you want to use --any (or --rev)"
> - " and --content-divergent or "
> - "--orphan"),
> - 'any+orphan': _("do you want to use --any (or --rev)"
> - "and --orphan"),
> + b'phasedivergent': _(b"do you want to use --phase-divergent"),
> + b'phasedivergent+contentdivergent': _(b"do you want to use "
> + b"--phase-divergent or"
> + b" --content-divergent"),
> + b'phasedivergent+orphan': _(b"do you want to use --phase-divergent"
> + b" or --orphan"),
> + b'contentdivergent': _(b"do you want to use --content-divergent"),
> + b'contentdivergent+orphan': _(b"do you want to use --content-divergent"
> + b" or --orphan"),
> + b'orphan': _(b"do you want to use --orphan"),
> + b'any+phasedivergent': _(b"do you want to use --any (or --rev) and"
> + b" --phase-divergent"),
> + b'any+phasedivergent+contentdivergent': _(b"do you want to use --any"
> + b" (or --rev) and"
> + b" --phase-divergent or"
> + b" --content-divergent"),
> + b'any+phasedivergent+orphan': _(b"do you want to use --any (or --rev)"
> + b" and --phase-divergent or --orphan"),
> + b'any+contentdivergent': _(b"do you want to use --any (or --rev) and"
> + b" --content-divergent"),
> + b'any+contentdivergent+orphan': _(b"do you want to use --any (or --rev)"
> + b" and --content-divergent or "
> + b"--orphan"),
> + b'any+orphan': _(b"do you want to use --any (or --rev)"
> + b"and --orphan"),
> }
>
> if revopt:
> revs = scmutil.revrange(repo, revopt)
> if not revs:
> - msg = _("set of specified revisions is empty")
> + msg = _(b"set of specified revisions is empty")
> else:
> - msg = _("no %s changesets in specified revisions") % targetcat
> + msg = _(b"no %s changesets in specified revisions") % targetcat
> othertroubles = []
> for cat in unselectedcategories:
> if revs & troubled[cat]:
> othertroubles.append(cat)
> if othertroubles:
> - hint = hintmap['+'.join(othertroubles)]
> + hint = hintmap[b'+'.join(othertroubles)]
>
> elif anyopt:
> - msg = _("no %s changesets to evolve") % targetcat
> + msg = _(b"no %s changesets to evolve") % targetcat
> othertroubles = []
> for cat in unselectedcategories:
> if troubled[cat]:
> othertroubles.append(cat)
> if othertroubles:
> - hint = hintmap['+'.join(othertroubles)]
> + hint = hintmap[b'+'.join(othertroubles)]
>
> else:
> # evolve without any option = relative to the current wdir
> - if targetcat == 'orphan':
> - msg = _("nothing to evolve on current working copy parent")
> + if targetcat == b'orphan':
> + msg = _(b"nothing to evolve on current working copy parent")
> else:
> - msg = _("current working copy parent is not %s") % targetcat
> + msg = _(b"current working copy parent is not %s") % targetcat
>
> - p1 = repo['.'].rev()
> + p1 = repo[b'.'].rev()
> othertroubles = []
> for cat in unselectedcategories:
> if p1 in troubled[cat]:
> othertroubles.append(cat)
> if othertroubles:
> - hint = hintmap['+'.join(othertroubles)]
> + hint = hintmap[b'+'.join(othertroubles)]
> else:
> length = len(troubled[targetcat])
> if length:
> - hint = _("%d other %s in the repository, do you want --any "
> - "or --rev") % (length, targetcat)
> + hint = _(b"%d other %s in the repository, do you want --any "
> + b"or --rev") % (length, targetcat)
> else:
> othertroubles = []
> for cat in unselectedcategories:
> if troubled[cat]:
> othertroubles.append(cat)
> if othertroubles:
> - hint = hintmap['any+' + ('+'.join(othertroubles))]
> + hint = hintmap[b'any+' + (b'+'.join(othertroubles))]
> else:
> - msg = _("no troubled changesets")
> + msg = _(b"no troubled changesets")
> # Exit with a 0 (success) status in this case.
> retoverride = 0
>
> assert msg is not None
> - ui.write_err("%s\n" % msg)
> + ui.write_err(b"%s\n" % msg)
> if hint:
> - ui.write_err("(%s)\n" % hint)
> + ui.write_err(b"(%s)\n" % hint)
> ret = 2
> else:
> ret = 1
> @@ -1305,21 +1305,21 @@
>
> def listtroubles(ui, repo, troublecategories, **opts):
> """Print all the troubles for the repo (or given revset)"""
> - troublecategories = troublecategories or ['contentdivergent', 'orphan', 'phasedivergent']
> - showunstable = 'orphan' in troublecategories
> - showbumped = 'phasedivergent' in troublecategories
> - showdivergent = 'contentdivergent' in troublecategories
> + troublecategories = troublecategories or [b'contentdivergent', b'orphan', b'phasedivergent']
> + showunstable = b'orphan' in troublecategories
> + showbumped = b'phasedivergent' in troublecategories
> + showdivergent = b'contentdivergent' in troublecategories
>
> - revs = repo.revs('+'.join("%s()" % t for t in troublecategories))
> - if opts.get('rev'):
> - revs = scmutil.revrange(repo, opts.get('rev'))
> + revs = repo.revs(b'+'.join(b"%s()" % t for t in troublecategories))
> + if opts.get(b'rev'):
> + revs = scmutil.revrange(repo, opts.get(b'rev'))
>
> - fm = ui.formatter('evolvelist', opts)
> + fm = ui.formatter(b'evolvelist', opts)
> for rev in revs:
> ctx = repo[rev]
> unpars = _preparelistctxs(ctx.parents(), lambda p: p.orphan())
> obspars = _preparelistctxs(ctx.parents(), lambda p: p.obsolete())
> - imprecs = _preparelistctxs(repo.set("allpredecessors(%n)", ctx.node()),
> + imprecs = _preparelistctxs(repo.set(b"allpredecessors(%n)", ctx.node()),
> lambda p: not p.mutable())
> dsets = divergentsets(repo, ctx)
>
> @@ -1329,55 +1329,55 @@
> desc = ctx.description()
> if desc:
> desc = desc.splitlines()[0]
> - desc = (desc[:desclen] + '...') if len(desc) > desclen else desc
> - fm.plain('%s: ' % ctx.hex()[:hashlen])
> - fm.plain('%s\n' % desc)
> + desc = (desc[:desclen] + b'...') if len(desc) > desclen else desc
> + fm.plain(b'%s: ' % ctx.hex()[:hashlen])
> + fm.plain(b'%s\n' % desc)
> fm.data(node=ctx.hex(), rev=ctx.rev(), desc=desc, phase=ctx.phasestr())
>
> for unpar in unpars if showunstable else []:
> - fm.plain(' %s: %s (%s parent)\n' % (TROUBLES['ORPHAN'],
> - unpar[:hashlen],
> - TROUBLES['ORPHAN']))
> + fm.plain(b' %s: %s (%s parent)\n' % (TROUBLES[b'ORPHAN'],
> + unpar[:hashlen],
> + TROUBLES[b'ORPHAN']))
> for obspar in obspars if showunstable else []:
> - fm.plain(' %s: %s (obsolete parent)\n' % (TROUBLES['ORPHAN'],
> - obspar[:hashlen]))
> + fm.plain(b' %s: %s (obsolete parent)\n' % (TROUBLES[b'ORPHAN'],
> + obspar[:hashlen]))
> for imprec in imprecs if showbumped else []:
> - fm.plain(' %s: %s (immutable precursor)\n' %
> - (TROUBLES['PHASEDIVERGENT'], imprec[:hashlen]))
> + fm.plain(b' %s: %s (immutable precursor)\n' %
> + (TROUBLES[b'PHASEDIVERGENT'], imprec[:hashlen]))
>
> if dsets and showdivergent:
> for dset in dsets:
> - fm.plain(' %s: ' % TROUBLES['CONTENTDIVERGENT'])
> + fm.plain(b' %s: ' % TROUBLES[b'CONTENTDIVERGENT'])
> first = True
> - for n in dset['divergentnodes']:
> - t = "%s (%s)" if first else " %s (%s)"
> + for n in dset[b'divergentnodes']:
> + t = b"%s (%s)" if first else b" %s (%s)"
> first = False
> fm.plain(t % (nodemod.hex(n)[:hashlen], repo[n].phasestr()))
> - comprec = nodemod.hex(dset['commonprecursor'])[:hashlen]
> - fm.plain(" (precursor %s)\n" % comprec)
> - fm.plain("\n")
> + comprec = nodemod.hex(dset[b'commonprecursor'])[:hashlen]
> + fm.plain(b" (precursor %s)\n" % comprec)
> + fm.plain(b"\n")
>
> # templater-friendly section
> _formatctx(fm, ctx)
> troubles = []
> for unpar in unpars:
> - troubles.append({'troubletype': TROUBLES['ORPHAN'],
> - 'sourcenode': unpar, 'sourcetype': 'orphanparent'})
> + troubles.append({b'troubletype': TROUBLES[b'ORPHAN'],
> + b'sourcenode': unpar, b'sourcetype': b'orphanparent'})
> for obspar in obspars:
> - troubles.append({'troubletype': TROUBLES['ORPHAN'],
> - 'sourcenode': obspar,
> - 'sourcetype': 'obsoleteparent'})
> + troubles.append({b'troubletype': TROUBLES[b'ORPHAN'],
> + b'sourcenode': obspar,
> + b'sourcetype': b'obsoleteparent'})
> for imprec in imprecs:
> - troubles.append({'troubletype': TROUBLES['PHASEDIVERGENT'],
> - 'sourcenode': imprec,
> - 'sourcetype': 'immutableprecursor'})
> + troubles.append({b'troubletype': TROUBLES[b'PHASEDIVERGENT'],
> + b'sourcenode': imprec,
> + b'sourcetype': b'immutableprecursor'})
> for dset in dsets:
> - divnodes = [{'node': nodemod.hex(n),
> - 'phase': repo[n].phasestr(),
> - } for n in dset['divergentnodes']]
> - troubles.append({'troubletype': TROUBLES['CONTENTDIVERGENT'],
> - 'commonprecursor': nodemod.hex(dset['commonprecursor']),
> - 'divergentnodes': divnodes})
> + divnodes = [{b'node': nodemod.hex(n),
> + b'phase': repo[n].phasestr(),
> + } for n in dset[b'divergentnodes']]
> + troubles.append({b'troubletype': TROUBLES[b'CONTENTDIVERGENT'],
> + b'commonprecursor': nodemod.hex(dset[b'commonprecursor']),
> + b'divergentnodes': divnodes})
> fm.data(troubles=troubles)
>
> fm.end()
> @@ -1386,65 +1386,65 @@
> """ check the options passed to `hg evolve` and warn for deprecation warning
> if any """
>
> - if opts['continue']:
> - if opts['any']:
> - raise error.Abort(_('cannot specify both "--any" and "--continue"'))
> - if opts['all']:
> - raise error.Abort(_('cannot specify both "--all" and "--continue"'))
> - if opts['rev']:
> - raise error.Abort(_('cannot specify both "--rev" and "--continue"'))
> - if opts['stop']:
> - raise error.Abort(_('cannot specify both "--stop" and'
> - ' "--continue"'))
> - if opts['abort']:
> - raise error.Abort(_('cannot specify both "--abort" and'
> - ' "--continue"'))
> + if opts[b'continue']:
> + if opts[b'any']:
> + raise error.Abort(_(b'cannot specify both "--any" and "--continue"'))
> + if opts[b'all']:
> + raise error.Abort(_(b'cannot specify both "--all" and "--continue"'))
> + if opts[b'rev']:
> + raise error.Abort(_(b'cannot specify both "--rev" and "--continue"'))
> + if opts[b'stop']:
> + raise error.Abort(_(b'cannot specify both "--stop" and'
> + b' "--continue"'))
> + if opts[b'abort']:
> + raise error.Abort(_(b'cannot specify both "--abort" and'
> + b' "--continue"'))
>
> - if opts['stop']:
> - if opts['any']:
> - raise error.Abort(_('cannot specify both "--any" and "--stop"'))
> - if opts['all']:
> - raise error.Abort(_('cannot specify both "--all" and "--stop"'))
> - if opts['rev']:
> - raise error.Abort(_('cannot specify both "--rev" and "--stop"'))
> - if opts['abort']:
> - raise error.Abort(_('cannot specify both "--abort" and "--stop"'))
> + if opts[b'stop']:
> + if opts[b'any']:
> + raise error.Abort(_(b'cannot specify both "--any" and "--stop"'))
> + if opts[b'all']:
> + raise error.Abort(_(b'cannot specify both "--all" and "--stop"'))
> + if opts[b'rev']:
> + raise error.Abort(_(b'cannot specify both "--rev" and "--stop"'))
> + if opts[b'abort']:
> + raise error.Abort(_(b'cannot specify both "--abort" and "--stop"'))
>
> - if opts['abort']:
> - if opts['any']:
> - raise error.Abort(_('cannot specify both "--any" and "--abort"'))
> - if opts['all']:
> - raise error.Abort(_('cannot specify both "--all" and "--abort"'))
> - if opts['rev']:
> - raise error.Abort(_('cannot specify both "--rev" and "--abort"'))
> + if opts[b'abort']:
> + if opts[b'any']:
> + raise error.Abort(_(b'cannot specify both "--any" and "--abort"'))
> + if opts[b'all']:
> + raise error.Abort(_(b'cannot specify both "--all" and "--abort"'))
> + if opts[b'rev']:
> + raise error.Abort(_(b'cannot specify both "--rev" and "--abort"'))
>
> - if opts['rev']:
> - if opts['any']:
> - raise error.Abort(_('cannot specify both "--rev" and "--any"'))
> - if opts['all']:
> - raise error.Abort(_('cannot specify both "--rev" and "--all"'))
> + if opts[b'rev']:
> + if opts[b'any']:
> + raise error.Abort(_(b'cannot specify both "--rev" and "--any"'))
> + if opts[b'all']:
> + raise error.Abort(_(b'cannot specify both "--rev" and "--all"'))
>
> # Backward compatibility
> - if opts['unstable']:
> - msg = ("'evolve --unstable' is deprecated, "
> - "use 'evolve --orphan'")
> - repo.ui.deprecwarn(msg, '4.4')
> + if opts[b'unstable']:
> + msg = (b"'evolve --unstable' is deprecated, "
> + b"use 'evolve --orphan'")
> + repo.ui.deprecwarn(msg, b'4.4')
>
> - opts['orphan'] = opts['divergent']
> + opts[b'orphan'] = opts[b'divergent']
>
> - if opts['divergent']:
> - msg = ("'evolve --divergent' is deprecated, "
> - "use 'evolve --content-divergent'")
> - repo.ui.deprecwarn(msg, '4.4')
> + if opts[b'divergent']:
> + msg = (b"'evolve --divergent' is deprecated, "
> + b"use 'evolve --content-divergent'")
> + repo.ui.deprecwarn(msg, b'4.4')
>
> - opts['content_divergent'] = opts['divergent']
> + opts[b'content_divergent'] = opts[b'divergent']
>
> - if opts['bumped']:
> - msg = ("'evolve --bumped' is deprecated, "
> - "use 'evolve --phase-divergent'")
> - repo.ui.deprecwarn(msg, '4.4')
> + if opts[b'bumped']:
> + msg = (b"'evolve --bumped' is deprecated, "
> + b"use 'evolve --phase-divergent'")
> + repo.ui.deprecwarn(msg, b'4.4')
>
> - opts['phase_divergent'] = opts['bumped']
> + opts[b'phase_divergent'] = opts[b'bumped']
>
> return opts
>
> @@ -1455,8 +1455,8 @@
> unfi = repo.unfiltered()
> succ = utility._singlesuccessor(repo, unfi[startnode])
> hg.updaterepo(repo, repo[succ].node(), False)
> - if repo['.'].node() != startnode:
> - ui.status(_('working directory is now at %s\n') % repo['.'])
> + if repo[b'.'].node() != startnode:
> + ui.status(_(b'working directory is now at %s\n') % repo[b'.'])
>
> def divergentsets(repo, ctx):
> """Compute sets of commits divergent with a given one"""
> @@ -1476,40 +1476,40 @@
> continue
> base[tuple(nsuccset)] = n
> divergence = []
> - for divset, b in base.iteritems():
> + for divset, b in base.items():
> divergence.append({
> - 'divergentnodes': divset,
> - 'commonprecursor': b
> + b'divergentnodes': divset,
> + b'commonprecursor': b
> })
>
> return divergence
>
> @eh.command(
> - 'evolve|stabilize|solve',
> - [('n', 'dry-run', False,
> - _('do not perform actions, just print what would be done')),
> - ('', 'confirm', False,
> - _('ask for confirmation before performing the action')),
> - ('A', 'any', False,
> - _('also consider troubled changesets unrelated to current working '
> - 'directory')),
> - ('r', 'rev', [], _('solves troubles of these revisions'), _('REV')),
> - ('', 'bumped', False, _('solves only bumped changesets (DEPRECATED)')),
> - ('', 'phase-divergent', False, _('solves only phase-divergent changesets')),
> - ('', 'divergent', False, _('solves only divergent changesets (DEPRECATED)')),
> - ('', 'content-divergent', False, _('solves only content-divergent changesets')),
> - ('', 'unstable', False, _('solves only unstable changesets (DEPRECATED)')),
> - ('', 'orphan', False, _('solves only orphan changesets (default)')),
> - ('a', 'all', None, _('evolve all troubled changesets related to the current'
> - ' working directory and its descendants (default)')),
> - ('', 'update', False, _('update to the head of evolved changesets')),
> - ('c', 'continue', False, _('continue an interrupted evolution')),
> - ('', 'stop', False, _('stop the interrupted evolution')),
> - ('', 'abort', False, _('abort the interrupted evolution')),
> - ('l', 'list', False, _('provide details on troubled changesets'
> - ' in the repo')),
> - ] + mergetoolopts,
> - _('[OPTIONS]...'),
> + b'evolve|stabilize|solve',
> + [(b'n', b'dry-run', False,
> + _(b'do not perform actions, just print what would be done')),
> + (b'', b'confirm', False,
> + _(b'ask for confirmation before performing the action')),
> + (b'A', b'any', False,
> + _(b'also consider troubled changesets unrelated to current working '
> + b'directory')),
> + (b'r', b'rev', [], _(b'solves troubles of these revisions'), _(b'REV')),
> + (b'', b'bumped', False, _(b'solves only bumped changesets (DEPRECATED)')),
> + (b'', b'phase-divergent', False, _(b'solves only phase-divergent changesets')),
> + (b'', b'divergent', False, _(b'solves only divergent changesets (DEPRECATED)')),
> + (b'', b'content-divergent', False, _(b'solves only content-divergent changesets')),
> + (b'', b'unstable', False, _(b'solves only unstable changesets (DEPRECATED)')),
> + (b'', b'orphan', False, _(b'solves only orphan changesets (default)')),
> + (b'a', b'all', None, _(b'evolve all troubled changesets related to the current'
> + b' working directory and its descendants (default)')),
> + (b'', b'update', False, _(b'update to the head of evolved changesets')),
> + (b'c', b'continue', False, _(b'continue an interrupted evolution')),
> + (b'', b'stop', False, _(b'stop the interrupted evolution')),
> + (b'', b'abort', False, _(b'abort the interrupted evolution')),
> + (b'l', b'list', False, _(b'provide details on troubled changesets'
> + b' in the repo')),
> + ] + mergetoolopts,
> + _(b'[OPTIONS]...'),
> helpbasic=True
> )
> def evolve(ui, repo, **opts):
> @@ -1607,72 +1607,72 @@
>
> opts = _checkevolveopts(repo, opts)
> # Options
> - contopt = opts['continue']
> - anyopt = opts['any']
> - allopt = opts['all']
> + contopt = opts[b'continue']
> + anyopt = opts[b'any']
> + allopt = opts[b'all']
> if allopt is None:
> allopt = True
> - startnode = repo['.'].node()
> - dryrunopt = opts['dry_run']
> - confirmopt = opts['confirm']
> - revopt = opts['rev']
> - stopopt = opts['stop']
> - abortopt = opts['abort']
> - shouldupdate = opts['update']
> + startnode = repo[b'.'].node()
> + dryrunopt = opts[b'dry_run']
> + confirmopt = opts[b'confirm']
> + revopt = opts[b'rev']
> + stopopt = opts[b'stop']
> + abortopt = opts[b'abort']
> + shouldupdate = opts[b'update']
>
> - troublecategories = ['phase_divergent', 'content_divergent', 'orphan']
> - specifiedcategories = [t.replace('_', '')
> + troublecategories = [b'phase_divergent', b'content_divergent', b'orphan']
> + specifiedcategories = [t.replace(b'_', b'')
> for t in troublecategories
> if opts[t]]
> - if opts['list']:
> - ui.pager('evolve')
> + if opts[b'list']:
> + ui.pager(b'evolve')
> listtroubles(ui, repo, specifiedcategories, **opts)
> return
>
> - targetcat = 'orphan'
> + targetcat = b'orphan'
> has_some_opts = bool(revopt or anyopt or allopt or contopt or stopopt or abortopt)
> if 1 < len(specifiedcategories):
> - msg = _('cannot specify more than one trouble category to solve (yet)')
> + msg = _(b'cannot specify more than one trouble category to solve (yet)')
> raise error.Abort(msg)
> elif len(specifiedcategories) == 1:
> targetcat = specifiedcategories[0]
> - elif repo['.'].obsolete() and not has_some_opts:
> + elif repo[b'.'].obsolete() and not has_some_opts:
> # if no args and parent is obsolete, update to successors
> return solveobswdp(ui, repo, opts)
>
> - ui.setconfig('ui', 'forcemerge', opts.get('tool', ''), 'evolve')
> + ui.setconfig(b'ui', b'forcemerge', opts.get(b'tool', b''), b'evolve')
>
> evolvestate = state.cmdstate(repo)
> # Continuation handling
> if contopt:
> if not evolvestate:
> - raise error.Abort(_('no interrupted evolve to continue'))
> + raise error.Abort(_(b'no interrupted evolve to continue'))
> evolvestate.load()
> continueevolve(ui, repo, evolvestate)
> - if evolvestate['command'] != 'evolve':
> + if evolvestate[b'command'] != b'evolve':
> evolvestate.delete()
> return
> - startnode = repo.unfiltered()[evolvestate['startnode']]
> - if 'update' in evolvestate:
> - shouldupdate = evolvestate['update']
> + startnode = repo.unfiltered()[evolvestate[b'startnode']]
> + if b'update' in evolvestate:
> + shouldupdate = evolvestate[b'update']
> evolvestate.delete()
> elif stopopt:
> if not evolvestate:
> - raise error.Abort(_('no interrupted evolve to stop'))
> + raise error.Abort(_(b'no interrupted evolve to stop'))
> evolvestate.load()
> stopevolve(ui, repo, evolvestate)
> evolvestate.delete()
> return
> elif abortopt:
> if not evolvestate:
> - raise error.Abort(_('no interrupted evolve to stop'))
> + raise error.Abort(_(b'no interrupted evolve to stop'))
> evolvestate.load()
> # `hg next --evolve` in play
> - if evolvestate['command'] != 'evolve':
> - pctx = repo['.']
> + if evolvestate[b'command'] != b'evolve':
> + pctx = repo[b'.']
> hg.updaterepo(repo, pctx.node(), True)
> - ui.status(_('evolve aborted\n'))
> - ui.status(_('working directory is now at %s\n')
> + ui.status(_(b'evolve aborted\n'))
> + ui.status(_(b'working directory is now at %s\n')
> % pctx.hex()[:12])
> evolvestate.delete()
> return 0
> @@ -1686,7 +1686,7 @@
> # Handling it here otherwise `revs` set would change, after
> # performing update to successor of obsolete wdir parent.
> # (in case when user passes a revset related to wdir parent '.::')
> - if repo['.'].obsolete():
> + if repo[b'.'].obsolete():
> result = solveobswdp(ui, repo, opts)
> if result != 0 or result is True:
> return result
> @@ -1701,27 +1701,27 @@
>
> def progresscb():
> if showprogress:
> - compat.progress(ui, _('evolve'), seen, unit=_('changesets'),
> + compat.progress(ui, _(b'evolve'), seen, unit=_(b'changesets'),
> total=count)
>
> # Order the revisions
> revs = _orderrevs(repo, revs)
>
> # cbor does not know how to serialize sets, using list for skippedrevs
> - stateopts = {'category': targetcat, 'replacements': {},
> - 'revs': list(revs), 'confirm': confirmopt,
> - 'startnode': startnode, 'skippedrevs': [],
> - 'command': 'evolve', 'orphanmerge': False,
> - 'bookmarkchanges': [], 'temprevs': [], 'obsmarkers': [],
> - 'update': shouldupdate}
> + stateopts = {b'category': targetcat, b'replacements': {},
> + b'revs': list(revs), b'confirm': confirmopt,
> + b'startnode': startnode, b'skippedrevs': [],
> + b'command': b'evolve', b'orphanmerge': False,
> + b'bookmarkchanges': [], b'temprevs': [], b'obsmarkers': [],
> + b'update': shouldupdate}
> evolvestate.addopts(stateopts)
> # lastsolved: keep track of successor of last troubled cset we evolved
> # to confirm that if atop msg should be suppressed to remove redundancy
> lastsolved = None
>
> - activetopic = getattr(repo, 'currenttopic', '')
> + activetopic = getattr(repo, 'currenttopic', b'')
> with repo.wlock(), repo.lock():
> - tr = repo.transaction("evolve")
> + tr = repo.transaction(b"evolve")
> with util.acceptintervention(tr):
> for rev in revs:
> lastsolved = _solveonerev(ui, repo, rev, evolvestate,
> @@ -1731,7 +1731,7 @@
> seen += 1
>
> if showprogress:
> - compat.progress(ui, _('evolve'), None)
> + compat.progress(ui, _(b'evolve'), None)
>
> _cleanup(ui, repo, startnode, shouldupdate)
>
> @@ -1743,7 +1743,7 @@
> stabilizes for both parents of orphan merges.
> """
> curctx = repo[rev]
> - revtopic = getattr(curctx, 'topic', lambda: '')()
> + revtopic = getattr(curctx, 'topic', lambda: b'')()
> topicidx = getattr(curctx, 'topicidx', lambda: None)()
> stacktmplt = False
> # check if revision being evolved is in active topic to make sure
> @@ -1755,44 +1755,44 @@
> confirmopt, progresscb, targetcat,
> lastsolved=lastsolved, stacktmplt=stacktmplt)
> if ret[0]:
> - evolvestate['replacements'][curctx.node()] = ret[1]
> + evolvestate[b'replacements'][curctx.node()] = ret[1]
> lastsolved = ret[1]
> else:
> - evolvestate['skippedrevs'].append(curctx.node())
> + evolvestate[b'skippedrevs'].append(curctx.node())
>
> - if evolvestate['orphanmerge']:
> + if evolvestate[b'orphanmerge']:
> # we were processing an orphan merge with both parents obsolete,
> # stabilized for second parent, re-stabilize for the first parent
> ret = _solveone(ui, repo, repo[ret[1]], evolvestate, dryrunopt,
> confirmopt, progresscb, targetcat,
> stacktmplt=stacktmplt)
> if ret[0]:
> - evolvestate['replacements'][curctx.node()] = ret[1]
> + evolvestate[b'replacements'][curctx.node()] = ret[1]
> lastsolved = ret[1]
> else:
> - evolvestate['skippedrevs'].append(curctx.node())
> + evolvestate[b'skippedrevs'].append(curctx.node())
>
> - evolvestate['orphanmerge'] = False
> + evolvestate[b'orphanmerge'] = False
> return lastsolved
>
> def solveobswdp(ui, repo, opts):
> """this function updates to the successor of obsolete wdir parent"""
> - oldid = repo['.'].node()
> - startctx = repo['.']
> - dryrunopt = opts.get('dry_run', False)
> + oldid = repo[b'.'].node()
> + startctx = repo[b'.']
> + dryrunopt = opts.get(b'dry_run', False)
> displayer = compat.changesetdisplayer(ui, repo,
> - {'template': shorttemplate})
> + {b'template': shorttemplate})
> try:
> - ctx = repo[utility._singlesuccessor(repo, repo['.'])]
> + ctx = repo[utility._singlesuccessor(repo, repo[b'.'])]
> except utility.MultipleSuccessorsError as exc:
> - repo.ui.write_err(_('parent is obsolete with multiple'
> - ' successors:\n'))
> + repo.ui.write_err(_(b'parent is obsolete with multiple'
> + b' successors:\n'))
> for ln in exc.successorssets:
> for n in ln:
> displayer.show(repo[n])
> return 2
>
> - ui.status(_('update:'))
> + ui.status(_(b'update:'))
> if not ui.quiet:
> displayer.show(ctx)
>
> @@ -1802,33 +1802,33 @@
> newid = ctx.node()
>
> if ctx != startctx:
> - with repo.wlock(), repo.lock(), repo.transaction('evolve') as tr:
> + with repo.wlock(), repo.lock(), repo.transaction(b'evolve') as tr:
> bmupdater = rewriteutil.bookmarksupdater(repo, oldid, tr)
> bmupdater(newid)
> - ui.status(_('working directory is now at %s\n') % ctx)
> + ui.status(_(b'working directory is now at %s\n') % ctx)
> return res
>
> def stopevolve(ui, repo, evolvestate):
> """logic for handling of `hg evolve --stop`"""
> updated = False
> pctx = None
> - if (evolvestate['command'] == 'evolve'
> - and evolvestate['category'] == 'contentdivergent'
> - and evolvestate['relocated']):
> - oldother = evolvestate['old-other']
> + if (evolvestate[b'command'] == b'evolve'
> + and evolvestate[b'category'] == b'contentdivergent'
> + and evolvestate[b'relocated']):
> + oldother = evolvestate[b'old-other']
> if oldother:
> with repo.wlock(), repo.lock():
> repo = repo.unfiltered()
> hg.updaterepo(repo, oldother, True)
> - strips = [evolvestate['relocated']]
> + strips = [evolvestate[b'relocated']]
> repair.strip(ui, repo, strips, False)
> updated = True
> pctx = repo[oldother]
> if not updated:
> - pctx = repo['.']
> + pctx = repo[b'.']
> hg.updaterepo(repo, pctx.node(), True)
> - ui.status(_('stopped the interrupted evolve\n'))
> - ui.status(_('working directory is now at %s\n') % pctx)
> + ui.status(_(b'stopped the interrupted evolve\n'))
> + ui.status(_(b'working directory is now at %s\n') % pctx)
>
> def abortevolve(ui, repo, evolvestate):
> """ logic for handling of `hg evolve --abort`"""
> @@ -1838,11 +1838,11 @@
> evolvedctx = []
> # boolean value to say whether we should strip or not
> cleanup = True
> - startnode = evolvestate['startnode']
> - for old, new in evolvestate['replacements'].iteritems():
> + startnode = evolvestate[b'startnode']
> + for old, new in evolvestate[b'replacements'].items():
> if new:
> evolvedctx.append(repo[new])
> - for temp in evolvestate['temprevs']:
> + for temp in evolvestate[b'temprevs']:
> if temp:
> evolvedctx.append(repo[temp])
> evolvedrevs = [c.rev() for c in evolvedctx]
> @@ -1850,9 +1850,9 @@
> # checking if phase changed of any of the evolved rev
> immutable = [c for c in evolvedctx if not c.mutable()]
> if immutable:
> - repo.ui.warn(_("cannot clean up public changesets: %s\n")
> - % ', '.join(str(c) for c in immutable),
> - hint=_("see 'hg help phases' for details"))
> + repo.ui.warn(_(b"cannot clean up public changesets: %s\n")
> + % b', '.join(str(c) for c in immutable),
> + hint=_(b"see 'hg help phases' for details"))
> cleanup = False
>
> # checking no new changesets are created on evolved revs
> @@ -1860,15 +1860,15 @@
> if evolvedrevs:
> descendants = set(repo.changelog.descendants(evolvedrevs))
> if descendants - set(evolvedrevs):
> - repo.ui.warn(_("warning: new changesets detected on destination "
> - "branch\n"))
> + repo.ui.warn(_(b"warning: new changesets detected on destination "
> + b"branch\n"))
> cleanup = False
>
> # finding the indices of the obsmarkers to be stripped and stripping
> # them
> - if evolvestate['obsmarkers']:
> + if evolvestate[b'obsmarkers']:
> stripmarkers = set()
> - for m in evolvestate['obsmarkers']:
> + for m in evolvestate[b'obsmarkers']:
> m = (m[0], m[1])
> stripmarkers.add(m)
> indices = []
> @@ -1879,12 +1879,12 @@
> indices.append(i)
>
> repair.deleteobsmarkers(repo.obsstore, indices)
> - repo.ui.debug('deleted %d obsmarkers\n' % len(indices))
> + repo.ui.debug(b'deleted %d obsmarkers\n' % len(indices))
>
> if cleanup:
> if evolvedrevs:
> strippoints = [c.node()
> - for c in repo.set('roots(%ld)', evolvedrevs)]
> + for c in repo.set(b'roots(%ld)', evolvedrevs)]
>
> # updating the working directory
> hg.updaterepo(repo, startnode, True)
> @@ -1894,19 +1894,19 @@
> # no backup of evolved cset versions needed
> repair.strip(repo.ui, repo, strippoints, False)
>
> - with repo.transaction('evolve') as tr:
> + with repo.transaction(b'evolve') as tr:
> # restoring bookmarks at there original place
> - bmchanges = evolvestate['bookmarkchanges']
> + bmchanges = evolvestate[b'bookmarkchanges']
> if bmchanges:
> repo._bookmarks.applychanges(repo, tr, bmchanges)
>
> evolvestate.delete()
> - ui.status(_('evolve aborted\n'))
> - ui.status(_('working directory is now at %s\n')
> + ui.status(_(b'evolve aborted\n'))
> + ui.status(_(b'working directory is now at %s\n')
> % nodemod.hex(startnode)[:12])
> else:
> - raise error.Abort(_("unable to abort interrupted evolve, use 'hg "
> - "evolve --stop' to stop evolve"))
> + raise error.Abort(_(b"unable to abort interrupted evolve, use 'hg "
> + b"evolve --stop' to stop evolve"))
>
> def continueevolve(ui, repo, evolvestate):
> """logic for handling of `hg evolve --continue`"""
> @@ -1914,81 +1914,81 @@
> with repo.wlock(), repo.lock():
> ms = merge.mergestate.read(repo)
> mergeutil.checkunresolved(ms)
> - if (evolvestate['command'] == 'next'
> - or evolvestate['category'] == 'orphan'):
> + if (evolvestate[b'command'] == b'next'
> + or evolvestate[b'category'] == b'orphan'):
> _completeorphan(ui, repo, evolvestate)
> - elif evolvestate['category'] == 'phasedivergent':
> + elif evolvestate[b'category'] == b'phasedivergent':
> _completephasedivergent(ui, repo, evolvestate)
> - elif evolvestate['category'] == 'contentdivergent':
> + elif evolvestate[b'category'] == b'contentdivergent':
> _continuecontentdivergent(ui, repo, evolvestate, None)
> else:
> - repo.ui.status(_("continuing interrupted '%s' resolution is not yet"
> - " supported\n") % evolvestate['category'])
> + repo.ui.status(_(b"continuing interrupted '%s' resolution is not yet"
> + b" supported\n") % evolvestate[b'category'])
> return
>
> # make sure we are continuing evolve and not `hg next --evolve`
> - if evolvestate['command'] != 'evolve':
> + if evolvestate[b'command'] != b'evolve':
> return
>
> # Progress handling
> seen = 1
> - count = len(evolvestate['revs'])
> + count = len(evolvestate[b'revs'])
>
> def progresscb():
> - compat.progress(ui, _('evolve'), seen, unit=_('changesets'),
> + compat.progress(ui, _(b'evolve'), seen, unit=_(b'changesets'),
> total=count)
>
> - category = evolvestate['category']
> - confirm = evolvestate['confirm']
> + category = evolvestate[b'category']
> + confirm = evolvestate[b'confirm']
> unfi = repo.unfiltered()
> # lastsolved: keep track of successor of last troubled cset we
> # evolved to confirm that if atop msg should be suppressed to remove
> # redundancy
> lastsolved = None
> - activetopic = getattr(repo, 'currenttopic', '')
> - tr = repo.transaction("evolve")
> + activetopic = getattr(repo, 'currenttopic', b'')
> + tr = repo.transaction(b"evolve")
> with util.acceptintervention(tr):
> - for rev in evolvestate['revs']:
> + for rev in evolvestate[b'revs']:
> # XXX: prevent this lookup by storing nodes instead of revnums
> curctx = unfi[rev]
>
> # check if we can use stack template
> - revtopic = getattr(curctx, 'topic', lambda: '')()
> + revtopic = getattr(curctx, 'topic', lambda: b'')()
> topicidx = getattr(curctx, 'topicidx', lambda: None)()
> stacktmplt = False
> if (activetopic and (activetopic == revtopic)
> and topicidx is not None):
> stacktmplt = True
>
> - if (curctx.node() not in evolvestate['replacements']
> - and curctx.node() not in evolvestate['skippedrevs']):
> + if (curctx.node() not in evolvestate[b'replacements']
> + and curctx.node() not in evolvestate[b'skippedrevs']):
> newnode = _solveone(ui, repo, curctx, evolvestate, False,
> confirm, progresscb, category,
> lastsolved=lastsolved,
> stacktmplt=stacktmplt)
> if newnode[0]:
> - evolvestate['replacements'][curctx.node()] = newnode[1]
> + evolvestate[b'replacements'][curctx.node()] = newnode[1]
> lastsolved = newnode[1]
> else:
> - evolvestate['skippedrevs'].append(curctx.node())
> + evolvestate[b'skippedrevs'].append(curctx.node())
> seen += 1
>
> def _continuecontentdivergent(ui, repo, evolvestate, progresscb):
> """function to continue the interrupted content-divergence resolution."""
> - tr = repo.transaction('evolve')
> + tr = repo.transaction(b'evolve')
> with util.acceptintervention(tr):
> - divergent = evolvestate['divergent']
> - base = evolvestate['base']
> + divergent = evolvestate[b'divergent']
> + base = evolvestate[b'base']
> repo = repo.unfiltered()
> - if evolvestate['relocating']:
> + if evolvestate[b'relocating']:
> newother = _completerelocation(ui, repo, evolvestate)
> - current = repo[evolvestate['current']]
> + current = repo[evolvestate[b'current']]
> obsolete.createmarkers(repo, [(current, (repo[newother],))],
> - operation='evolve')
> - evolvestate['relocating'] = False
> - evolvestate['relocated'] = newother
> - evolvestate['temprevs'].append(newother)
> - evolvestate['other-divergent'] = newother
> + operation=b'evolve')
> + evolvestate[b'relocating'] = False
> + evolvestate[b'relocated'] = newother
> + evolvestate[b'temprevs'].append(newother)
> + evolvestate[b'other-divergent'] = newother
> # continue the resolution by merging the content-divergence
> _mergecontentdivergents(repo, progresscb,
> repo[divergent],
> @@ -1996,16 +1996,16 @@
> repo[base],
> evolvestate)
>
> - other = evolvestate['other-divergent']
> + other = evolvestate[b'other-divergent']
> ret = _completecontentdivergent(ui, repo, progresscb,
> repo[divergent],
> repo[other],
> repo[base],
> evolvestate)
> - origdivergent = evolvestate['orig-divergent']
> - evolvestate['replacements'][origdivergent] = ret[1]
> + origdivergent = evolvestate[b'orig-divergent']
> + evolvestate[b'replacements'][origdivergent] = ret[1]
> # logic to continue the public content-divergent
> - publicnode = evolvestate.get('public-divergent')
> + publicnode = evolvestate.get(b'public-divergent')
> if publicnode:
> res, newnode = ret
> if not res:
> @@ -2028,19 +2028,19 @@
> phase-divergence"""
>
> # need to start transaction for bookmark changes
> - with repo.transaction('evolve'):
> + with repo.transaction(b'evolve'):
> node = _completerelocation(ui, repo, evolvestate)
> - evolvestate['temprevs'].append(node)
> + evolvestate[b'temprevs'].append(node)
> # resolving conflicts can lead to empty wdir and node can be None in
> # those cases
> - ctx = repo[evolvestate['current']]
> - newctx = repo[node] if node is not None else repo['.']
> - obsolete.createmarkers(repo, [(ctx, (newctx,))], operation='evolve')
> + ctx = repo[evolvestate[b'current']]
> + newctx = repo[node] if node is not None else repo[b'.']
> + obsolete.createmarkers(repo, [(ctx, (newctx,))], operation=b'evolve')
>
> # now continuing the phase-divergence resolution part
> - prec = repo[evolvestate['precursor']]
> + prec = repo[evolvestate[b'precursor']]
> retvalue = _resolvephasedivergent(ui, repo, prec, newctx)
> - evolvestate['replacements'][ctx.node()] = retvalue[1]
> + evolvestate[b'replacements'][ctx.node()] = retvalue[1]
>
> def _completeorphan(ui, repo, evolvestate):
> """function to complete the interrupted orphan resolution"""
> @@ -2048,44 +2048,44 @@
> node = _completerelocation(ui, repo, evolvestate)
> # resolving conflicts can lead to empty wdir and node can be None in
> # those cases
> - ctx = repo[evolvestate['current']]
> + ctx = repo[evolvestate[b'current']]
> if node is None:
> - repo.ui.status(_("evolution of %d:%s created no changes"
> - " to commit\n") % (ctx.rev(), ctx))
> - newctx = repo[node] if node is not None else repo['.']
> - obsolete.createmarkers(repo, [(ctx, (newctx,))], operation='evolve')
> + repo.ui.status(_(b"evolution of %d:%s created no changes"
> + b" to commit\n") % (ctx.rev(), ctx))
> + newctx = repo[node] if node is not None else repo[b'.']
> + obsolete.createmarkers(repo, [(ctx, (newctx,))], operation=b'evolve')
>
> # make sure we are continuing evolve and not `hg next --evolve`
> - if evolvestate['command'] == 'evolve':
> - evolvestate['replacements'][ctx.node()] = node
> - if evolvestate['orphanmerge']:
> + if evolvestate[b'command'] == b'evolve':
> + evolvestate[b'replacements'][ctx.node()] = node
> + if evolvestate[b'orphanmerge']:
> # processing a merge changeset with both parents obsoleted,
> # stabilized on second parent, insert in front of list to
> # re-process to stabilize on first parent
> - evolvestate['revs'].insert(0, repo[node].rev())
> - evolvestate['orphanmerge'] = False
> + evolvestate[b'revs'].insert(0, repo[node].rev())
> + evolvestate[b'orphanmerge'] = False
>
> def _completerelocation(ui, repo, evolvestate):
> """function to complete the interrupted relocation of a commit
> return the new node formed
> """
>
> - orig = repo[evolvestate['current']]
> + orig = repo[evolvestate[b'current']]
> ctx = orig
> - source = ctx.extra().get('source')
> + source = ctx.extra().get(b'source')
> extra = {}
> if source:
> - extra['source'] = source
> - extra['intermediate-source'] = ctx.hex()
> + extra[b'source'] = source
> + extra[b'intermediate-source'] = ctx.hex()
> else:
> - extra['source'] = ctx.hex()
> + extra[b'source'] = ctx.hex()
> user = ctx.user()
> date = ctx.date()
> message = ctx.description()
> - ui.status(_('evolving %d:%s "%s"\n') % (ctx.rev(), ctx,
> - message.split('\n', 1)[0]))
> + ui.status(_(b'evolving %d:%s "%s"\n') % (ctx.rev(), ctx,
> + message.split(b'\n', 1)[0]))
> targetphase = max(ctx.phase(), phases.draft)
> - overrides = {('phases', 'new-commit'): targetphase}
> + overrides = {(b'phases', b'new-commit'): targetphase}
>
> ctxparents = orig.parents()
> if len(ctxparents) == 2:
> @@ -2112,7 +2112,7 @@
> else:
> # both the parents were obsoleted, if orphanmerge is set, we
> # are processing the second parent first (to keep parent order)
> - if evolvestate.get('orphanmerge'):
> + if evolvestate.get(b'orphanmerge'):
> with repo.dirstate.parentchange():
> repo.dirstate.setparents(ctxparents[0].node(),
> currentp1)
> @@ -2121,7 +2121,7 @@
> with repo.dirstate.parentchange():
> repo.dirstate.setparents(repo.dirstate.parents()[0], nodemod.nullid)
>
> - with repo.ui.configoverride(overrides, 'evolve-continue'):
> + with repo.ui.configoverride(overrides, b'evolve-continue'):
> node = repo.commit(text=message, user=user,
> date=date, extra=extra)
> return node
> diff --git a/hgext3rd/evolve/exthelper.py b/hgext3rd/evolve/exthelper.py
> --- a/hgext3rd/evolve/exthelper.py
> +++ b/hgext3rd/evolve/exthelper.py
> @@ -83,12 +83,12 @@
> self._duckpunchers = []
> self.cmdtable = {}
> self.command = registrar.command(self.cmdtable)
> - if '^init' in commands.table:
> + if b'^init' in commands.table:
> olddoregister = self.command._doregister
>
> def _newdoregister(self, name, *args, **kwargs):
> - if kwargs.pop('helpbasic', False):
> - name = '^' + name
> + if kwargs.pop(b'helpbasic', False):
> + name = b'^' + name
> return olddoregister(self, name, *args, **kwargs)
> self.command._doregister = _newdoregister
>
> @@ -111,7 +111,7 @@
> self._functionwrappers.extend(other._functionwrappers)
> self._duckpunchers.extend(other._duckpunchers)
> self.cmdtable.update(other.cmdtable)
> - for section, items in other.configtable.iteritems():
> + for section, items in other.configtable.items():
> if section in self.configtable:
> self.configtable[section].update(items)
> else:
> @@ -277,9 +277,9 @@
> else:
> for opt in opts:
> if not isinstance(opt, tuple):
> - raise error.ProgrammingError('opts must be list of tuples')
> + raise error.ProgrammingError(b'opts must be list of tuples')
> if len(opt) not in (4, 5):
> - msg = 'each opt tuple must contain 4 or 5 values'
> + msg = b'each opt tuple must contain 4 or 5 values'
> raise error.ProgrammingError(msg)
>
> def dec(wrapper):
> diff --git a/hgext3rd/evolve/firstmergecache.py b/hgext3rd/evolve/firstmergecache.py
> --- a/hgext3rd/evolve/firstmergecache.py
> +++ b/hgext3rd/evolve/firstmergecache.py
> @@ -41,7 +41,7 @@
>
> @localrepo.unfilteredmethod
> def destroyed(self):
> - if 'firstmergecach' in vars(self):
> + if b'firstmergecach' in vars(self):
> self.firstmergecache.clear()
> super(firstmergecacherepo, self).destroyed()
>
> @@ -56,16 +56,16 @@
>
> class firstmergecache(genericcaches.changelogsourcebase):
>
> - _filepath = 'evoext-firstmerge-00'
> - _cachename = 'evo-ext-firstmerge'
> + _filepath = b'evoext-firstmerge-00'
> + _cachename = b'evo-ext-firstmerge'
>
> def __init__(self):
> super(firstmergecache, self).__init__()
> - self._data = array.array('l')
> + self._data = array.array(b'l')
>
> def get(self, rev):
> if len(self._data) <= rev:
> - raise error.ProgrammingError('firstmergecache must be warmed before use')
> + raise error.ProgrammingError(b'firstmergecache must be warmed before use')
> return self._data[rev]
>
> def _updatefrom(self, repo, data):
> @@ -75,15 +75,15 @@
> total = len(data)
>
> def progress(pos, rev):
> - compat.progress(repo.ui, 'updating firstmerge cache',
> - pos, 'rev %s' % rev, unit='revision', total=total)
> - progress(0, '')
> + compat.progress(repo.ui, b'updating firstmerge cache',
> + pos, b'rev %s' % rev, unit=b'revision', total=total)
> + progress(0, b'')
> for idx, rev in enumerate(data, 1):
> assert rev == len(self._data), (rev, len(self._data))
> self._data.append(self._firstmerge(cl, rev))
>                  if not (idx % 10000): # updating progress every time has too high a performance impact
> progress(idx, rev)
> - progress(None, '')
> + progress(None, b'')
>
> def _firstmerge(self, changelog, rev):
> cl = changelog
> @@ -107,7 +107,7 @@
>          Subclasses MUST override this method to actually affect the cache data.
> """
> super(firstmergecache, self).clear()
> - self._data = array.array('l')
> + self._data = array.array(b'l')
>
> # crude version of a cache, to show the kind of information we have to store
>
> @@ -116,7 +116,7 @@
> assert repo.filtername is None
>
> data = repo.cachevfs.tryread(self._filepath)
> - self._data = array.array('l')
> + self._data = array.array(b'l')
> if not data:
> self._cachekey = self.emptykey
> else:
> @@ -135,12 +135,12 @@
> return
>
> try:
> - cachefile = repo.cachevfs(self._filepath, 'w', atomictemp=True)
> + cachefile = repo.cachevfs(self._filepath, b'w', atomictemp=True)
> headerdata = self._serializecachekey()
> cachefile.write(headerdata)
> cachefile.write(self._data.tostring())
> cachefile.close()
> self._ondiskkey = self._cachekey
> except (IOError, OSError) as exc:
> - repo.ui.log('firstmergecache', 'could not write update %s\n' % exc)
> - repo.ui.debug('firstmergecache: could not write update %s\n' % exc)
> + repo.ui.log(b'firstmergecache', b'could not write update %s\n' % exc)
> + repo.ui.debug(b'firstmergecache: could not write update %s\n' % exc)
> diff --git a/hgext3rd/evolve/genericcaches.py b/hgext3rd/evolve/genericcaches.py
> --- a/hgext3rd/evolve/genericcaches.py
> +++ b/hgext3rd/evolve/genericcaches.py
> @@ -31,7 +31,7 @@
> # default key used for an empty cache
> emptykey = ()
>
> - _cachekeyspec = '' # used for serialization
> + _cachekeyspec = b'' # used for serialization
> _cachename = None # used for debug message
>
> @abc.abstractmethod
> @@ -42,7 +42,7 @@
> @util.propertycache
> def _cachekeystruct(self):
> # dynamic property to help subclass to change it
> - return struct.Struct('>' + self._cachekeyspec)
> + return struct.Struct(b'>' + self._cachekeyspec)
>
> @util.propertycache
> def _cachekeysize(self):
> @@ -112,7 +112,7 @@
> if newkey == self._cachekey:
> return
> if reset or self._cachekey is None:
> - repo.ui.log('cache', 'strip detected, %s cache reset\n'
> + repo.ui.log(b'cache', b'strip detected, %s cache reset\n'
> % self._cachename)
> self.clear(reset=True)
>
> @@ -120,7 +120,7 @@
> self._updatefrom(repo, data)
> duration = util.timer() - starttime
> summary = self._updatesummary(data)
> - repo.ui.log('cache', 'updated %s in %.4f seconds (%s)\n',
> + repo.ui.log(b'cache', b'updated %s in %.4f seconds (%s)\n',
> self._cachename, duration, summary)
>
> self._cachekey = newkey
> @@ -144,7 +144,7 @@
>
> # default key used for an empty cache
> emptykey = (0, node.nullid)
> - _cachekeyspec = 'i20s'
> + _cachekeyspec = b'i20s'
> _cachename = None # used for debug message
>
> # Useful "public" function (no need to override them)
> @@ -172,4 +172,4 @@
> return self._fetchchangelogdata(self._cachekey, repo.changelog)
>
> def _updatesummary(self, data):
> - return '%ir' % len(data)
> + return b'%ir' % len(data)
> diff --git a/hgext3rd/evolve/hack/drophack.py b/hgext3rd/evolve/hack/drophack.py
> --- a/hgext3rd/evolve/hack/drophack.py
> +++ b/hgext3rd/evolve/hack/drophack.py
> @@ -34,7 +34,7 @@
> user = ostop[0] - ostart[0]
> sys = ostop[1] - ostart[1]
> comb = user + sys
> - ui.write("%s: wall %f comb %f user %f sys %f\n"
> + ui.write(b"%s: wall %f comb %f user %f sys %f\n"
> % (caption, wall, comb, user, sys))
>
> def obsmarkerchainfrom(obsstore, nodes):
> @@ -66,13 +66,13 @@
> repo = repo.unfiltered()
> repo.destroying()
> oldmarkers = list(repo.obsstore._all)
> - util.rename(repo.svfs.join('obsstore'),
> - repo.vfs.join('obsstore.prestrip'))
> + util.rename(repo.svfs.join(b'obsstore'),
> + repo.vfs.join(b'obsstore.prestrip'))
> del repo.obsstore # drop the cache
> newstore = repo.obsstore
> assert not newstore # should be empty after rename
> newmarkers = [m for m in oldmarkers if m not in markers]
> - tr = repo.transaction('drophack')
> + tr = repo.transaction(b'drophack')
> try:
> newstore.add(tr, newmarkers)
> tr.close()
> @@ -81,7 +81,7 @@
> repo.destroyed()
>
>
> - at command('drop', [('r', 'rev', [], 'revision to update')], _('[-r] revs'))
> + at command(b'drop', [(b'r', b'rev', [], b'revision to update')], _(b'[-r] revs'))
> def cmddrop(ui, repo, *revs, **opts):
> """I'm hacky do not use me!
>
> @@ -95,13 +95,13 @@
> This intended for Matt Mackall usage only. do not use me.
> """
> revs = list(revs)
> - revs.extend(opts['rev'])
> + revs.extend(opts[b'rev'])
> if not revs:
> - revs = ['.']
> + revs = [b'.']
> # get the changeset
> revs = scmutil.revrange(repo, revs)
> if not revs:
> - ui.write_err('no revision to drop\n')
> + ui.write_err(b'no revision to drop\n')
> return 1
> # lock from the beginning to prevent race
> wlock = lock = None
> @@ -109,49 +109,49 @@
> wlock = repo.wlock()
> lock = repo.lock()
> # check they have no children
> - if repo.revs('%ld and public()', revs):
> - ui.write_err('cannot drop public revision')
> + if repo.revs(b'%ld and public()', revs):
> + ui.write_err(b'cannot drop public revision')
> return 1
> - if repo.revs('children(%ld) - %ld', revs, revs):
> - ui.write_err('cannot drop revision with children')
> + if repo.revs(b'children(%ld) - %ld', revs, revs):
> + ui.write_err(b'cannot drop revision with children')
> return 1
> - if repo.revs('. and %ld', revs):
> - newrevs = repo.revs('max(::. - %ld)', revs)
> + if repo.revs(b'. and %ld', revs):
> + newrevs = repo.revs(b'max(::. - %ld)', revs)
> if newrevs:
> assert len(newrevs) == 1
> newrev = newrevs.first()
> else:
> newrev = -1
> commands.update(ui, repo, newrev)
> - ui.status(_('working directory now at %s\n') % repo[newrev])
> + ui.status(_(b'working directory now at %s\n') % repo[newrev])
> # get all markers and successors up to root
> nodes = [repo[r].node() for r in revs]
> - with timed(ui, 'search obsmarker'):
> + with timed(ui, b'search obsmarker'):
> markers = set(obsmarkerchainfrom(repo.obsstore, nodes))
> - ui.write('%i obsmarkers found\n' % len(markers))
> + ui.write(b'%i obsmarkers found\n' % len(markers))
> cl = repo.unfiltered().changelog
> - with timed(ui, 'search nodes'):
> + with timed(ui, b'search nodes'):
> allnodes = set(nodes)
> allnodes.update(m[0] for m in markers if cl.hasnode(m[0]))
> - ui.write('%i nodes found\n' % len(allnodes))
> + ui.write(b'%i nodes found\n' % len(allnodes))
> cl = repo.changelog
> visiblenodes = set(n for n in allnodes if cl.hasnode(n))
> # check constraint again
> - if repo.revs('%ln and public()', visiblenodes):
> - ui.write_err('cannot drop public revision')
> + if repo.revs(b'%ln and public()', visiblenodes):
> + ui.write_err(b'cannot drop public revision')
> return 1
> - if repo.revs('children(%ln) - %ln', visiblenodes, visiblenodes):
> - ui.write_err('cannot drop revision with children')
> + if repo.revs(b'children(%ln) - %ln', visiblenodes, visiblenodes):
> + ui.write_err(b'cannot drop revision with children')
> return 1
>
> if markers:
> # strip them
> - with timed(ui, 'strip obsmarker'):
> + with timed(ui, b'strip obsmarker'):
> stripmarker(ui, repo, markers)
> # strip the changeset
> - with timed(ui, 'strip nodes'):
> - repair.strip(ui, repo, list(allnodes), backup="all",
> - topic='drophack')
> + with timed(ui, b'strip nodes'):
> + repair.strip(ui, repo, list(allnodes), backup=b"all",
> + topic=b'drophack')
>
> finally:
> lockmod.release(lock, wlock)
> diff --git a/hgext3rd/evolve/legacy.py b/hgext3rd/evolve/legacy.py
> --- a/hgext3rd/evolve/legacy.py
> +++ b/hgext3rd/evolve/legacy.py
> @@ -45,20 +45,20 @@
> """
> if not repo.local():
> return
> - evolveopts = ui.configlist('experimental', 'evolution')
> + evolveopts = ui.configlist(b'experimental', b'evolution')
> if not evolveopts:
> - evolveopts = 'all'
> - ui.setconfig('experimental', 'evolution', evolveopts)
> + evolveopts = b'all'
> + ui.setconfig(b'experimental', b'evolution', evolveopts)
> for arg in sys.argv:
> - if 'debugc' in arg:
> + if b'debugc' in arg:
> break
> else:
> - data = repo.vfs.tryread('obsolete-relations')
> + data = repo.vfs.tryread(b'obsolete-relations')
> if not data:
> - data = repo.svfs.tryread('obsoletemarkers')
> + data = repo.svfs.tryread(b'obsoletemarkers')
> if data:
> - raise error.Abort('old format of obsolete marker detected!\n'
> - 'run `hg debugconvertobsolete` once.')
> + raise error.Abort(b'old format of obsolete marker detected!\n'
> + b'run `hg debugconvertobsolete` once.')
>
> def _obsdeserialize(flike):
> """read a file like object serialized with _obsserialize
> @@ -77,7 +77,7 @@
>
> cmdtable = {}
> command = commandfunc(cmdtable)
> - at command('debugconvertobsolete', [], '')
> + at command(b'debugconvertobsolete', [], b'')
> def cmddebugconvertobsolete(ui, repo):
> """import markers from an .hg/obsolete-relations file"""
> cnt = 0
> @@ -86,13 +86,13 @@
> some = False
> try:
> unlink = []
> - tr = repo.transaction('convert-obsolete')
> + tr = repo.transaction(b'convert-obsolete')
> try:
> repo._importoldobsolete = True
> store = repo.obsstore
> ### very first format
> try:
> - f = repo.vfs('obsolete-relations')
> + f = repo.vfs(b'obsolete-relations')
> try:
> some = True
> for line in f:
> @@ -101,30 +101,30 @@
> prec = bin(objhex)
> sucs = (suc == nullid) and [] or [suc]
> meta = {
> - 'date': '%i %i' % makedate(),
> - 'user': ui.username(),
> + b'date': b'%i %i' % makedate(),
> + b'user': ui.username(),
> }
> try:
> store.create(tr, prec, sucs, 0, metadata=meta)
> cnt += 1
> except ValueError:
> - repo.ui.write_err("invalid old marker line: %s"
> + repo.ui.write_err(b"invalid old marker line: %s"
> % (line))
> err += 1
> finally:
> f.close()
> - unlink.append(repo.vfs.join('obsolete-relations'))
> + unlink.append(repo.vfs.join(b'obsolete-relations'))
> except IOError:
> pass
> ### second (json) format
> - data = repo.svfs.tryread('obsoletemarkers')
> + data = repo.svfs.tryread(b'obsoletemarkers')
> if data:
> some = True
> for oldmark in json.loads(data):
> - del oldmark['id'] # dropped for now
> - del oldmark['reason'] # unused until then
> - oldobject = str(oldmark.pop('object'))
> - oldsubjects = [str(s) for s in oldmark.pop('subjects', [])]
> + del oldmark[b'id'] # dropped for now
> + del oldmark[b'reason'] # unused until then
> + oldobject = str(oldmark.pop(b'object'))
> + oldsubjects = [str(s) for s in oldmark.pop(b'subjects', [])]
> lookup_errors = (error.RepoLookupError, error.LookupError)
> if len(oldobject) != 40:
> try:
> @@ -137,9 +137,9 @@
> except lookup_errors:
> pass
>
> - oldmark['date'] = '%i %i' % tuple(oldmark['date'])
> + oldmark[b'date'] = b'%i %i' % tuple(oldmark[b'date'])
> meta = dict((k.encode('utf-8'), v.encode('utf-8'))
> - for k, v in oldmark.iteritems())
> + for k, v in oldmark.items())
> try:
> succs = [bin(n) for n in oldsubjects]
> succs = [n for n in succs if n != nullid]
> @@ -147,11 +147,11 @@
> 0, metadata=meta)
> cnt += 1
> except ValueError:
> - msg = "invalid marker %s -> %s\n"
> + msg = b"invalid marker %s -> %s\n"
> msg %= (oldobject, oldsubjects)
> repo.ui.write_err(msg)
> err += 1
> - unlink.append(repo.svfs.join('obsoletemarkers'))
> + unlink.append(repo.svfs.join(b'obsoletemarkers'))
> tr.close()
> for path in unlink:
> util.unlink(path)
> @@ -161,12 +161,12 @@
> del repo._importoldobsolete
> lock.release()
> if not some:
> - ui.warn(_('nothing to do\n'))
> - ui.status('%i obsolete marker converted\n' % cnt)
> + ui.warn(_(b'nothing to do\n'))
> + ui.status(b'%i obsolete marker converted\n' % cnt)
> if err:
> - ui.write_err('%i conversion failed. check you graph!\n' % err)
> + ui.write_err(b'%i conversion failed. check you graph!\n' % err)
>
> - at command('debugrecordpruneparents', [], '')
> + at command(b'debugrecordpruneparents', [], b'')
> def cmddebugrecordpruneparents(ui, repo):
> """add parent data to prune markers when possible
>
> @@ -174,14 +174,14 @@
> If the pruned node is locally known, it creates a new marker with parent
> data.
> """
> - pgop = 'reading markers'
> + pgop = b'reading markers'
>
> # lock from the beginning to prevent race
> wlock = lock = tr = None
> try:
> wlock = repo.wlock()
> lock = repo.lock()
> - tr = repo.transaction('recordpruneparents')
> + tr = repo.transaction(b'recordpruneparents')
> unfi = repo.unfiltered()
> nm = unfi.changelog.nodemap
> store = repo.obsstore
> @@ -196,7 +196,7 @@
> store.create(tr, prec=mark[0], succs=mark[1], flag=mark[2],
> metadata=dict(mark[3]), parents=parents)
> if len(store._all) - before:
> - ui.write(_('created new markers for %i\n') % rev)
> + ui.write(_(b'created new markers for %i\n') % rev)
> ui.progress(pgop, idx, total=pgtotal)
> tr.close()
> ui.progress(pgop, None)
> diff --git a/hgext3rd/evolve/metadata.py b/hgext3rd/evolve/metadata.py
> --- a/hgext3rd/evolve/metadata.py
> +++ b/hgext3rd/evolve/metadata.py
> @@ -5,7 +5,7 @@
> # This software may be used and distributed according to the terms of the
> # GNU General Public License version 2 or any later version.
>
> -__version__ = '9.0.0'
> -testedwith = '4.5.2 4.6.2 4.7 4.8 4.9 5.0'
> -minimumhgversion = '4.5'
> -buglink = 'https://bz.mercurial-scm.org/'
> +__version__ = b'9.0.0'
> +testedwith = b'4.5.2 4.6.2 4.7 4.8 4.9 5.0'
> +minimumhgversion = b'4.5'
> +buglink = b'https://bz.mercurial-scm.org/'
> diff --git a/hgext3rd/evolve/obscache.py b/hgext3rd/evolve/obscache.py
> --- a/hgext3rd/evolve/obscache.py
> +++ b/hgext3rd/evolve/obscache.py
> @@ -28,7 +28,7 @@
> obsstorefilecache = localrepo.localrepository.obsstore
>
>  # obsstore is a filecache so we have to do some special dancing
> - at eh.wrapfunction(obsstorefilecache, 'func')
> + at eh.wrapfunction(obsstorefilecache, b'func')
> def obsstorewithcache(orig, repo):
> obsstore = orig(repo)
> obsstore.obscache = obscache(repo.unfiltered())
> @@ -50,10 +50,10 @@
> length, cachekey will be set to None."""
> # default value
> obsstoresize = 0
> - keydata = ''
> + keydata = b''
> # try to get actual data from the obsstore
> try:
> - with self.svfs('obsstore') as obsfile:
> + with self.svfs(b'obsstore') as obsfile:
> obsfile.seek(0, 2)
> obsstoresize = obsfile.tell()
> if index is None:
> @@ -82,11 +82,11 @@
> def markersfrom(obsstore, byteoffset, firstmarker):
> if not firstmarker:
> return list(obsstore)
> - elif '_all' in vars(obsstore):
> + elif b'_all' in vars(obsstore):
> # if the data are in memory, just use that
> return obsstore._all[firstmarker:]
> else:
> - obsdata = obsstore.svfs.tryread('obsstore')
> + obsdata = obsstore.svfs.tryread(b'obsstore')
> return obsolete._readmarkers(obsdata, byteoffset)[1]
>
>
> @@ -178,7 +178,7 @@
>
> reset, revs, obsmarkers, obskeypair = upgrade
> if reset or self._cachekey is None:
> - repo.ui.log('evoext-cache', 'strip detected, %s cache reset\n' % self._cachename)
> + repo.ui.log(b'evoext-cache', b'strip detected, %s cache reset\n' % self._cachename)
> self.clear(reset=True)
>
> starttime = util.timer()
> @@ -186,7 +186,7 @@
> obsmarkers = list(obsmarkers)
> self._updatefrom(repo, revs, obsmarkers)
> duration = util.timer() - starttime
> - repo.ui.log('evoext-cache', 'updated %s in %.4f seconds (%sr, %so)\n',
> + repo.ui.log(b'evoext-cache', b'updated %s in %.4f seconds (%sr, %so)\n',
> self._cachename, duration, len(revs), len(obsmarkers))
>
> # update the key from the new data
> @@ -314,10 +314,10 @@
> zero. That would be especially useful for the '.pending' overlay.
> """
>
> - _filepath = 'evoext-obscache-00'
> - _headerformat = '>q20sQQ20s'
> + _filepath = b'evoext-obscache-00'
> + _headerformat = b'>q20sQQ20s'
>
> - _cachename = 'evo-ext-obscache' # used for error message
> + _cachename = b'evo-ext-obscache' # used for error message
>
> def __init__(self, repo):
> super(obscache, self).__init__()
> @@ -339,7 +339,7 @@
> def _setdata(self, data):
> """set a new bytearray data, invalidating the 'get' shortcut if needed"""
> self._data = data
> - if 'get' in vars(self):
> + if b'get' in vars(self):
> del self.get
>
> def clear(self, reset=False):
> @@ -403,15 +403,15 @@
> return
>
> try:
> - cachefile = repo.cachevfs(self._filepath, 'w', atomictemp=True)
> + cachefile = repo.cachevfs(self._filepath, b'w', atomictemp=True)
> headerdata = struct.pack(self._headerformat, *self._cachekey)
> cachefile.write(headerdata)
> cachefile.write(self._data)
> cachefile.close()
> self._ondiskkey = self._cachekey
> except (IOError, OSError) as exc:
> - repo.ui.log('obscache', 'could not write update %s\n' % exc)
> - repo.ui.debug('obscache: could not write update %s\n' % exc)
> + repo.ui.log(b'obscache', b'could not write update %s\n' % exc)
> + repo.ui.debug(b'obscache: could not write update %s\n' % exc)
>
> def load(self, repo):
> """load data from disk"""
> @@ -447,10 +447,10 @@
> # will be about as fast...
> if not obscache.uptodate(repo):
> if repo.currenttransaction() is None:
> - repo.ui.log('evoext-cache',
> - 'obscache is out of date, '
> - 'falling back to slower obsstore version\n')
> - repo.ui.debug('obscache is out of date\n')
> + repo.ui.log(b'evoext-cache',
> + b'obscache is out of date, '
> + b'falling back to slower obsstore version\n')
> + repo.ui.debug(b'obscache is out of date\n')
> return orig(repo)
> else:
> # If a transaction is open, it is worthwhile to update and use
> @@ -465,9 +465,9 @@
>
> @eh.uisetup
> def cachefuncs(ui):
> - orig = obsolete.cachefuncs['obsolete']
> + orig = obsolete.cachefuncs[b'obsolete']
> wrapped = lambda repo: _computeobsoleteset(orig, repo)
> - obsolete.cachefuncs['obsolete'] = wrapped
> + obsolete.cachefuncs[b'obsolete'] = wrapped
>
> @eh.reposetup
> def setupcache(ui, repo):
> @@ -476,7 +476,7 @@
>
> @localrepo.unfilteredmethod
> def destroyed(self):
> - if 'obsstore' in vars(self):
> + if b'obsstore' in vars(self):
> self.obsstore.obscache.clear()
> super(obscacherepo, self).destroyed()
>
> diff --git a/hgext3rd/evolve/obsdiscovery.py b/hgext3rd/evolve/obsdiscovery.py
> --- a/hgext3rd/evolve/obsdiscovery.py
> +++ b/hgext3rd/evolve/obsdiscovery.py
> @@ -59,11 +59,11 @@
> obsexcmsg = utility.obsexcmsg
>
> # Config
> -eh.configitem('experimental', 'evolution.obsdiscovery', True)
> -eh.configitem('experimental', 'obshashrange', True)
> -eh.configitem('experimental', 'obshashrange.warm-cache', 'auto')
> -eh.configitem('experimental', 'obshashrange.max-revs', None)
> -eh.configitem('experimental', 'obshashrange.lru-size', 2000)
> +eh.configitem(b'experimental', b'evolution.obsdiscovery', True)
> +eh.configitem(b'experimental', b'obshashrange', True)
> +eh.configitem(b'experimental', b'obshashrange.warm-cache', b'auto')
> +eh.configitem(b'experimental', b'obshashrange.max-revs', None)
> +eh.configitem(b'experimental', b'obshashrange.lru-size', 2000)
>
> ##################################
> ### Code performing discovery ###
> @@ -75,7 +75,7 @@
> missing = set()
> starttime = util.timer()
>
> - heads = local.revs('heads(%ld)', probeset)
> + heads = local.revs(b'heads(%ld)', probeset)
> local.stablerange.warmup(local)
>
> rangelength = local.stablerange.rangelength
> @@ -103,8 +103,8 @@
>
> local.obsstore.rangeobshashcache.update(local)
> querycount = 0
> - compat.progress(ui, _("comparing obsmarker with other"), querycount,
> - unit=_("queries"))
> + compat.progress(ui, _(b"comparing obsmarker with other"), querycount,
> + unit=_(b"queries"))
> overflow = []
> while sample or overflow:
> if overflow:
> @@ -116,7 +116,7 @@
> overflow = sample[samplesize:]
> sample = sample[:samplesize]
> elif len(sample) < samplesize:
> - ui.debug("query %i; add more sample (target %i, current %i)\n"
> + ui.debug(b"query %i; add more sample (target %i, current %i)\n"
> % (querycount, samplesize, len(sample)))
> # we need more sample !
> needed = samplesize - len(sample)
> @@ -142,7 +142,7 @@
>
> nbsample = len(sample)
> maxsize = max([rangelength(local, r) for r in sample])
> - ui.debug("query %i; sample size is %i, largest range %i\n"
> + ui.debug(b"query %i; sample size is %i, largest range %i\n"
> % (querycount, nbsample, maxsize))
> nbreplies = 0
> replies = list(_queryrange(ui, local, remote, sample))
> @@ -159,15 +159,15 @@
> addentry(new)
> assert nbsample == nbreplies
> querycount += 1
> - compat.progress(ui, _("comparing obsmarker with other"), querycount,
> - unit=_("queries"))
> - compat.progress(ui, _("comparing obsmarker with other"), None)
> + compat.progress(ui, _(b"comparing obsmarker with other"), querycount,
> + unit=_(b"queries"))
> + compat.progress(ui, _(b"comparing obsmarker with other"), None)
> local.obsstore.rangeobshashcache.save(local)
> duration = util.timer() - starttime
> - logmsg = ('obsdiscovery, %d/%d mismatch'
> - ' - %d obshashrange queries in %.4f seconds\n')
> + logmsg = (b'obsdiscovery, %d/%d mismatch'
> + b' - %d obshashrange queries in %.4f seconds\n')
> logmsg %= (len(missing), len(probeset), querycount, duration)
> - ui.log('evoext-obsdiscovery', logmsg)
> + ui.log(b'evoext-obsdiscovery', logmsg)
> ui.debug(logmsg)
> return sorted(missing)
>
> @@ -186,30 +186,30 @@
> ##############################
>
> @eh.command(
> - 'debugobshashrange',
> + b'debugobshashrange',
> [
> - ('', 'rev', [], 'display obshash for all (rev, 0) range in REVS'),
> - ('', 'subranges', False, 'display all subranges'),
> - ],
> - _(''))
> + (b'', b'rev', [], b'display obshash for all (rev, 0) range in REVS'),
> + (b'', b'subranges', False, b'display all subranges'),
> + ],
> + _(b''))
> def debugobshashrange(ui, repo, **opts):
> """display the ::REVS set topologically sorted in a stable way
> """
> s = node.short
> - revs = scmutil.revrange(repo, opts['rev'])
> + revs = scmutil.revrange(repo, opts[b'rev'])
> # prewarm depth cache
> if revs:
> repo.stablerange.warmup(repo, max(revs))
> cl = repo.changelog
> rangelength = repo.stablerange.rangelength
> depthrev = repo.stablerange.depthrev
> - if opts['subranges']:
> + if opts[b'subranges']:
> ranges = stablerange.subrangesclosure(repo, repo.stablerange, revs)
> else:
> ranges = [(r, 0) for r in revs]
> - headers = ('rev', 'node', 'index', 'size', 'depth', 'obshash')
> - linetemplate = '%12d %12s %12d %12d %12d %12s\n'
> - headertemplate = linetemplate.replace('d', 's')
> + headers = (b'rev', b'node', b'index', b'size', b'depth', b'obshash')
> + linetemplate = b'%12d %12s %12d %12d %12d %12s\n'
> + headertemplate = linetemplate.replace(b'd', b's')
> ui.status(headertemplate % headers)
> repo.obsstore.rangeobshashcache.update(repo)
> for r in ranges:
> @@ -265,7 +265,7 @@
> idx INTEGER NOT NULL,
> obshash BLOB NOT NULL,
> PRIMARY KEY(rev, idx));""",
> - "CREATE INDEX range_index ON obshashrange(rev, idx);",
> + b"CREATE INDEX range_index ON obshashrange(rev, idx);",
> """CREATE TABLE meta(schemaversion INTEGER NOT NULL,
> tiprev INTEGER NOT NULL,
> tipnode BLOB NOT NULL,
> @@ -274,17 +274,17 @@
> obskey BLOB NOT NULL
> );""",
> ]
> -_queryexist = "SELECT name FROM sqlite_master WHERE type='table' AND name='meta';"
> +_queryexist = b"SELECT name FROM sqlite_master WHERE type='table' AND name='meta';"
> _clearmeta = """DELETE FROM meta;"""
> _newmeta = """INSERT INTO meta (schemaversion, tiprev, tipnode, nbobsmarker, obssize, obskey)
> VALUES (?,?,?,?,?,?);"""
> -_updateobshash = "INSERT INTO obshashrange(rev, idx, obshash) VALUES (?,?,?);"
> -_querymeta = "SELECT schemaversion, tiprev, tipnode, nbobsmarker, obssize, obskey FROM meta;"
> -_queryobshash = "SELECT obshash FROM obshashrange WHERE (rev = ? AND idx = ?);"
> -_query_max_stored = "SELECT MAX(rev) FROM obshashrange"
> +_updateobshash = b"INSERT INTO obshashrange(rev, idx, obshash) VALUES (?,?,?);"
> +_querymeta = b"SELECT schemaversion, tiprev, tipnode, nbobsmarker, obssize, obskey FROM meta;"
> +_queryobshash = b"SELECT obshash FROM obshashrange WHERE (rev = ? AND idx = ?);"
> +_query_max_stored = b"SELECT MAX(rev) FROM obshashrange"
>
> -_reset = "DELETE FROM obshashrange;"
> -_delete = "DELETE FROM obshashrange WHERE (rev = ? AND idx = ?);"
> +_reset = b"DELETE FROM obshashrange;"
> +_delete = b"DELETE FROM obshashrange WHERE (rev = ? AND idx = ?);"
>
> def _affectedby(repo, markers):
> """return all nodes whose relevant set is affected by this changeset
> @@ -332,8 +332,8 @@
>
> _schemaversion = 3
>
> - _cachename = 'evo-ext-obshashrange' # used for error message
> - _filename = 'evoext_obshashrange_v2.sqlite'
> + _cachename = b'evo-ext-obshashrange' # used for error message
> + _filename = b'evoext_obshashrange_v2.sqlite'
>
> def __init__(self, repo):
> super(_obshashcache, self).__init__()
> @@ -352,7 +352,7 @@
> self._new.clear()
> if reset:
> self._valid = False
> - if '_con' in vars(self):
> + if b'_con' in vars(self):
> del self._con
>
> def get(self, rangeid):
> @@ -361,7 +361,7 @@
> # XXX there are issue with cache warming, we hack around it for now
> if not getattr(self, '_updating', False):
> if self._cachekey[0] < rangeid[0]:
> - msg = ('using unwarmed obshashrangecache (%s %s)'
> + msg = (b'using unwarmed obshashrangecache (%s %s)'
> % (rangeid[0], self._cachekey[0]))
> raise error.ProgrammingError(msg)
>
> @@ -376,7 +376,7 @@
> except (sqlite3.DatabaseError, sqlite3.OperationalError):
> # something is wrong with the sqlite db
> # Since this is a cache, we ignore it.
> - if '_con' in vars(self):
> + if b'_con' in vars(self):
> del self._con
> self._new.clear()
> return value
> @@ -405,8 +405,8 @@
> affected = []
> if RESET_ABOVE < len(obsmarkers):
> # lots of new obsmarkers, probably smarter to reset the cache
> - repo.ui.log('evoext-cache', 'obshashcache reset - '
> - 'many new markers (%d)\n'
> + repo.ui.log(b'evoext-cache', b'obshashcache reset - '
> + b'many new markers (%d)\n'
> % len(obsmarkers))
> reset = True
> elif obsmarkers:
> @@ -419,23 +419,23 @@
> if r is not None and r <= max_stored]
>
> if RESET_ABOVE < len(affected):
> - repo.ui.log('evoext-cache', 'obshashcache reset - '
> - 'new markers affect many changeset (%d)\n'
> + repo.ui.log(b'evoext-cache', b'obshashcache reset - '
> + b'new markers affect many changeset (%d)\n'
> % len(affected))
> reset = True
>
> if affected or reset:
> if not reset:
> - repo.ui.log('evoext-cache', 'obshashcache clean - '
> - 'new markers affect %d changeset and cached ranges\n'
> + repo.ui.log(b'evoext-cache', b'obshashcache clean - '
> + b'new markers affect %d changeset and cached ranges\n'
> % len(affected))
> if con is not None:
> # always reset for now, the code detecting affect is buggy
> # so we need to reset more broadly than we would like.
> try:
> if repo.stablerange._con is None:
> - repo.ui.log('evoext-cache', 'obshashcache reset - '
> - 'underlying stablerange cache unavailable\n')
> + repo.ui.log(b'evoext-cache', b'obshashcache reset - '
> + b'underlying stablerange cache unavailable\n')
> reset = True
> if reset:
> con.execute(_reset)
> @@ -446,7 +446,7 @@
> for r in ranges:
> self._data.pop(r, None)
> except (sqlite3.DatabaseError, sqlite3.OperationalError) as exc:
> - repo.ui.log('evoext-cache', 'error while updating obshashrange cache: %s' % exc)
> + repo.ui.log(b'evoext-cache', b'error while updating obshashrange cache: %s' % exc)
> del self._updating
> return
>
> @@ -456,7 +456,7 @@
> # single revision is quite costly)
> newrevs = []
> stop = self._cachekey[0] # tiprev
> - for h in repo.filtered('immutable').changelog.headrevs():
> + for h in repo.filtered(b'immutable').changelog.headrevs():
> if h <= stop and h in affected:
> newrevs.append(h)
> newrevs.extend(revs)
> @@ -466,14 +466,14 @@
> total = len(revs)
>
> def progress(pos, rev):
> - compat.progress(repo.ui, 'updating obshashrange cache',
> - pos, 'rev %s' % rev, unit='revision', total=total)
> + compat.progress(repo.ui, b'updating obshashrange cache',
> + pos, b'rev %s' % rev, unit=b'revision', total=total)
> # warm the cache for the new revs
> - progress(0, '')
> + progress(0, b'')
> for idx, r in enumerate(revs):
> _obshashrange(repo, (r, 0))
> progress(idx, r)
> - progress(None, '')
> + progress(None, b'')
>
> del self._updating
>
> @@ -492,7 +492,7 @@
> util.makedirs(self._vfs.dirname(self._path))
> except OSError:
> return None
> - con = sqlite3.connect(self._path, timeout=30, isolation_level="IMMEDIATE")
> + con = sqlite3.connect(self._path, timeout=30, isolation_level=b"IMMEDIATE")
> con.text_factory = str
> return con
>
> @@ -525,14 +525,14 @@
> repo = repo.unfiltered()
> try:
> with repo.lock():
> - if 'stablerange' in vars(repo):
> + if b'stablerange' in vars(repo):
> repo.stablerange.save(repo)
> self._save(repo)
> except error.LockError:
>              # Exceptionally we are noisy about it since the performance impact
>              # is large. We should address that before using this more
> # widely.
> - msg = _('obshashrange cache: skipping save unable to lock repo\n')
> + msg = _(b'obshashrange cache: skipping save unable to lock repo\n')
> repo.ui.warn(msg)
>
> def _save(self, repo):
> @@ -545,22 +545,22 @@
> #
> # operational error catch read-only and locked database
> # IntegrityError catch Unique constraint error that may arise
> - if '_con' in vars(self):
> + if b'_con' in vars(self):
> del self._con
> self._new.clear()
> - repo.ui.log('evoext-cache', 'error while saving new data: %s' % exc)
> - repo.ui.debug('evoext-cache: error while saving new data: %s' % exc)
> + repo.ui.log(b'evoext-cache', b'error while saving new data: %s' % exc)
> + repo.ui.debug(b'evoext-cache: error while saving new data: %s' % exc)
>
> def _trysave(self, repo):
> if self._con is None:
> util.unlinkpath(self._path, ignoremissing=True)
> - if '_con' in vars(self):
> + if b'_con' in vars(self):
> del self._con
>
> con = self._db()
> if con is None:
> - repo.ui.log('evoext-cache', 'unable to write obshashrange cache'
> - ' - cannot create database')
> + repo.ui.log(b'evoext-cache', b'unable to write obshashrange cache'
> + b' - cannot create database')
> return
> with con:
> for req in _sqliteschema:
> @@ -577,12 +577,12 @@
> # drifting is currently an issue because this means another
> # process might have already added the cache line we are about
> # to add. This will confuse sqlite
> - msg = _('obshashrange cache: skipping write, '
> - 'database drifted under my feet\n')
> + msg = _(b'obshashrange cache: skipping write, '
> + b'database drifted under my feet\n')
> repo.ui.warn(msg)
> self._new.clear()
> self._valid = False
> - if '_con' in vars(self):
> + if b'_con' in vars(self):
> del self._con
> self._valid = False
> return
> @@ -594,7 +594,7 @@
> self._new.clear()
> self._valid = True
> self._ondiskcachekey = self._cachekey
> -@eh.wrapfunction(obsolete.obsstore, '_addmarkers')
> +@eh.wrapfunction(obsolete.obsstore, b'_addmarkers')
> + at eh.wrapfunction(obsolete.obsstore, b'_addmarkers')
> def _addmarkers(orig, obsstore, *args, **kwargs):
> obsstore.rangeobshashcache.clear()
> return orig(obsstore, *args, **kwargs)
> @@ -603,7 +603,7 @@
>
>
> # obsstore is a filecache so we have do to some spacial dancing
> -@eh.wrapfunction(obsstorefilecache, 'func')
> +@eh.wrapfunction(obsstorefilecache, b'func')
> + at eh.wrapfunction(obsstorefilecache, b'func')
> def obsstorewithcache(orig, repo):
> obsstore = orig(repo)
> obsstore.rangeobshashcache = _obshashcache(repo.unfiltered())
> @@ -615,9 +615,9 @@
> class obshashrepo(repo.__class__):
> @localrepo.unfilteredmethod
> def destroyed(self):
> - if 'obsstore' in vars(self):
> + if b'obsstore' in vars(self):
> self.obsstore.rangeobshashcache.clear()
> - toplevel = not util.safehasattr(self, '_destroying')
> + toplevel = not util.safehasattr(self, b'_destroying')
> if toplevel:
> self._destroying = True
> try:
> @@ -664,7 +664,7 @@
> return _obshashrange_v0(peer._repo, ranges)
>
>
> -_indexformat = '>I'
> +_indexformat = b'>I'
> _indexsize = _calcsize(_indexformat)
> def _encrange(node_rangeid):
> """encode a (node) range"""
> @@ -682,13 +682,13 @@
> def peer_obshashrange_v0(self, ranges):
> binranges = [_encrange(r) for r in ranges]
> encranges = encodelist(binranges)
> - d = self._call("evoext_obshashrange_v1", ranges=encranges)
> + d = self._call(b"evoext_obshashrange_v1", ranges=encranges)
> try:
> return decodelist(d)
> except ValueError:
> - self._abort(error.ResponseError(_("unexpected response:"), d))
> + self._abort(error.ResponseError(_(b"unexpected response:"), d))
>
> -@compat.wireprotocommand(eh, 'evoext_obshashrange_v1', 'ranges')
> +@compat.wireprotocommand(eh, b'evoext_obshashrange_v1', b'ranges')
> + at compat.wireprotocommand(eh, b'evoext_obshashrange_v1', b'ranges')
> def srv_obshashrange_v1(repo, proto, ranges):
> ranges = decodelist(ranges)
> ranges = [_decrange(r) for r in ranges]
> @@ -696,16 +696,16 @@
> return encodelist(hashes)
>
> def _useobshashrange(repo):
> - base = repo.ui.configbool('experimental', 'obshashrange')
> + base = repo.ui.configbool(b'experimental', b'obshashrange')
> if base:
> - maxrevs = repo.ui.configint('experimental', 'obshashrange.max-revs')
> + maxrevs = repo.ui.configint(b'experimental', b'obshashrange.max-revs')
> if maxrevs is not None and maxrevs < len(repo.unfiltered()):
> base = False
> return base
>
> def _canobshashrange(local, remote):
> return (_useobshashrange(local)
> - and remote.capable('_evoext_obshashrange_v1'))
> + and remote.capable(b'_evoext_obshashrange_v1'))
>
> def _obshashrange_capabilities(orig, repo, proto):
> """wrapper to advertise new capability"""
> @@ -715,7 +715,7 @@
>
> # Compat hg 4.6+ (2f7290555c96)
> bytesresponse = False
> - if util.safehasattr(caps, 'data'):
> + if util.safehasattr(caps, b'data'):
> bytesresponse = True
> caps = caps.data
>
> @@ -732,14 +732,14 @@
> @eh.extsetup
> def obshashrange_extsetup(ui):
> ###
> - extensions.wrapfunction(wireprotov1server, 'capabilities',
> + extensions.wrapfunction(wireprotov1server, b'capabilities',
> _obshashrange_capabilities)
> # wrap command content
> - oldcap, args = wireprotov1server.commands['capabilities']
> + oldcap, args = wireprotov1server.commands[b'capabilities']
>
> def newcap(repo, proto):
> return _obshashrange_capabilities(oldcap, repo, proto)
> - wireprotov1server.commands['capabilities'] = (newcap, args)
> + wireprotov1server.commands[b'capabilities'] = (newcap, args)
>
> ##########################################
> ### trigger discovery during exchange ###
> @@ -751,7 +751,7 @@
> # exchange of obsmarkers is enabled locally
> and obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
> # remote server accept markers
> - and 'obsolete' in pushop.remote.listkeys('namespaces'))
> + and b'obsolete' in pushop.remote.listkeys(b'namespaces'))
>
> def _pushobshashrange(pushop, commonrevs):
> repo = pushop.repo.unfiltered()
> @@ -772,15 +772,15 @@
> """
>
> def usediscovery(repo):
> - return repo.ui.configbool('experimental', 'evolution.obsdiscovery')
> + return repo.ui.configbool(b'experimental', b'evolution.obsdiscovery')
>
> -@eh.wrapfunction(exchange, '_pushdiscoveryobsmarkers')
> +@eh.wrapfunction(exchange, b'_pushdiscoveryobsmarkers')
> + at eh.wrapfunction(exchange, b'_pushdiscoveryobsmarkers')
> def _pushdiscoveryobsmarkers(orig, pushop):
> if _dopushmarkers(pushop):
> repo = pushop.repo
> remote = pushop.remote
> - obsexcmsg(repo.ui, "computing relevant nodes\n")
> - revs = list(repo.revs('::%ln', pushop.futureheads))
> + obsexcmsg(repo.ui, b"computing relevant nodes\n")
> + revs = list(repo.revs(b'::%ln', pushop.futureheads))
> unfi = repo.unfiltered()
>
> if not usediscovery(repo):
> @@ -800,27 +800,27 @@
> # obs markers.
> return orig(pushop)
>
> - obsexcmsg(repo.ui, "looking for common markers in %i nodes\n"
> + obsexcmsg(repo.ui, b"looking for common markers in %i nodes\n"
> % len(revs))
> - commonrevs = list(unfi.revs('::%ln', pushop.outgoing.commonheads))
> + commonrevs = list(unfi.revs(b'::%ln', pushop.outgoing.commonheads))
> # find the nodes where the relevant obsmarkers mismatches
> nodes = discovery(pushop, commonrevs)
>
> if nodes:
> - obsexcmsg(repo.ui, "computing markers relevant to %i nodes\n"
> + obsexcmsg(repo.ui, b"computing markers relevant to %i nodes\n"
> % len(nodes))
> pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes)
> else:
> - obsexcmsg(repo.ui, "markers already in sync\n")
> + obsexcmsg(repo.ui, b"markers already in sync\n")
> pushop.outobsmarkers = []
>
> @eh.extsetup
> def _installobsmarkersdiscovery(ui):
> - olddisco = exchange.pushdiscoverymapping['obsmarker']
> + olddisco = exchange.pushdiscoverymapping[b'obsmarker']
>
> def newdisco(pushop):
> _pushdiscoveryobsmarkers(olddisco, pushop)
> - exchange.pushdiscoverymapping['obsmarker'] = newdisco
> + exchange.pushdiscoverymapping[b'obsmarker'] = newdisco
>
> def buildpullobsmarkersboundaries(pullop, bundle2=True):
> """small function returning the argument for pull markers call
> @@ -830,25 +830,25 @@
> repo = pullop.repo
> remote = pullop.remote
> unfi = repo.unfiltered()
> - revs = unfi.revs('::(%ln - null)', pullop.common)
> - boundaries = {'heads': pullop.pulledsubset}
> + revs = unfi.revs(b'::(%ln - null)', pullop.common)
> + boundaries = {b'heads': pullop.pulledsubset}
> if not revs: # nothing common
> - boundaries['common'] = [node.nullid]
> + boundaries[b'common'] = [node.nullid]
> return boundaries
>
> if not usediscovery(repo):
> # discovery disabled by users.
> repo.ui.status(obsdiscovery_skip_message)
> - boundaries['common'] = [node.nullid]
> + boundaries[b'common'] = [node.nullid]
> return boundaries
>
> if bundle2 and _canobshashrange(repo, remote):
> - obsexcmsg(repo.ui, "looking for common markers in %i nodes\n"
> + obsexcmsg(repo.ui, b"looking for common markers in %i nodes\n"
> % len(revs))
> - boundaries['missing'] = findmissingrange(repo.ui, unfi, pullop.remote,
> - revs)
> + boundaries[b'missing'] = findmissingrange(repo.ui, unfi, pullop.remote,
> + revs)
> else:
> - boundaries['common'] = [node.nullid]
> + boundaries[b'common'] = [node.nullid]
> return boundaries
>
> # merge later for outer layer wrapping
> diff --git a/hgext3rd/evolve/obsexchange.py b/hgext3rd/evolve/obsexchange.py
> --- a/hgext3rd/evolve/obsexchange.py
> +++ b/hgext3rd/evolve/obsexchange.py
> @@ -37,7 +37,7 @@
> obsexcmsg = utility.obsexcmsg
> obsexcprg = utility.obsexcprg
>
> -eh.configitem('experimental', 'verbose-obsolescence-exchange', False)
> +eh.configitem(b'experimental', b'verbose-obsolescence-exchange', False)
>
> _bestformat = max(obsolete.formats.keys())
>
> @@ -58,49 +58,49 @@
> # <= hg 4.5
> from mercurial import wireproto
> gboptsmap = wireproto.gboptsmap
> - gboptsmap['evo_obscommon'] = 'nodes'
> - gboptsmap['evo_missing_nodes'] = 'nodes'
> + gboptsmap[b'evo_obscommon'] = b'nodes'
> + gboptsmap[b'evo_missing_nodes'] = b'nodes'
>
> -@eh.wrapfunction(exchange, '_pullbundle2extraprepare')
> +@eh.wrapfunction(exchange, b'_pullbundle2extraprepare')
> + at eh.wrapfunction(exchange, b'_pullbundle2extraprepare')
> def _addobscommontob2pull(orig, pullop, kwargs):
> ret = orig(pullop, kwargs)
> ui = pullop.repo.ui
> - if ('obsmarkers' in kwargs
> - and pullop.remote.capable('_evoext_getbundle_obscommon')):
> + if (b'obsmarkers' in kwargs
> + and pullop.remote.capable(b'_evoext_getbundle_obscommon')):
> boundaries = obsdiscovery.buildpullobsmarkersboundaries(pullop)
> - if 'common' in boundaries:
> - common = boundaries['common']
> + if b'common' in boundaries:
> + common = boundaries[b'common']
> if common != pullop.common:
> - obsexcmsg(ui, 'request obsmarkers for some common nodes\n')
> + obsexcmsg(ui, b'request obsmarkers for some common nodes\n')
> if common != [node.nullid]:
> - kwargs['evo_obscommon'] = common
> - elif 'missing' in boundaries:
> - missing = boundaries['missing']
> + kwargs[b'evo_obscommon'] = common
> + elif b'missing' in boundaries:
> + missing = boundaries[b'missing']
> if missing:
> - obsexcmsg(ui, 'request obsmarkers for %d common nodes\n'
> + obsexcmsg(ui, b'request obsmarkers for %d common nodes\n'
> % len(missing))
> - kwargs['evo_missing_nodes'] = missing
> + kwargs[b'evo_missing_nodes'] = missing
> return ret
>
> def _getbundleobsmarkerpart(orig, bundler, repo, source, **kwargs):
> - if not (set(['evo_obscommon', 'evo_missing_nodes']) & set(kwargs)):
> + if not (set([b'evo_obscommon', b'evo_missing_nodes']) & set(kwargs)):
> return orig(bundler, repo, source, **kwargs)
>
> - if kwargs.get('obsmarkers', False):
> - heads = kwargs.get('heads')
> - if 'evo_obscommon' in kwargs:
> + if kwargs.get(b'obsmarkers', False):
> + heads = kwargs.get(b'heads')
> + if b'evo_obscommon' in kwargs:
> if heads is None:
> heads = repo.heads()
> - obscommon = kwargs.get('evo_obscommon', ())
> + obscommon = kwargs.get(b'evo_obscommon', ())
> assert obscommon
> - obsset = repo.unfiltered().set('::%ln - ::%ln', heads, obscommon)
> + obsset = repo.unfiltered().set(b'::%ln - ::%ln', heads, obscommon)
> subset = [c.node() for c in obsset]
> else:
> - common = kwargs.get('common')
> - subset = [c.node() for c in repo.unfiltered().set('only(%ln, %ln)', heads, common)]
> - subset += kwargs['evo_missing_nodes']
> + common = kwargs.get(b'common')
> + subset = [c.node() for c in repo.unfiltered().set(b'only(%ln, %ln)', heads, common)]
> + subset += kwargs[b'evo_missing_nodes']
> markers = repo.obsstore.relevantmarkers(subset)
> - if util.safehasattr(bundle2, 'buildobsmarkerspart'):
> + if util.safehasattr(bundle2, b'buildobsmarkerspart'):
> bundle2.buildobsmarkerspart(bundler, markers)
> else:
> exchange.buildobsmarkerspart(bundler, markers)
> @@ -113,7 +113,7 @@
>
> # Compat hg 4.6+ (2f7290555c96)
> bytesresponse = False
> - if util.safehasattr(caps, 'data'):
> + if util.safehasattr(caps, b'data'):
> bytesresponse = True
> caps = caps.data
>
> @@ -138,36 +138,36 @@
> from mercurial import wireproto
> gboptsmap = wireproto.gboptsmap
> wireprotov1server = wireproto
> - gboptsmap['evo_obscommon'] = 'nodes'
> + gboptsmap[b'evo_obscommon'] = b'nodes'
>
> # wrap module content
> - origfunc = exchange.getbundle2partsmapping['obsmarkers']
> + origfunc = exchange.getbundle2partsmapping[b'obsmarkers']
>
> def newfunc(*args, **kwargs):
> return _getbundleobsmarkerpart(origfunc, *args, **kwargs)
> - exchange.getbundle2partsmapping['obsmarkers'] = newfunc
> + exchange.getbundle2partsmapping[b'obsmarkers'] = newfunc
>
> - extensions.wrapfunction(wireprotov1server, 'capabilities',
> + extensions.wrapfunction(wireprotov1server, b'capabilities',
> _obscommon_capabilities)
> # wrap command content
> - oldcap, args = wireprotov1server.commands['capabilities']
> + oldcap, args = wireprotov1server.commands[b'capabilities']
>
> def newcap(repo, proto):
> return _obscommon_capabilities(oldcap, repo, proto)
> - wireprotov1server.commands['capabilities'] = (newcap, args)
> + wireprotov1server.commands[b'capabilities'] = (newcap, args)
>
> def _pushobsmarkers(repo, data):
> tr = lock = None
> try:
> lock = repo.lock()
> - tr = repo.transaction('pushkey: obsolete markers')
> + tr = repo.transaction(b'pushkey: obsolete markers')
> new = repo.obsstore.mergemarkers(tr, data)
> if new is not None:
> - obsexcmsg(repo.ui, "%i obsolescence markers added\n" % new, True)
> + obsexcmsg(repo.ui, b"%i obsolescence markers added\n" % new, True)
> tr.close()
> finally:
> lockmod.release(tr, lock)
> - repo.hook('evolve_pushobsmarkers')
> + repo.hook(b'evolve_pushobsmarkers')
>
> def srv_pushobsmarkers(repo, proto):
> """wireprotocol command"""
> @@ -187,18 +187,18 @@
> def _getobsmarkersstream(repo, heads=None, common=None):
> """Get a binary stream for all markers relevant to `::<heads> - ::<common>`
> """
> - revset = ''
> + revset = b''
> args = []
> repo = repo.unfiltered()
> if heads is None:
> - revset = 'all()'
> + revset = b'all()'
> elif heads:
> - revset += "(::%ln)"
> + revset += b"(::%ln)"
> args.append(heads)
> else:
> - assert False, 'pulling no heads?'
> + assert False, b'pulling no heads?'
> if common:
> - revset += ' - (::%ln)'
> + revset += b' - (::%ln)'
> args.append(common)
> nodes = [c.node() for c in repo.set(revset, *args)]
> markers = repo.obsstore.relevantmarkers(nodes)
> @@ -220,20 +220,20 @@
> except (ImportError, AttributeError):
> from mercurial import wireproto as wireprototypes
> wireprotov1server = wireprototypes
> - opts = wireprotov1server.options('', ['heads', 'common'], others)
> - for k, v in opts.iteritems():
> - if k in ('heads', 'common'):
> + opts = wireprotov1server.options(b'', [b'heads', b'common'], others)
> + for k, v in opts.items():
> + if k in (b'heads', b'common'):
> opts[k] = wireprototypes.decodelist(v)
> obsdata = _getobsmarkersstream(repo, **opts)
> finaldata = StringIO()
> obsdata = obsdata.getvalue()
> - finaldata.write('%20i' % len(obsdata))
> + finaldata.write(b'%20i' % len(obsdata))
> finaldata.write(obsdata)
> finaldata.seek(0)
> return wireprototypes.streamres(reader=finaldata, v1compressible=True)
>
> -abortmsg = "won't exchange obsmarkers through pushkey"
> -hint = "upgrade your client or server to use the bundle2 protocol"
> +abortmsg = b"won't exchange obsmarkers through pushkey"
> +hint = b"upgrade your client or server to use the bundle2 protocol"
>
> class HTTPCompatibleAbort(hgwebcommon.ErrorResponse, error.Abort):
> def __init__(self, message, code, hint=None):
> @@ -256,4 +256,4 @@
>
> @eh.uisetup
> def setuppushkeyforbidding(ui):
> - pushkey._namespaces['obsolete'] = (forbidpushkey, forbidlistkey)
> + pushkey._namespaces[b'obsolete'] = (forbidpushkey, forbidlistkey)
> diff --git a/hgext3rd/evolve/obshashtree.py b/hgext3rd/evolve/obshashtree.py
> --- a/hgext3rd/evolve/obshashtree.py
> +++ b/hgext3rd/evolve/obshashtree.py
> @@ -28,9 +28,9 @@
> # the obshash of its parents. This is similar to what happend for changeset
> # node where the parent is used in the computation
> @eh.command(
> - 'debugobsrelsethashtree',
> - [('', 'v0', None, 'hash on marker format "0"'),
> - ('', 'v1', None, 'hash on marker format "1" (default)')], _(''))
> + b'debugobsrelsethashtree',
> + [(b'', b'v0', None, b'hash on marker format "0"'),
> + (b'', b'v1', None, b'hash on marker format "1" (default)')], _(b''))
> def debugobsrelsethashtree(ui, repo, v0=False, v1=False):
> """display Obsolete markers, Relevant Set, Hash Tree
> changeset-node obsrelsethashtree-node
> @@ -42,14 +42,14 @@
> debug command stayed as an inspection tool. It does not seem supseful to
> upstream the command with the rest of evolve. We can safely drop it."""
> if v0 and v1:
> - raise error.Abort('cannot only specify one format')
> + raise error.Abort(b'cannot only specify one format')
> elif v0:
> treefunc = _obsrelsethashtreefm0
> else:
> treefunc = _obsrelsethashtreefm1
>
> for chg, obs in treefunc(repo):
> - ui.status('%s %s\n' % (node.hex(chg), node.hex(obs)))
> + ui.status(b'%s %s\n' % (node.hex(chg), node.hex(obs)))
>
> def _obsrelsethashtreefm0(repo):
> return _obsrelsethashtree(repo, obsolete._fm0encodeonemarker)
> @@ -61,8 +61,8 @@
> cache = []
> unfi = repo.unfiltered()
> markercache = {}
> - compat.progress(repo.ui, _("preparing locally"), 0, total=len(unfi),
> - unit=_("changesets"))
> + compat.progress(repo.ui, _(b"preparing locally"), 0, total=len(unfi),
> + unit=_(b"changesets"))
> for i in unfi:
> ctx = unfi[i]
> entry = 0
> @@ -92,7 +92,7 @@
> cache.append((ctx.node(), sha.digest()))
> else:
> cache.append((ctx.node(), node.nullid))
> - compat.progress(repo.ui, _("preparing locally"), i, total=len(unfi),
> - unit=_("changesets"))
> - compat.progress(repo.ui, _("preparing locally"), None)
> + compat.progress(repo.ui, _(b"preparing locally"), i, total=len(unfi),
> + unit=_(b"changesets"))
> + compat.progress(repo.ui, _(b"preparing locally"), None)
> return cache
> diff --git a/hgext3rd/evolve/obshistory.py b/hgext3rd/evolve/obshistory.py
> --- a/hgext3rd/evolve/obshistory.py
> +++ b/hgext3rd/evolve/obshistory.py
> @@ -30,26 +30,26 @@
> eh = exthelper.exthelper()
>
> # Config
> -efd = {'default': True} # pass a default value unless the config is registered
> +efd = {b'default': True} # pass a default value unless the config is registered
>
> @eh.extsetup
> def enableeffectflags(ui):
> item = (getattr(ui, '_knownconfig', {})
> - .get('experimental', {})
> - .get('evolution.effect-flags'))
> + .get(b'experimental', {})
> + .get(b'evolution.effect-flags'))
> if item is not None:
> item.default = True
> efd.clear()
>
> @eh.command(
> - 'obslog|olog',
> - [('G', 'graph', True, _("show the revision DAG")),
> - ('r', 'rev', [], _('show the specified revision or revset'), _('REV')),
> - ('a', 'all', False, _('show all related changesets, not only precursors')),
> - ('p', 'patch', False, _('show the patch between two obs versions')),
> - ('f', 'filternonlocal', False, _('filter out non local commits')),
> - ] + commands.formatteropts,
> - _('hg olog [OPTION]... [REV]'))
> + b'obslog|olog',
> + [(b'G', b'graph', True, _(b"show the revision DAG")),
> + (b'r', b'rev', [], _(b'show the specified revision or revset'), _(b'REV')),
> + (b'a', b'all', False, _(b'show all related changesets, not only precursors')),
> + (b'p', b'patch', False, _(b'show the patch between two obs versions')),
> + (b'f', b'filternonlocal', False, _(b'filter out non local commits')),
> + ] + commands.formatteropts,
> + _(b'hg olog [OPTION]... [REV]'))
> def cmdobshistory(ui, repo, *revs, **opts):
> """show the obsolescence history of the specified revisions
>
> @@ -78,13 +78,13 @@
>
> Returns 0 on success.
> """
> - ui.pager('obslog')
> - revs = list(revs) + opts['rev']
> + ui.pager(b'obslog')
> + revs = list(revs) + opts[b'rev']
> if not revs:
> - revs = ['.']
> + revs = [b'.']
> revs = scmutil.revrange(repo, revs)
>
> - if opts['graph']:
> + if opts[b'graph']:
> return _debugobshistorygraph(ui, repo, revs, opts)
>
> revs.reverse()
> @@ -130,7 +130,7 @@
>
> values = []
> for sset in fullsuccessorsets:
> - values.append({'successors': sset, 'markers': sset.markers})
> + values.append({b'successors': sset, b'markers': sset.markers})
>
> return values
>
> @@ -142,21 +142,21 @@
>
> def __init__(self, ui, repo, *args, **kwargs):
>
> - if kwargs.pop('obspatch', False):
> + if kwargs.pop(b'obspatch', False):
> if compat.changesetdiffer is None:
> - kwargs['matchfn'] = scmutil.matchall(repo)
> + kwargs[b'matchfn'] = scmutil.matchall(repo)
> else:
> - kwargs['differ'] = scmutil.matchall(repo)
> + kwargs[b'differ'] = scmutil.matchall(repo)
>
> super(obsmarker_printer, self).__init__(ui, repo, *args, **kwargs)
> - diffopts = kwargs.get('diffopts', {})
> + diffopts = kwargs.get(b'diffopts', {})
>
> # Compat 4.6
> - if not util.safehasattr(self, "_includediff"):
> - self._includediff = diffopts and diffopts.get('patch')
> + if not util.safehasattr(self, b"_includediff"):
> + self._includediff = diffopts and diffopts.get(b'patch')
>
> - self.template = diffopts and diffopts.get('template')
> - self.filter = diffopts and diffopts.get('filternonlocal')
> + self.template = diffopts and diffopts.get(b'template')
> + self.filter = diffopts and diffopts.get(b'filternonlocal')
>
> def show(self, ctx, copies=None, matchfn=None, **props):
> if self.buffered:
> @@ -164,12 +164,12 @@
>
> changenode = ctx.node()
>
> - _props = {"template": self.template}
> - fm = self.ui.formatter('debugobshistory', _props)
> + _props = {b"template": self.template}
> + fm = self.ui.formatter(b'debugobshistory', _props)
>
> _debugobshistorydisplaynode(fm, self.repo, changenode)
>
> - markerfm = fm.nested("markers")
> + markerfm = fm.nested(b"markers")
>
> # Succs markers
> if self.filter is False:
> @@ -185,21 +185,21 @@
> r = _successorsandmarkers(self.repo, ctx)
>
> for succset in sorted(r):
> - markers = succset["markers"]
> + markers = succset[b"markers"]
> if not markers:
> continue
> - successors = succset["successors"]
> + successors = succset[b"successors"]
> _debugobshistorydisplaysuccsandmarkers(markerfm, successors, markers, ctx.node(), self.repo, self._includediff)
>
> markerfm.end()
>
> - markerfm.plain('\n')
> + markerfm.plain(b'\n')
> fm.end()
>
> self.hunk[ctx.node()] = self.ui.popbuffer()
> else:
> ### graph output is buffered only
> - msg = 'cannot be used outside of the graphlog (yet)'
> + msg = b'cannot be used outside of the graphlog (yet)'
> raise error.ProgrammingError(msg)
>
> def flush(self, ctx):
> @@ -210,43 +210,43 @@
>
> def patchavailable(node, repo, successors):
> if node not in repo:
> - return False, "context is not local"
> + return False, b"context is not local"
>
> if len(successors) == 0:
> - return False, "no successors"
> + return False, b"no successors"
> elif len(successors) > 1:
> - return False, "too many successors (%d)" % len(successors)
> + return False, b"too many successors (%d)" % len(successors)
>
> succ = successors[0]
>
> if succ not in repo:
> - return False, "successor is unknown locally"
> + return False, b"successor is unknown locally"
>
> # Check that both node and succ have the same parents
> nodep1, nodep2 = repo[node].p1(), repo[node].p2()
> succp1, succp2 = repo[succ].p1(), repo[succ].p2()
>
> if nodep1 != succp1 or nodep2 != succp2:
> - return False, "changesets rebased"
> + return False, b"changesets rebased"
>
> return True, succ
>
> def getmarkerdescriptionpatch(repo, basedesc, succdesc):
> # description are stored without final new line,
> # add one to avoid ugly diff
> - basedesc += '\n'
> - succdesc += '\n'
> + basedesc += b'\n'
> + succdesc += b'\n'
>
> # fake file name
> - basename = "changeset-description"
> - succname = "changeset-description"
> + basename = b"changeset-description"
> + succname = b"changeset-description"
>
> d = compat.strdiff(basedesc, succdesc, basename, succname)
> uheaders, hunks = d
>
> # Copied from patch.diff
> - text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
> - patch = "\n".join(uheaders + [text])
> + text = b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
> + patch = b"\n".join(uheaders + [text])
>
> return patch
>
> @@ -332,7 +332,7 @@
> # Then choose a random node from the cycle
> breaknode = sorted(cycle)[0]
> # And display it by force
> - repo.ui.debug('obs-cycle detected, forcing display of %s\n'
> + repo.ui.debug(b'obs-cycle detected, forcing display of %s\n'
> % nodemod.short(breaknode))
> validcandidates = [breaknode]
>
> @@ -425,13 +425,13 @@
>
> displayer = obsmarker_printer(ui, repo.unfiltered(), obspatch=True, diffopts=opts, buffered=True)
> edges = graphmod.asciiedges
> - walker = _obshistorywalker(repo.unfiltered(), revs, opts.get('all', False), opts.get('filternonlocal', False))
> + walker = _obshistorywalker(repo.unfiltered(), revs, opts.get(b'all', False), opts.get(b'filternonlocal', False))
> compat.displaygraph(ui, repo, walker, displayer, edges)
>
> def _debugobshistoryrevs(ui, repo, revs, opts):
> """ Display the obsolescence history for revset
> """
> - fm = ui.formatter('debugobshistory', opts)
> + fm = ui.formatter(b'debugobshistory', opts)
> precursors = repo.obsstore.predecessors
> successors = repo.obsstore.successors
> nodec = repo.changelog.node
> @@ -447,9 +447,9 @@
>
> succs = successors.get(ctxnode, ())
>
> - markerfm = fm.nested("markers")
> + markerfm = fm.nested(b"markers")
> for successor in sorted(succs):
> - includediff = opts and opts.get("patch")
> + includediff = opts and opts.get(b"patch")
> _debugobshistorydisplaymarker(markerfm, successor, ctxnode, unfi, includediff)
> markerfm.end()
>
> @@ -473,24 +473,24 @@
> shortdescription = shortdescription.splitlines()[0]
>
> fm.startitem()
> - fm.write('node', '%s', str(ctx),
> - label="evolve.node")
> - fm.plain(' ')
> + fm.write(b'node', b'%s', str(ctx),
> + label=b"evolve.node")
> + fm.plain(b' ')
>
> - fm.write('rev', '(%d)', ctx.rev(),
> - label="evolve.rev")
> - fm.plain(' ')
> + fm.write(b'rev', b'(%d)', ctx.rev(),
> + label=b"evolve.rev")
> + fm.plain(b' ')
>
> - fm.write('shortdescription', '%s', shortdescription,
> - label="evolve.short_description")
> - fm.plain('\n')
> + fm.write(b'shortdescription', b'%s', shortdescription,
> + label=b"evolve.short_description")
> + fm.plain(b'\n')
>
> def _debugobshistorydisplaymissingctx(fm, nodewithoutctx):
> hexnode = nodemod.short(nodewithoutctx)
> fm.startitem()
> - fm.write('node', '%s', hexnode,
> - label="evolve.node evolve.missing_change_ctx")
> - fm.plain('\n')
> + fm.write(b'node', b'%s', hexnode,
> + label=b"evolve.node evolve.missing_change_ctx")
> + fm.plain(b'\n')
>
> def _debugobshistorydisplaymarker(fm, marker, node, repo, includediff=False):
> succnodes = marker[1]
> @@ -498,18 +498,18 @@
> metadata = dict(marker[3])
>
> fm.startitem()
> - fm.plain(' ')
> + fm.plain(b' ')
>
> # Detect pruned revisions
> if len(succnodes) == 0:
> - verb = 'pruned'
> + verb = b'pruned'
> else:
> - verb = 'rewritten'
> + verb = b'rewritten'
>
> - fm.write('verb', '%s', verb,
> - label="evolve.verb")
> + fm.write(b'verb', b'%s', verb,
> + label=b"evolve.verb")
>
> - effectflag = metadata.get('ef1')
> + effectflag = metadata.get(b'ef1')
> if effectflag is not None:
> try:
> effectflag = int(effectflag)
> @@ -520,50 +520,50 @@
>
> # XXX should be a dict
> if effectflag & DESCCHANGED:
> - effect.append('description')
> + effect.append(b'description')
> if effectflag & METACHANGED:
> - effect.append('meta')
> + effect.append(b'meta')
> if effectflag & USERCHANGED:
> - effect.append('user')
> + effect.append(b'user')
> if effectflag & DATECHANGED:
> - effect.append('date')
> + effect.append(b'date')
> if effectflag & BRANCHCHANGED:
> - effect.append('branch')
> + effect.append(b'branch')
> if effectflag & PARENTCHANGED:
> - effect.append('parent')
> + effect.append(b'parent')
> if effectflag & DIFFCHANGED:
> - effect.append('content')
> + effect.append(b'content')
>
> if effect:
> - fmteffect = fm.formatlist(effect, 'effect', sep=', ')
> - fm.write('effect', '(%s)', fmteffect)
> + fmteffect = fm.formatlist(effect, b'effect', sep=b', ')
> + fm.write(b'effect', b'(%s)', fmteffect)
>
> if len(succnodes) > 0:
> - fm.plain(' as ')
> + fm.plain(b' as ')
>
> shortsnodes = (nodemod.short(succnode) for succnode in sorted(succnodes))
> - nodes = fm.formatlist(shortsnodes, 'succnodes', sep=', ')
> - fm.write('succnodes', '%s', nodes,
> - label="evolve.node")
> + nodes = fm.formatlist(shortsnodes, b'succnodes', sep=b', ')
> + fm.write(b'succnodes', b'%s', nodes,
> + label=b"evolve.node")
>
> - operation = metadata.get('operation')
> + operation = metadata.get(b'operation')
> if operation:
> - fm.plain(' using ')
> - fm.write('operation', '%s', operation, label="evolve.operation")
> + fm.plain(b' using ')
> + fm.write(b'operation', b'%s', operation, label=b"evolve.operation")
>
> - fm.plain(' by ')
> + fm.plain(b' by ')
>
> - fm.write('user', '%s', metadata['user'],
> - label="evolve.user")
> - fm.plain(' ')
> + fm.write(b'user', b'%s', metadata[b'user'],
> + label=b"evolve.user")
> + fm.plain(b' ')
>
> - fm.write('date', '(%s)', fm.formatdate(date),
> - label="evolve.date")
> + fm.write(b'date', b'(%s)', fm.formatdate(date),
> + label=b"evolve.date")
>
> # initial support for showing note
> - if metadata.get('note'):
> - fm.plain('\n note: ')
> - fm.write('note', "%s", metadata['note'], label="evolve.note")
> + if metadata.get(b'note'):
> + fm.plain(b'\n note: ')
> + fm.write(b'note', b"%s", metadata[b'note'], label=b"evolve.note")
>
> # Patch display
> if includediff is True:
> @@ -581,20 +581,20 @@
>
> if descriptionpatch:
> # add the diffheader
> - diffheader = "diff -r %s -r %s changeset-description\n" % \
> + diffheader = b"diff -r %s -r %s changeset-description\n" %\
> (basectx, succctx)
> descriptionpatch = diffheader + descriptionpatch
>
> def tolist(text):
> return [text]
>
> - fm.plain("\n")
> + fm.plain(b"\n")
>
> for chunk, label in patch.difflabel(tolist, descriptionpatch):
> - chunk = chunk.strip('\t')
> - if chunk and chunk != '\n':
> - fm.plain(' ')
> - fm.write('desc-diff', '%s', chunk, label=label)
> + chunk = chunk.strip(b'\t')
> + if chunk and chunk != b'\n':
> + fm.plain(b' ')
> + fm.write(b'desc-diff', b'%s', chunk, label=label)
>
> # Content patch
> diffopts = patch.diffallopts(repo.ui, {})
> @@ -603,18 +603,18 @@
> for chunk, label in patch.diffui(repo, node, succ, matchfn,
> opts=diffopts):
> if firstline:
> - fm.plain('\n')
> + fm.plain(b'\n')
> firstline = False
> - if chunk and chunk != '\n':
> - fm.plain(' ')
> - fm.write('patch', '%s', chunk, label=label)
> + if chunk and chunk != b'\n':
> + fm.plain(b' ')
> + fm.write(b'patch', b'%s', chunk, label=label)
> else:
> - nopatch = " (No patch available, %s)" % _patchavailable[1]
> - fm.plain("\n")
> + nopatch = b" (No patch available, %s)" % _patchavailable[1]
> + fm.plain(b"\n")
> # TODO: should be in json too
> fm.plain(nopatch)
>
> - fm.plain("\n")
> + fm.plain(b"\n")
>
> def _debugobshistorydisplaysuccsandmarkers(fm, succnodes, markers, node, repo, includediff=False):
> """
> @@ -622,17 +622,17 @@
> to accept multiple markers as input.
> """
> fm.startitem()
> - fm.plain(' ')
> + fm.plain(b' ')
>
> # Detect pruned revisions
> - verb = _successorsetverb(succnodes, markers)["verb"]
> + verb = _successorsetverb(succnodes, markers)[b"verb"]
>
> - fm.write('verb', '%s', verb,
> - label="evolve.verb")
> + fm.write(b'verb', b'%s', verb,
> + label=b"evolve.verb")
>
> # Effect flag
> metadata = [dict(marker[3]) for marker in markers]
> - ef1 = [data.get('ef1') for data in metadata]
> + ef1 = [data.get(b'ef1') for data in metadata]
>
> effectflag = 0
> for ef in ef1:
> @@ -644,45 +644,45 @@
>
> # XXX should be a dict
> if effectflag & DESCCHANGED:
> - effect.append('description')
> + effect.append(b'description')
> if effectflag & METACHANGED:
> - effect.append('meta')
> + effect.append(b'meta')
> if effectflag & USERCHANGED:
> - effect.append('user')
> + effect.append(b'user')
> if effectflag & DATECHANGED:
> - effect.append('date')
> + effect.append(b'date')
> if effectflag & BRANCHCHANGED:
> - effect.append('branch')
> + effect.append(b'branch')
> if effectflag & PARENTCHANGED:
> - effect.append('parent')
> + effect.append(b'parent')
> if effectflag & DIFFCHANGED:
> - effect.append('content')
> + effect.append(b'content')
>
> if effect:
> - fmteffect = fm.formatlist(effect, 'effect', sep=', ')
> - fm.write('effect', '(%s)', fmteffect)
> + fmteffect = fm.formatlist(effect, b'effect', sep=b', ')
> + fm.write(b'effect', b'(%s)', fmteffect)
>
> if len(succnodes) > 0:
> - fm.plain(' as ')
> + fm.plain(b' as ')
>
> shortsnodes = (nodemod.short(succnode) for succnode in sorted(succnodes))
> - nodes = fm.formatlist(shortsnodes, 'succnodes', sep=', ')
> - fm.write('succnodes', '%s', nodes,
> - label="evolve.node")
> + nodes = fm.formatlist(shortsnodes, b'succnodes', sep=b', ')
> + fm.write(b'succnodes', b'%s', nodes,
> + label=b"evolve.node")
>
> # Operations
> operations = compat.markersoperations(markers)
> if operations:
> - fm.plain(' using ')
> - fm.write('operation', '%s', ", ".join(operations), label="evolve.operation")
> + fm.plain(b' using ')
> + fm.write(b'operation', b'%s', b", ".join(operations), label=b"evolve.operation")
>
> - fm.plain(' by ')
> + fm.plain(b' by ')
>
> # Users
> users = compat.markersusers(markers)
> - fm.write('user', '%s', ", ".join(users),
> - label="evolve.user")
> - fm.plain(' ')
> + fm.write(b'user', b'%s', b", ".join(users),
> + label=b"evolve.user")
> + fm.plain(b' ')
>
> # Dates
> dates = compat.markersdates(markers)
> @@ -691,10 +691,10 @@
> max_date = max(dates)
>
> if min_date == max_date:
> - fm.write("date", "(at %s)", fm.formatdate(min_date), label="evolve.date")
> + fm.write(b"date", b"(at %s)", fm.formatdate(min_date), label=b"evolve.date")
> else:
> - fm.write("date", "(between %s and %s)", fm.formatdate(min_date),
> - fm.formatdate(max_date), label="evolve.date")
> + fm.write(b"date", b"(between %s and %s)", fm.formatdate(min_date),
> + fm.formatdate(max_date), label=b"evolve.date")
>
> # initial support for showing note
> # if metadata.get('note'):
> @@ -717,20 +717,20 @@
>
> if descriptionpatch:
> # add the diffheader
> - diffheader = "diff -r %s -r %s changeset-description\n" % \
> + diffheader = b"diff -r %s -r %s changeset-description\n" %\
> (basectx, succctx)
> descriptionpatch = diffheader + descriptionpatch
>
> def tolist(text):
> return [text]
>
> - fm.plain("\n")
> + fm.plain(b"\n")
>
> for chunk, label in patch.difflabel(tolist, descriptionpatch):
> - chunk = chunk.strip('\t')
> - if chunk and chunk != '\n':
> - fm.plain(' ')
> - fm.write('desc-diff', '%s', chunk, label=label)
> + chunk = chunk.strip(b'\t')
> + if chunk and chunk != b'\n':
> + fm.plain(b' ')
> + fm.write(b'desc-diff', b'%s', chunk, label=label)
>
> # Content patch
> diffopts = patch.diffallopts(repo.ui, {})
> @@ -739,18 +739,18 @@
> for chunk, label in patch.diffui(repo, node, succ, matchfn,
> opts=diffopts):
> if firstline:
> - fm.plain('\n')
> + fm.plain(b'\n')
> firstline = False
> - if chunk and chunk != '\n':
> - fm.plain(' ')
> - fm.write('patch', '%s', chunk, label=label)
> + if chunk and chunk != b'\n':
> + fm.plain(b' ')
> + fm.write(b'patch', b'%s', chunk, label=label)
> else:
> - nopatch = " (No patch available, %s)" % _patchavailable[1]
> - fm.plain("\n")
> + nopatch = b" (No patch available, %s)" % _patchavailable[1]
> + fm.plain(b"\n")
> # TODO: should be in json too
> fm.plain(nopatch)
>
> - fm.plain("\n")
> + fm.plain(b"\n")
>
> # logic around storing and using effect flags
> DESCCHANGED = 1 << 0 # action changed the description
> @@ -762,11 +762,11 @@
> BRANCHCHANGED = 1 << 6 # the branch changed
>
> METABLACKLIST = [
> - re.compile('^__touch-noise__$'),
> - re.compile('^branch$'),
> - re.compile('^.*-source$'),
> - re.compile('^.*_source$'),
> - re.compile('^source$'),
> + re.compile(b'^__touch-noise__$'),
> + re.compile(b'^branch$'),
> + re.compile(b'^.*-source$'),
> + re.compile(b'^.*_source$'),
> + re.compile(b'^source$'),
> ]
>
> def ismetablacklisted(metaitem):
> @@ -810,17 +810,17 @@
>
> if len(successorssets) == 0:
> # The commit has been pruned
> - return 'pruned'
> + return b'pruned'
> elif len(successorssets) > 1:
> - return 'diverged'
> + return b'diverged'
> else:
> # No divergence, only one set of successors
> successors = successorssets[0]
>
> if len(successors) == 1:
> - return 'superseed'
> + return b'superseed'
> else:
> - return 'superseed_split'
> + return b'superseed_split'
>
> def _getobsfateandsuccs(repo, revnode, successorssets=None):
> """ Return a tuple containing:
> @@ -853,8 +853,8 @@
> dates = [m[4] for m in markers]
>
> return {
> - 'min_date': min(dates),
> - 'max_date': max(dates)
> + b'min_date': min(dates),
> + b'max_date': max(dates)
> }
>
> def _successorsetusers(successorset, markers):
> @@ -865,18 +865,18 @@
>
> # Check that user is present in meta
> markersmeta = [dict(m[3]) for m in markers]
> - users = set(meta.get('user') for meta in markersmeta if meta.get('user'))
> + users = set(meta.get(b'user') for meta in markersmeta if meta.get(b'user'))
>
> - return {'users': sorted(users)}
> + return {b'users': sorted(users)}
>
> VERBMAPPING = {
> - DESCCHANGED: "reworded",
> - METACHANGED: "meta-changed",
> - USERCHANGED: "reauthored",
> - DATECHANGED: "date-changed",
> - BRANCHCHANGED: "branch-changed",
> - PARENTCHANGED: "rebased",
> - DIFFCHANGED: "amended"
> + DESCCHANGED: b"reworded",
> + METACHANGED: b"meta-changed",
> + USERCHANGED: b"reauthored",
> + DATECHANGED: b"date-changed",
> + BRANCHCHANGED: b"branch-changed",
> + PARENTCHANGED: b"rebased",
> + DIFFCHANGED: b"amended"
> }
>
> def _successorsetverb(successorset, markers):
> @@ -884,12 +884,12 @@
> """
> verb = None
> if not successorset:
> - verb = 'pruned'
> + verb = b'pruned'
> elif len(successorset) == 1:
> # Check for effect flag
>
> metadata = [dict(marker[3]) for marker in markers]
> - ef1 = [data.get('ef1') for data in metadata]
> + ef1 = [data.get(b'ef1') for data in metadata]
>
> if all(ef1):
> combined = 0
> @@ -901,28 +901,28 @@
> verb = VERBMAPPING[combined]
>
> if verb is None:
> - verb = 'rewritten'
> + verb = b'rewritten'
> else:
> - verb = 'split'
> - return {'verb': verb}
> + verb = b'split'
> + return {b'verb': verb}
>
> # Use a more advanced version of obsfateverb that uses effect-flag
> -if util.safehasattr(obsutil, 'obsfateverb'):
> +if util.safehasattr(obsutil, b'obsfateverb'):
>
> - @eh.wrapfunction(obsutil, 'obsfateverb')
> + @eh.wrapfunction(obsutil, b'obsfateverb')
> def obsfateverb(orig, *args, **kwargs):
> - return _successorsetverb(*args, **kwargs)['verb']
> + return _successorsetverb(*args, **kwargs)[b'verb']
>
> # Hijack callers of successorsetverb
> -elif util.safehasattr(obsutil, 'obsfateprinter'):
> +elif util.safehasattr(obsutil, b'obsfateprinter'):
>
> - @eh.wrapfunction(obsutil, 'obsfateprinter')
> + @eh.wrapfunction(obsutil, b'obsfateprinter')
> def obsfateprinter(orig, successors, markers, ui):
>
> def closure(successors):
> - return _successorsetverb(successors, markers)['verb']
> + return _successorsetverb(successors, markers)[b'verb']
>
> - if not util.safehasattr(obsutil, 'successorsetverb'):
> + if not util.safehasattr(obsutil, b'successorsetverb'):
> return orig(successors, markers, ui)
>
> # Save the old value
> @@ -996,8 +996,8 @@
>
> # Format basic data
> data = {
> - "successors": sorted(successorset),
> - "markers": sorted(markers)
> + b"successors": sorted(successorset),
> + b"markers": sorted(markers)
> }
>
> # Call an extensible list of functions to override or add new data
> diff --git a/hgext3rd/evolve/rewind.py b/hgext3rd/evolve/rewind.py
> --- a/hgext3rd/evolve/rewind.py
> +++ b/hgext3rd/evolve/rewind.py
> @@ -26,14 +26,14 @@
> identicalflag = 4
>
> @eh.command(
> - 'rewind|undo',
> - [('', 'to', [], _("rewind to these revisions"), _('REV')),
> - ('', 'as-divergence', None, _("preserve current latest successors")),
> - ('', 'exact', None, _("only rewind explicitly selected revisions")),
> - ('', 'from', [],
> - _("rewind these revisions to their predecessors"), _('REV')),
> - ],
> - _(''),
> + b'rewind|undo',
> + [(b'', b'to', [], _(b"rewind to these revisions"), _(b'REV')),
> + (b'', b'as-divergence', None, _(b"preserve current latest successors")),
> + (b'', b'exact', None, _(b"only rewind explicitly selected revisions")),
> + (b'', b'from', [],
> + _(b"rewind these revisions to their predecessors"), _(b'REV')),
> + ],
> + _(b''),
> helpbasic=True)
> def rewind(ui, repo, **opts):
> """rewind a stack of changesets to a previous state
> @@ -85,20 +85,20 @@
>
> rewinded = _select_rewinded(repo, opts)
>
> - if not opts['as_divergence']:
> + if not opts[b'as_divergence']:
> for rev in rewinded:
> ctx = unfi[rev]
> ssets = obsutil.successorssets(repo, ctx.node(), sscache)
> if 1 < len(ssets):
> - msg = _('rewind confused by divergence on %s') % ctx
> - hint = _('solve divergence first or use "--as-divergence"')
> + msg = _(b'rewind confused by divergence on %s') % ctx
> + hint = _(b'solve divergence first or use "--as-divergence"')
> raise error.Abort(msg, hint=hint)
> if ssets and ssets[0]:
> for succ in ssets[0]:
> successorsmap[succ].add(ctx.node())
>
> # Check that we can rewind these changesets
> - with repo.transaction('rewind'):
> + with repo.transaction(b'rewind'):
> for rev in sorted(rewinded):
> ctx = unfi[rev]
> rewindmap[ctx.node()] = _revive_revision(unfi, rev, rewindmap)
> @@ -113,36 +113,36 @@
> relationships.append(rel)
> if wctxp.node() == source:
> update_target = newdest[-1]
> - obsolete.createmarkers(unfi, relationships, operation='rewind')
> + obsolete.createmarkers(unfi, relationships, operation=b'rewind')
> if update_target is not None:
> hg.updaterepo(repo, update_target, False)
>
> - repo.ui.status(_('rewinded to %d changesets\n') % len(rewinded))
> + repo.ui.status(_(b'rewinded to %d changesets\n') % len(rewinded))
> if relationships:
> - repo.ui.status(_('(%d changesets obsoleted)\n') % len(relationships))
> + repo.ui.status(_(b'(%d changesets obsoleted)\n') % len(relationships))
> if update_target is not None:
> - ui.status(_('working directory is now at %s\n') % repo['.'])
> + ui.status(_(b'working directory is now at %s\n') % repo[b'.'])
>
> def _select_rewinded(repo, opts):
> """select the revision we shoudl rewind to
> """
> unfi = repo.unfiltered()
> rewinded = set()
> - revsto = opts.get('to')
> - revsfrom = opts.get('from')
> + revsto = opts.get(b'to')
> + revsfrom = opts.get(b'from')
> if not (revsto or revsfrom):
> - revsfrom.append('.')
> + revsfrom.append(b'.')
> if revsto:
> rewinded.update(scmutil.revrange(repo, revsto))
> if revsfrom:
> succs = scmutil.revrange(repo, revsfrom)
> - rewinded.update(unfi.revs('predecessors(%ld)', succs))
> + rewinded.update(unfi.revs(b'predecessors(%ld)', succs))
>
> if not rewinded:
> - raise error.Abort('no revision to rewind to')
> + raise error.Abort(b'no revision to rewind to')
>
> - if not opts['exact']:
> - rewinded = unfi.revs('obsolete() and ::%ld', rewinded)
> + if not opts[b'exact']:
> + rewinded = unfi.revs(b'obsolete() and ::%ld', rewinded)
>
> return sorted(rewinded)
>
> @@ -152,27 +152,27 @@
> ctx = unfi[rev]
> extra = ctx.extra().copy()
> # rewind hash should be unique over multiple rewind.
> - user = unfi.ui.config('devel', 'user.obsmarker')
> + user = unfi.ui.config(b'devel', b'user.obsmarker')
> if not user:
> user = unfi.ui.username()
> - date = unfi.ui.configdate('devel', 'default-date')
> + date = unfi.ui.configdate(b'devel', b'default-date')
> if date is None:
> date = compat.makedate()
> - noise = "%s\0%s\0%d\0%d" % (ctx.node(), user, date[0], date[1])
> - extra['__rewind-hash__'] = hashlib.sha256(noise).hexdigest()
> + noise = b"%s\0%s\0%d\0%d" % (ctx.node(), user, date[0], date[1])
> + extra[b'__rewind-hash__'] = hashlib.sha256(noise).hexdigest()
>
> p1 = ctx.p1().node()
> p1 = rewindmap.get(p1, p1)
> p2 = ctx.p2().node()
> p2 = rewindmap.get(p2, p2)
>
> - extradict = {'extra': extra}
> + extradict = {b'extra': extra}
>
> new, unusedvariable = rewriteutil.rewrite(unfi, ctx, [], ctx,
> [p1, p2],
> commitopts=extradict)
>
> obsolete.createmarkers(unfi, [(ctx, (unfi[new],))],
> - flag=identicalflag, operation='rewind')
> + flag=identicalflag, operation=b'rewind')
>
> return new
> diff --git a/hgext3rd/evolve/rewriteutil.py b/hgext3rd/evolve/rewriteutil.py
> --- a/hgext3rd/evolve/rewriteutil.py
> +++ b/hgext3rd/evolve/rewriteutil.py
> @@ -44,37 +44,37 @@
> numrevs = len(revs)
> if numrevs < maxrevs:
> shorts = [node.short(tonode(r)) for r in revs]
> - summary = ', '.join(shorts)
> + summary = b', '.join(shorts)
> else:
> first = revs.first()
> - summary = _('%s and %d others')
> + summary = _(b'%s and %d others')
> summary %= (node.short(tonode(first)), numrevs - 1)
> return summary
>
> -def precheck(repo, revs, action='rewrite'):
> +def precheck(repo, revs, action=b'rewrite'):
> """check if <revs> can be rewritten
>
> <action> can be used to control the commit message.
> """
> if node.nullrev in revs:
> - msg = _("cannot %s the null revision") % (action)
> - hint = _("no changeset checked out")
> + msg = _(b"cannot %s the null revision") % (action)
> + hint = _(b"no changeset checked out")
> raise error.Abort(msg, hint=hint)
> - if any(util.safehasattr(r, 'rev') for r in revs):
> - msg = "rewriteutil.precheck called with ctx not revs"
> + if any(util.safehasattr(r, b'rev') for r in revs):
> + msg = b"rewriteutil.precheck called with ctx not revs"
> repo.ui.develwarn(msg)
> revs = (r.rev() for r in revs)
> - publicrevs = repo.revs('%ld and public()', revs)
> + publicrevs = repo.revs(b'%ld and public()', revs)
> if publicrevs:
> summary = _formatrevs(repo, publicrevs)
> - msg = _("cannot %s public changesets: %s") % (action, summary)
> - hint = _("see 'hg help phases' for details")
> + msg = _(b"cannot %s public changesets: %s") % (action, summary)
> + hint = _(b"see 'hg help phases' for details")
> raise error.Abort(msg, hint=hint)
> newunstable = disallowednewunstable(repo, revs)
> if newunstable:
> - msg = _("%s will orphan %i descendants")
> + msg = _(b"%s will orphan %i descendants")
> msg %= (action, len(newunstable))
> - hint = _("see 'hg help evolution.instability'")
> + hint = _(b"see 'hg help evolution.instability'")
> raise error.Abort(msg, hint=hint)
>
> def bookmarksupdater(repo, oldid, tr):
> @@ -96,22 +96,22 @@
> allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
> if allowunstable:
> return revset.baseset()
> - return repo.revs("(%ld::) - %ld", revs, revs)
> + return repo.revs(b"(%ld::) - %ld", revs, revs)
>
> def foldcheck(repo, revs):
> """check that <revs> can be folded"""
> - precheck(repo, revs, action='fold')
> - roots = repo.revs('roots(%ld)', revs)
> + precheck(repo, revs, action=b'fold')
> + roots = repo.revs(b'roots(%ld)', revs)
> if len(roots) > 1:
> - raise error.Abort(_("cannot fold non-linear revisions "
> - "(multiple roots given)"))
> + raise error.Abort(_(b"cannot fold non-linear revisions "
> + b"(multiple roots given)"))
> root = repo[roots.first()]
> if root.phase() <= phases.public:
> - raise error.Abort(_("cannot fold public revisions"))
> - heads = repo.revs('heads(%ld)', revs)
> + raise error.Abort(_(b"cannot fold public revisions"))
> + heads = repo.revs(b'heads(%ld)', revs)
> if len(heads) > 1:
> - raise error.Abort(_("cannot fold non-linear revisions "
> - "(multiple heads given)"))
> + raise error.Abort(_(b"cannot fold non-linear revisions "
> + b"(multiple heads given)"))
> head = repo[heads.first()]
> return root, head
>
> @@ -120,14 +120,14 @@
> try:
> wlock = repo.wlock()
> lock = repo.lock()
> - tr = repo.transaction('prune')
> + tr = repo.transaction(b'prune')
> bmchanges = []
> for bookmark in bookmarks:
> bmchanges.append((bookmark, None))
> repo._bookmarks.applychanges(repo, tr, bmchanges)
> tr.close()
> for bookmark in sorted(bookmarks):
> - repo.ui.write(_("bookmark '%s' deleted\n") % bookmark)
> + repo.ui.write(_(b"bookmark '%s' deleted\n") % bookmark)
> finally:
> lockmod.release(tr, lock, wlock)
>
> @@ -143,14 +143,14 @@
> """
> repomarks = repo._bookmarks
> if not bookmarks.issubset(repomarks):
> - raise error.Abort(_("bookmark '%s' not found") %
> - ','.join(sorted(bookmarks - set(repomarks.keys()))))
> + raise error.Abort(_(b"bookmark '%s' not found") %
> + b','.join(sorted(bookmarks - set(repomarks.keys()))))
>
> # If the requested bookmark is not the only one pointing to a
> # a revision we have to only delete the bookmark and not strip
> # anything. revsets cannot detect that case.
> nodetobookmarks = {}
> - for mark, bnode in repomarks.iteritems():
> + for mark, bnode in repomarks.items():
> nodetobookmarks.setdefault(bnode, []).append(mark)
> for marks in nodetobookmarks.values():
> if bookmarks.issuperset(marks):
> @@ -170,9 +170,9 @@
> try:
> wlock = repo.wlock()
> lock = repo.lock()
> - tr = repo.transaction('rewrite')
> + tr = repo.transaction(b'rewrite')
> if len(old.parents()) > 1: # XXX remove this unnecessary limitation.
> - raise error.Abort(_('cannot amend merge changesets'))
> + raise error.Abort(_(b'cannot amend merge changesets'))
> base = old.p1()
> updatebookmarks = bookmarksupdater(repo, old.node(), tr)
>
> @@ -213,13 +213,13 @@
> if not message:
> message = old.description()
>
> - user = commitopts.get('user') or old.user()
> + user = commitopts.get(b'user') or old.user()
> # TODO: In case not date is given, we should take the old commit date
> # if we are working one one changeset or mimic the fold behavior about
> # date
> - date = commitopts.get('date') or None
> - extra = dict(commitopts.get('extra', old.extra()))
> - extra['branch'] = head.branch()
> + date = commitopts.get(b'date') or None
> + extra = dict(commitopts.get(b'extra', old.extra()))
> + extra[b'branch'] = head.branch()
>
> new = context.memctx(repo,
> parents=newbases,
> @@ -230,7 +230,7 @@
> date=date,
> extra=extra)
>
> - if commitopts.get('edit'):
> + if commitopts.get(b'edit'):
> new._text = cmdutil.commitforceeditor(repo, new, [])
> revcount = len(repo)
> newid = repo.commitctx(new)
> diff --git a/hgext3rd/evolve/safeguard.py b/hgext3rd/evolve/safeguard.py
> --- a/hgext3rd/evolve/safeguard.py
> +++ b/hgext3rd/evolve/safeguard.py
> @@ -20,9 +20,9 @@
> eh = exthelper.exthelper()
>
> # hg <= 4.8
> -if 'auto-publish' not in configitems.coreitems.get('experimental', {}):
> +if b'auto-publish' not in configitems.coreitems.get(b'experimental', {}):
>
> - eh.configitem('experimental', 'auto-publish', 'publish')
> + eh.configitem(b'experimental', b'auto-publish', b'publish')
>
> @eh.reposetup
> def setuppublishprevention(ui, repo):
> @@ -31,25 +31,25 @@
>
> def checkpush(self, pushop):
> super(noautopublishrepo, self).checkpush(pushop)
> - behavior = self.ui.config('experimental', 'auto-publish')
> - nocheck = behavior not in ('warn', 'abort')
> + behavior = self.ui.config(b'experimental', b'auto-publish')
> + nocheck = behavior not in (b'warn', b'abort')
> if nocheck or getattr(pushop, 'publish', False):
> return
> - remotephases = pushop.remote.listkeys('phases')
> - publishing = remotephases.get('publishing', False)
> + remotephases = pushop.remote.listkeys(b'phases')
> + publishing = remotephases.get(b'publishing', False)
> if publishing:
> if pushop.revs is None:
> - published = self.filtered('served').revs("not public()")
> + published = self.filtered(b'served').revs(b"not public()")
> else:
> - published = self.revs("::%ln - public()", pushop.revs)
> + published = self.revs(b"::%ln - public()", pushop.revs)
> if published:
> - if behavior == 'warn':
> - self.ui.warn(_('%i changesets about to be published\n')
> + if behavior == b'warn':
> + self.ui.warn(_(b'%i changesets about to be published\n')
> % len(published))
> - elif behavior == 'abort':
> - msg = _('push would publish 1 changesets')
> - hint = _("behavior controlled by "
> - "'experimental.auto-publish' config")
> + elif behavior == b'abort':
> + msg = _(b'push would publish 1 changesets')
> + hint = _(b"behavior controlled by "
> + b"'experimental.auto-publish' config")
> raise error.Abort(msg, hint=hint)
>
> repo.__class__ = noautopublishrepo
> diff --git a/hgext3rd/evolve/serveronly.py b/hgext3rd/evolve/serveronly.py
> --- a/hgext3rd/evolve/serveronly.py
> +++ b/hgext3rd/evolve/serveronly.py
> @@ -24,7 +24,7 @@
> obsexchange,
> )
> except ValueError as exc:
> - if str(exc) != 'Attempted relative import in non-package':
> + if str(exc) != b'Attempted relative import in non-package':
> raise
> # extension imported using direct path
> sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
> @@ -53,11 +53,11 @@
>
> @eh.reposetup
> def default2evolution(ui, repo):
> - evolveopts = repo.ui.configlist('experimental', 'evolution')
> + evolveopts = repo.ui.configlist(b'experimental', b'evolution')
> if not evolveopts:
> - evolveopts = 'all'
> - repo.ui.setconfig('experimental', 'evolution', evolveopts)
> - if obsolete.isenabled(repo, 'exchange'):
> + evolveopts = b'all'
> + repo.ui.setconfig(b'experimental', b'evolution', evolveopts)
> + if obsolete.isenabled(repo, b'exchange'):
> # if no config explicitly set, disable bundle1
> - if not isinstance(repo.ui.config('server', 'bundle1'), str):
> - repo.ui.setconfig('server', 'bundle1', False)
> + if not isinstance(repo.ui.config(b'server', b'bundle1'), str):
> + repo.ui.setconfig(b'server', b'bundle1', False)
> diff --git a/hgext3rd/evolve/stablerange.py b/hgext3rd/evolve/stablerange.py
> --- a/hgext3rd/evolve/stablerange.py
> +++ b/hgext3rd/evolve/stablerange.py
> @@ -64,23 +64,23 @@
> return ranges
>
> _stablerangemethodmap = {
> - 'branchpoint': lambda repo: stablerange(),
> - 'default': lambda repo: repo.stablerange,
> - 'basic-branchpoint': lambda repo: stablerangebasic(),
> - 'basic-mergepoint': lambda repo: stablerangedummy_mergepoint(),
> - 'mergepoint': lambda repo: stablerange_mergepoint(),
> + b'branchpoint': lambda repo: stablerange(),
> + b'default': lambda repo: repo.stablerange,
> + b'basic-branchpoint': lambda repo: stablerangebasic(),
> + b'basic-mergepoint': lambda repo: stablerangedummy_mergepoint(),
> + b'mergepoint': lambda repo: stablerange_mergepoint(),
> }
>
> @eh.command(
> - 'debugstablerange',
> + b'debugstablerange',
> [
> - ('r', 'rev', [], 'operate on (rev, 0) ranges for rev in REVS'),
> - ('', 'subranges', False, 'recursively display data for subranges too'),
> - ('', 'verify', False, 'checks subranges content (EXPENSIVE)'),
> - ('', 'method', 'branchpoint',
> - 'method to use, one of "branchpoint", "mergepoint"')
> - ],
> - _(''))
> + (b'r', b'rev', [], b'operate on (rev, 0) ranges for rev in REVS'),
> + (b'', b'subranges', False, b'recursively display data for subranges too'),
> + (b'', b'verify', False, b'checks subranges content (EXPENSIVE)'),
> + (b'', b'method', b'branchpoint',
> + b'method to use, one of "branchpoint", "mergepoint"')
> + ],
> + _(b''))
> def debugstablerange(ui, repo, **opts):
> """display standard stable subrange for a set of ranges
>
> @@ -88,11 +88,11 @@
> --verbose to get the extra details in ().
> """
> short = nodemod.short
> - revs = scmutil.revrange(repo, opts['rev'])
> + revs = scmutil.revrange(repo, opts[b'rev'])
> if not revs:
> - raise error.Abort('no revisions specified')
> + raise error.Abort(b'no revisions specified')
> if ui.verbose:
> - template = '%s-%d (%d, %d, %d)'
> + template = b'%s-%d (%d, %d, %d)'
>
> def _rangestring(repo, rangeid):
> return template % (
> @@ -103,7 +103,7 @@
> length(unfi, rangeid)
> )
> else:
> - template = '%s-%d'
> + template = b'%s-%d'
>
> def _rangestring(repo, rangeid):
> return template % (
> @@ -114,10 +114,10 @@
> unfi = repo.unfiltered()
> node = unfi.changelog.node
>
> - method = opts['method']
> + method = opts[b'method']
> getstablerange = _stablerangemethodmap.get(method)
> if getstablerange is None:
> - raise error.Abort('unknown stable sort method: "%s"' % method)
> + raise error.Abort(b'unknown stable sort method: "%s"' % method)
>
> stablerange = getstablerange(unfi)
> depth = stablerange.depthrev
> @@ -125,28 +125,28 @@
> subranges = stablerange.subranges
> stablerange.warmup(repo, max(revs))
>
> - if opts['subranges']:
> + if opts[b'subranges']:
> ranges = subrangesclosure(unfi, stablerange, revs)
> else:
> ranges = [(r, 0) for r in revs]
>
> for r in ranges:
> subs = subranges(unfi, r)
> - subsstr = ', '.join(_rangestring(unfi, s) for s in subs)
> + subsstr = b', '.join(_rangestring(unfi, s) for s in subs)
> rstr = _rangestring(unfi, r)
> - if opts['verify']:
> - status = 'leaf'
> + if opts[b'verify']:
> + status = b'leaf'
> if 1 < length(unfi, r):
> - status = 'complete'
> + status = b'complete'
> revs = set(stablerange.revsfromrange(unfi, r))
> subrevs = set()
> for s in subs:
> subrevs.update(stablerange.revsfromrange(unfi, s))
> if revs != subrevs:
> - status = 'missing'
> - ui.status('%s [%s] - %s\n' % (rstr, status, subsstr))
> + status = b'missing'
> + ui.status(b'%s [%s] - %s\n' % (rstr, status, subsstr))
> else:
> - ui.status('%s - %s\n' % (rstr, subsstr))
> + ui.status(b'%s - %s\n' % (rstr, subsstr))
>
> class abstractstablerange(object):
> """The official API for a stablerange"""
> @@ -214,7 +214,7 @@
>
> def depthrev(self, repo, rev):
> """depth a revision"""
> - return len(repo.revs('::%d', rev))
> + return len(repo.revs(b'::%d', rev))
>
> def revsfromrange(self, repo, rangeid):
> """return revision contained in a range
> @@ -620,12 +620,12 @@
> rangeheap = []
> for idx, r in enumerate(revs):
> if not idx % 1000:
> - compat.progress(ui, _("filling depth cache"), idx, total=nbrevs,
> - unit=_("changesets"))
> + compat.progress(ui, _(b"filling depth cache"), idx, total=nbrevs,
> + unit=_(b"changesets"))
> # warm up depth
> self.depthrev(repo, r)
> rangeheap.append((-r, (r, 0)))
> - compat.progress(ui, _("filling depth cache"), None, total=nbrevs)
> + compat.progress(ui, _(b"filling depth cache"), None, total=nbrevs)
>
> heappop = heapq.heappop
> heappush = heapq.heappush
> @@ -646,8 +646,8 @@
> progress_new = time.time()
> if (1 < progress_each) and (0.1 < progress_new - progress_last):
> progress_each /= 10
> - compat.progress(ui, _("filling stablerange cache"), seen,
> - total=nbrevs, unit=_("changesets"))
> + compat.progress(ui, _(b"filling stablerange cache"), seen,
> + total=nbrevs, unit=_(b"changesets"))
> progress_last = progress_new
> seen += 1
> original.remove(value) # might have been added from other source
> @@ -656,13 +656,13 @@
> for sub in self.subranges(repo, rangeid):
> if self._getsub(sub) is None:
> heappush(rangeheap, (-sub[0], sub))
> - compat.progress(ui, _("filling stablerange cache"), None, total=nbrevs)
> + compat.progress(ui, _(b"filling stablerange cache"), None, total=nbrevs)
>
> self._tiprev = upto
> self._tipnode = cl.node(upto)
>
> duration = util.timer() - starttime
> - repo.ui.log('evoext-cache', 'updated stablerange cache in %.4f seconds\n',
> + repo.ui.log(b'evoext-cache', b'updated stablerange cache in %.4f seconds\n',
> duration)
>
> def subranges(self, repo, rangeid):
> diff --git a/hgext3rd/evolve/stablerangecache.py b/hgext3rd/evolve/stablerangecache.py
> --- a/hgext3rd/evolve/stablerangecache.py
> +++ b/hgext3rd/evolve/stablerangecache.py
> @@ -98,8 +98,8 @@
> warned_long = True
> if (1 < progress_each) and (0.1 < progress_new - progress_last):
> progress_each /= 10
> - compat.progress(ui, _("filling stablerange cache"), seen,
> - total=total, unit=_("changesets"))
> + compat.progress(ui, _(b"filling stablerange cache"), seen,
> + total=total, unit=_(b"changesets"))
> progress_last = progress_new
> seen += 1
> original.remove(rangeid) # might have been added from other source
> @@ -108,7 +108,7 @@
> for sub in self.subranges(repo, rangeid):
> if self._getsub(sub) is None:
> heappush(rangeheap, sub)
> - compat.progress(ui, _("filling stablerange cache"), None, total=total)
> + compat.progress(ui, _(b"filling stablerange cache"), None, total=total)
>
> def clear(self, reset=False):
> super(stablerangeondiskbase, self).clear()
> @@ -131,23 +131,23 @@
> FOREIGN KEY (suprev, supidx) REFERENCES range(rev, idx),
> FOREIGN KEY (subrev, subidx) REFERENCES range(rev, idx)
> );""",
> - "CREATE INDEX subranges_index ON subranges (suprev, supidx);",
> - "CREATE INDEX superranges_index ON subranges (subrev, subidx);",
> - "CREATE INDEX range_index ON range (rev, idx);",
> + b"CREATE INDEX subranges_index ON subranges (suprev, supidx);",
> + b"CREATE INDEX superranges_index ON subranges (subrev, subidx);",
> + b"CREATE INDEX range_index ON range (rev, idx);",
> """CREATE TABLE meta(schemaversion INTEGER NOT NULL,
> tiprev INTEGER NOT NULL,
> tipnode BLOB NOT NULL
> );""",
> ]
> -_newmeta = "INSERT INTO meta (schemaversion, tiprev, tipnode) VALUES (?,?,?);"
> -_updatemeta = "UPDATE meta SET tiprev = ?, tipnode = ?;"
> -_updaterange = "INSERT INTO range(rev, idx) VALUES (?,?);"
> +_newmeta = b"INSERT INTO meta (schemaversion, tiprev, tipnode) VALUES (?,?,?);"
> +_updatemeta = b"UPDATE meta SET tiprev = ?, tipnode = ?;"
> +_updaterange = b"INSERT INTO range(rev, idx) VALUES (?,?);"
> _updatesubranges = """INSERT
> INTO subranges(listidx, suprev, supidx, subrev, subidx)
> VALUES (?,?,?,?,?);"""
> -_queryexist = "SELECT name FROM sqlite_master WHERE type='table' AND name='meta';"
> -_querymeta = "SELECT schemaversion, tiprev, tipnode FROM meta;"
> -_queryrange = "SELECT * FROM range WHERE (rev = ? AND idx = ?);"
> +_queryexist = b"SELECT name FROM sqlite_master WHERE type='table' AND name='meta';"
> +_querymeta = b"SELECT schemaversion, tiprev, tipnode FROM meta;"
> +_queryrange = b"SELECT * FROM range WHERE (rev = ? AND idx = ?);"
> _querysubranges = """SELECT subrev, subidx
> FROM subranges
> WHERE (suprev = ? AND supidx = ?)
> @@ -157,11 +157,11 @@
> FROM subranges
> WHERE %s;"""
>
> -_querysuperrangesbody = '(subrev = %d and subidx = %d)'
> +_querysuperrangesbody = b'(subrev = %d and subidx = %d)'
>
> def _make_querysuperranges(ranges):
> # building a tree of OR would allow for more ranges
> - body = ' OR '.join(_querysuperrangesbody % r for r in ranges)
> + body = b' OR '.join(_querysuperrangesbody % r for r in ranges)
> return _querysuperrangesmain % body
>
> class stablerangesqlbase(stablerange.stablerangecached):
> @@ -218,7 +218,7 @@
> except (sqlite3.DatabaseError, sqlite3.OperationalError):
> # something is wrong with the sqlite db
> # Since this is a cache, we ignore it.
> - if '_con' in vars(self):
> + if b'_con' in vars(self):
> del self._con
> self._unsavedsubranges.clear()
>
> @@ -234,7 +234,7 @@
> util.makedirs(self._vfs.dirname(self._path))
> except OSError:
> return None
> - con = sqlite3.connect(self._path, timeout=30, isolation_level="IMMEDIATE")
> + con = sqlite3.connect(self._path, timeout=30, isolation_level=b"IMMEDIATE")
> con.text_factory = str
> return con
>
> @@ -272,11 +272,11 @@
> #
> # operational error catch read-only and locked database
> # IntegrityError catch Unique constraint error that may arise
> - if '_con' in vars(self):
> + if b'_con' in vars(self):
> del self._con
> self._unsavedsubranges.clear()
> - repo.ui.log('evoext-cache', 'error while saving new data: %s' % exc)
> - repo.ui.debug('evoext-cache: error while saving new data: %s' % exc)
> + repo.ui.log(b'evoext-cache', b'error while saving new data: %s' % exc)
> + repo.ui.debug(b'evoext-cache: error while saving new data: %s' % exc)
>
> def _trysave(self, repo):
> repo = repo.unfiltered()
> @@ -288,7 +288,7 @@
>
> if self._con is None:
> util.unlinkpath(self._path, ignoremissing=True)
> - if '_con' in vars(self):
> + if b'_con' in vars(self):
> del self._con
>
> con = self._db()
> @@ -313,9 +313,9 @@
> # drifting is currently an issue because this means another
> # process might have already added the cache line we are about
> # to add. This will confuse sqlite
> - msg = _('stable-range cache: skipping write, '
> - 'database drifted under my feet\n')
> - hint = _('(disk: %s-%s vs mem: %s-%s)\n')
> + msg = _(b'stable-range cache: skipping write, '
> + b'database drifted under my feet\n')
> + hint = _(b'(disk: %s-%s vs mem: %s-%s)\n')
> data = (nodemod.hex(meta[2]), meta[1],
> nodemod.hex(self._ondisktipnode), self._ondisktiprev)
> repo.ui.warn(msg)
> @@ -369,7 +369,7 @@
>
> def clear(self, reset=False):
> super(stablerangesql, self).clear(reset=reset)
> - if '_con' in vars(self):
> + if b'_con' in vars(self):
> del self._con
> self._subrangescache.clear()
>
> @@ -390,13 +390,13 @@
> class mergepointsql(stablerangesql, stablerange.stablerange_mergepoint):
>
> _schemaversion = 3
> - _cachefile = 'evoext_stablerange_v2.sqlite'
> - _cachename = 'evo-ext-stablerange-mergepoint'
> + _cachefile = b'evoext_stablerange_v2.sqlite'
> + _cachename = b'evo-ext-stablerange-mergepoint'
>
> class sqlstablerange(stablerangesqlbase, stablerange.stablerange):
>
> _schemaversion = 1
> - _cachefile = 'evoext_stablerange_v1.sqlite'
> + _cachefile = b'evoext_stablerange_v1.sqlite'
>
> def warmup(self, repo, upto=None):
> self._con # make sure the data base is loaded
> @@ -413,8 +413,8 @@
> except error.LockError:
> # Exceptionnally we are noisy about it since performance impact is
> # large We should address that before using this more widely.
> - repo.ui.warn('stable-range cache: unable to lock repo while warming\n')
> - repo.ui.warn('(cache will not be saved)\n')
> + repo.ui.warn(b'stable-range cache: unable to lock repo while warming\n')
> + repo.ui.warn(b'(cache will not be saved)\n')
> super(sqlstablerange, self).warmup(repo, upto)
>
> @eh.reposetup
> @@ -430,7 +430,7 @@
>
> @localrepo.unfilteredmethod
> def destroyed(self):
> - if 'stablerange' in vars(self):
> + if b'stablerange' in vars(self):
> self.stablerange.clear()
> del self.stablerange
> super(stablerangerepo, self).destroyed()
> diff --git a/hgext3rd/evolve/stablesort.py b/hgext3rd/evolve/stablesort.py
> --- a/hgext3rd/evolve/stablesort.py
> +++ b/hgext3rd/evolve/stablesort.py
> @@ -52,30 +52,30 @@
> return key
>
> @eh.command(
> - 'debugstablesort',
> + b'debugstablesort',
> [
> - ('r', 'rev', [], 'heads to start from'),
> - ('', 'method', 'branchpoint', "method used for sorting, one of: "
> - "branchpoint, basic-mergepoint and basic-headstart"),
> - ('l', 'limit', '', 'number of revision display (default to all)')
> - ] + commands.formatteropts,
> - _(''))
> + (b'r', b'rev', [], b'heads to start from'),
> + (b'', b'method', b'branchpoint', b"method used for sorting, one of: "
> + b"branchpoint, basic-mergepoint and basic-headstart"),
> + (b'l', b'limit', b'', b'number of revision display (default to all)')
> + ] + commands.formatteropts,
> + _(b''))
> def debugstablesort(ui, repo, **opts):
> """display the ::REVS set topologically sorted in a stable way
> """
> - revs = scmutil.revrange(repo, opts['rev'])
> + revs = scmutil.revrange(repo, opts[b'rev'])
>
> - method = opts['method']
> + method = opts[b'method']
> sorting = _methodmap.get(method)
> if sorting is None:
> - valid_method = ', '.join(sorted(_methodmap))
> - raise error.Abort('unknown sorting method: "%s"' % method,
> - hint='pick one of: %s' % valid_method)
> + valid_method = b', '.join(sorted(_methodmap))
> + raise error.Abort(b'unknown sorting method: "%s"' % method,
> + hint=b'pick one of: %s' % valid_method)
>
> displayer = compat.changesetdisplayer(ui, repo, opts, buffered=True)
> kwargs = {}
> - if opts['limit']:
> - kwargs['limit'] = int(opts['limit'])
> + if opts[b'limit']:
> + kwargs[b'limit'] = int(opts[b'limit'])
> for r in sorting(repo, revs, **kwargs):
> ctx = repo[r]
> displayer.show(ctx)
> @@ -178,7 +178,7 @@
> heads = list(sorted(revs))
> else:
> # keeps heads only
> - heads = sorted(repo.revs('sort(heads(%ld::%ld))', revs, revs), key=tiebreaker)
> + heads = sorted(repo.revs(b'sort(heads(%ld::%ld))', revs, revs), key=tiebreaker)
>
> results = []
> while heads:
> @@ -244,24 +244,24 @@
> return result
>
> def stablesort_mergepoint_head_basic(repo, revs, limit=None):
> - heads = repo.revs('sort(heads(%ld))', revs)
> + heads = repo.revs(b'sort(heads(%ld))', revs)
> if not heads:
> return []
> elif 2 < len(heads):
> - raise error.Abort('cannot use head based merging, %d heads found'
> + raise error.Abort(b'cannot use head based merging, %d heads found'
> % len(heads))
> head = heads.first()
> - revs = stablesort_mergepoint_bounded(repo, head, repo.revs('::%d', head))
> + revs = stablesort_mergepoint_bounded(repo, head, repo.revs(b'::%d', head))
> if limit is None:
> return revs
> return revs[-limit:]
>
> def stablesort_mergepoint_head_debug(repo, revs, limit=None):
> - heads = repo.revs('sort(heads(%ld))', revs)
> + heads = repo.revs(b'sort(heads(%ld))', revs)
> if not heads:
> return []
> elif 2 < len(heads):
> - raise error.Abort('cannot use head based merging, %d heads found'
> + raise error.Abort(b'cannot use head based merging, %d heads found'
> % len(heads))
> head = heads.first()
> revs = stablesort_mergepoint_head(repo, head)
> @@ -292,7 +292,7 @@
> ps = sorted(ps, key=tiebreaker)
>
> # get the part from the highest parent. This is the part that changes
> - mid_revs = repo.revs('only(%d, %d)', ps[1], ps[0])
> + mid_revs = repo.revs(b'only(%d, %d)', ps[1], ps[0])
> if mid_revs:
> mid = stablesort_mergepoint_bounded(repo, ps[1], mid_revs)
>
> @@ -302,20 +302,20 @@
> return bottom + mid + top
>
> def stablesort_mergepoint_head_cached(repo, revs, limit=None):
> - heads = repo.revs('sort(heads(%ld))', revs)
> + heads = repo.revs(b'sort(heads(%ld))', revs)
> if not heads:
> return []
> elif 2 < len(heads):
> - raise error.Abort('cannot use head based merging, %d heads found'
> + raise error.Abort(b'cannot use head based merging, %d heads found'
> % len(heads))
> head = heads.first()
> cache = stablesortcache()
> first = list(cache.get(repo, head, limit=limit))
> second = list(cache.get(repo, head, limit=limit))
> if first != second:
> - repo.ui.warn('stablesort-cache: initial run different from re-run:\n'
> - ' %s\n'
> - ' %s\n' % (first, second))
> + repo.ui.warn(b'stablesort-cache: initial run different from re-run:\n'
> + b' %s\n'
> + b' %s\n' % (first, second))
> return second
>
> class stablesortcache(object):
> @@ -502,11 +502,11 @@
> recordjump(previous, lower, size)
>
> def stablesort_mergepoint_head_ondisk(repo, revs, limit=None):
> - heads = repo.revs('sort(heads(%ld))', revs)
> + heads = repo.revs(b'sort(heads(%ld))', revs)
> if not heads:
> return []
> elif 2 < len(heads):
> - raise error.Abort('cannot use head based merging, %d heads found'
> + raise error.Abort(b'cannot use head based merging, %d heads found'
> % len(heads))
> head = heads.first()
> unfi = repo.unfiltered()
> @@ -514,22 +514,22 @@
> cache.save(unfi)
> return cache.get(repo, head, limit=limit)
>
> -S_INDEXSIZE = struct.Struct('>I')
> +S_INDEXSIZE = struct.Struct(b'>I')
>
> class ondiskstablesortcache(stablesortcache, genericcaches.changelogsourcebase):
>
> - _filepath = 'evoext-stablesortcache-00'
> - _cachename = 'evo-ext-stablesort'
> + _filepath = b'evoext-stablesortcache-00'
> + _cachename = b'evo-ext-stablesort'
>
> def __init__(self):
> super(ondiskstablesortcache, self).__init__()
> - self._index = array.array('l')
> - self._data = array.array('l')
> + self._index = array.array(b'l')
> + self._data = array.array(b'l')
> del self._jumps
>
> def getjumps(self, repo, rev):
> if len(self._index) < rev:
> - msg = 'stablesortcache must be warmed before use (%d < %d)'
> + msg = b'stablesortcache must be warmed before use (%d < %d)'
> msg %= (len(self._index), rev)
> raise error.ProgrammingError(msg)
> return self._getjumps(rev)
> @@ -577,10 +577,10 @@
> total = len(data)
>
> def progress(pos, rev):
> - compat.progress(repo.ui, 'updating stablesort cache',
> - pos, 'rev %s' % rev, unit='revision', total=total)
> + compat.progress(repo.ui, b'updating stablesort cache',
> + pos, b'rev %s' % rev, unit=b'revision', total=total)
>
> - progress(0, '')
> + progress(0, b'')
> for idx, rev in enumerate(data):
> parents = filterparents(repo.changelog.parentrevs(rev))
> if len(parents) <= 1:
> @@ -594,12 +594,12 @@
> break
> if not (idx % 1000): # progress as a too high performance impact
> progress(idx, rev)
> - progress(None, '')
> + progress(None, b'')
>
> def clear(self, reset=False):
> super(ondiskstablesortcache, self).clear()
> - self._index = array.array('l')
> - self._data = array.array('l')
> + self._index = array.array(b'l')
> + self._data = array.array(b'l')
>
> def load(self, repo):
> """load data from disk
> @@ -609,8 +609,8 @@
> assert repo.filtername is None
>
> data = repo.cachevfs.tryread(self._filepath)
> - self._index = array.array('l')
> - self._data = array.array('l')
> + self._index = array.array(b'l')
> + self._data = array.array(b'l')
> if not data:
> self._cachekey = self.emptykey
> else:
> @@ -634,7 +634,7 @@
> if self._cachekey is None or self._cachekey == self._ondiskkey:
> return
> try:
> - cachefile = repo.cachevfs(self._filepath, 'w', atomictemp=True)
> + cachefile = repo.cachevfs(self._filepath, b'w', atomictemp=True)
>
> # data to write
> headerdata = self._serializecachekey()
> @@ -650,8 +650,8 @@
> cachefile.close()
> self._ondiskkey = self._cachekey
> except (IOError, OSError) as exc:
> - repo.ui.log('stablesortcache', 'could not write update %s\n' % exc)
> - repo.ui.debug('stablesortcache: could not write update %s\n' % exc)
> + repo.ui.log(b'stablesortcache', b'could not write update %s\n' % exc)
> + repo.ui.debug(b'stablesortcache: could not write update %s\n' % exc)
>
> @eh.reposetup
> def setupcache(ui, repo):
> @@ -666,7 +666,7 @@
>
> @localrepo.unfilteredmethod
> def destroyed(self):
> - if 'stablesort' in vars(self):
> + if b'stablesort' in vars(self):
> self.stablesort.clear()
> super(stablesortrepo, self).destroyed()
>
> @@ -680,12 +680,12 @@
> repo.__class__ = stablesortrepo
>
> _methodmap = {
> - 'branchpoint': stablesort_branchpoint,
> - 'basic-mergepoint': stablesort_mergepoint_multirevs,
> - 'basic-headstart': stablesort_mergepoint_head_basic,
> - 'headstart': stablesort_mergepoint_head_debug,
> - 'headcached': stablesort_mergepoint_head_cached,
> - 'headondisk': stablesort_mergepoint_head_ondisk,
> + b'branchpoint': stablesort_branchpoint,
> + b'basic-mergepoint': stablesort_mergepoint_multirevs,
> + b'basic-headstart': stablesort_mergepoint_head_basic,
> + b'headstart': stablesort_mergepoint_head_debug,
> + b'headcached': stablesort_mergepoint_head_cached,
> + b'headondisk': stablesort_mergepoint_head_ondisk,
> }
>
> # merge last so that repo setup wrap after that one.
> diff --git a/hgext3rd/evolve/state.py b/hgext3rd/evolve/state.py
> --- a/hgext3rd/evolve/state.py
> +++ b/hgext3rd/evolve/state.py
> @@ -37,7 +37,7 @@
> can populate the object data reading that file
> """
>
> - def __init__(self, repo, path='evolvestate', opts={}):
> + def __init__(self, repo, path=b'evolvestate', opts={}):
> self._repo = repo
> self.path = path
> self.opts = opts
> @@ -63,7 +63,7 @@
> op = self._read()
> if isinstance(op, dict):
> self.opts.update(op)
> - elif self.path == 'evolvestate':
> + elif self.path == b'evolvestate':
> # it is the old evolvestate file
> oldop = _oldevolvestateread(self._repo)
> self.opts.update(oldop)
> @@ -77,13 +77,13 @@
>
> we use third-party library cbor to serialize data to write in the file.
> """
> - with self._repo.vfs(self.path, 'wb', atomictemp=True) as fp:
> + with self._repo.vfs(self.path, b'wb', atomictemp=True) as fp:
> cbor.dump(self.opts, fp)
>
> def _read(self):
> """reads the evolvestate file and returns a dictionary which contain
> data in the same format as it was before storing"""
> - with self._repo.vfs(self.path, 'rb') as fp:
> + with self._repo.vfs(self.path, b'rb') as fp:
> return cbor.load(fp)
>
> def delete(self):
> @@ -99,20 +99,20 @@
>
> This exists for BC reasons."""
> try:
> - f = repo.vfs('evolvestate')
> + f = repo.vfs(b'evolvestate')
> except IOError as err:
> if err.errno != errno.ENOENT:
> raise
> try:
> versionblob = f.read(4)
> if len(versionblob) < 4:
> - repo.ui.debug('ignoring corrupted evolvestate (file contains %i bits)'
> + repo.ui.debug(b'ignoring corrupted evolvestate (file contains %i bits)'
> % len(versionblob))
> return None
> - version = struct._unpack('>I', versionblob)[0]
> + version = struct._unpack(b'>I', versionblob)[0]
> if version != 0:
> - msg = _('unknown evolvestate version %i') % version
> - raise error.Abort(msg, hint=_('upgrade your evolve'))
> + msg = _(b'unknown evolvestate version %i') % version
> + raise error.Abort(msg, hint=_(b'upgrade your evolve'))
> records = []
> data = f.read()
> off = 0
> @@ -120,22 +120,22 @@
> while off < end:
> rtype = data[off]
> off += 1
> - length = struct._unpack('>I', data[off:(off + 4)])[0]
> + length = struct._unpack(b'>I', data[off:(off + 4)])[0]
> off += 4
> record = data[off:(off + length)]
> off += length
> - if rtype == 't':
> + if rtype == b't':
> rtype, record = record[0], record[1:]
> records.append((rtype, record))
> state = {}
> for rtype, rdata in records:
> - if rtype == 'C':
> - state['current'] = rdata
> + if rtype == b'C':
> + state[b'current'] = rdata
> elif rtype.lower():
> - repo.ui.debug('ignore evolve state record type %s' % rtype)
> + repo.ui.debug(b'ignore evolve state record type %s' % rtype)
> else:
> - raise error.Abort(_('unknown evolvestate field type %r')
> - % rtype, hint=_('upgrade your evolve'))
> + raise error.Abort(_(b'unknown evolvestate field type %r')
> + % rtype, hint=_(b'upgrade your evolve'))
> return state
> finally:
> f.close()
> diff --git a/hgext3rd/evolve/templatekw.py b/hgext3rd/evolve/templatekw.py
> --- a/hgext3rd/evolve/templatekw.py
> +++ b/hgext3rd/evolve/templatekw.py
> @@ -23,46 +23,48 @@
>
> ### template keywords
>
> -if util.safehasattr(templatekw, 'compatlist'):
> - @eh.templatekeyword('instabilities', requires=set(['ctx', 'templ']))
> +if util.safehasattr(templatekw, b'compatlist'):
> + @eh.templatekeyword(b'instabilities', requires=set([b'ctx', b'templ']))
> def showinstabilities(context, mapping):
> """List of strings. Evolution instabilities affecting the changeset
> (zero or more of "orphan", "content-divergent" or "phase-divergent")."""
> - ctx = context.resource(mapping, 'ctx')
> - return templatekw.compatlist(context, mapping, 'instability',
> + ctx = context.resource(mapping, b'ctx')
> + return templatekw.compatlist(context, mapping, b'instability',
> ctx.instabilities(),
> - plural='instabilities')
> + plural=b'instabilities')
>
> - @eh.templatekeyword('troubles', requires=set(['ctx', 'templ']))
> + @eh.templatekeyword(b'troubles', requires=set([b'ctx', b'templ']))
> def showtroubles(context, mapping): # legacy name for instabilities
> - ctx = context.resource(mapping, 'ctx')
> - return templatekw.compatlist(context, mapping, 'trouble',
> - ctx.instabilities(), plural='troubles')
> + ctx = context.resource(mapping, b'ctx')
> + return templatekw.compatlist(context, mapping, b'trouble',
> + ctx.instabilities(), plural=b'troubles')
> else:
> # older template API in hg < 4.6
> - @eh.templatekeyword('instabilities')
> + @eh.templatekeyword(b'instabilities')
> def showinstabilities(**args):
> """List of strings. Evolution instabilities affecting the changeset
> (zero or more of "orphan", "content-divergent" or "phase-divergent")."""
> - ctx = args['ctx']
> - return templatekw.showlist('instability', ctx.instabilities(), args,
> - plural='instabilities')
> + ctx = args[b'ctx']
> + return templatekw.showlist(b'instability', ctx.instabilities(), args,
> + plural=b'instabilities')
>
> - @eh.templatekeyword('troubles')
> + @eh.templatekeyword(b'troubles')
> def showtroubles(**args):
> - ctx = args['ctx']
> - return templatekw.showlist('trouble', ctx.instabilities(), args,
> - plural='troubles')
> + ctx = args[b'ctx']
> + return templatekw.showlist(b'trouble', ctx.instabilities(), args,
> + plural=b'troubles')
>
> _sp = templatekw.showpredecessors
> -if util.safehasattr(_sp, '_requires'):
> +if util.safehasattr(_sp, b'_requires'):
> def showprecursors(context, mapping):
> return _sp(context, mapping)
> showprecursors.__doc__ = _sp._origdoc
> - _tk = templatekw.templatekeyword("precursors", requires=_sp._requires)
> + if isinstance(showprecursors.__doc__, bytes):
> + showprecursors.__doc__ = _sp._origdoc.decode(u"utf-8")
> + _tk = templatekw.templatekeyword(b"precursors", requires=_sp._requires)
> _tk(showprecursors)
> else:
> - templatekw.keywords["precursors"] = _sp
> + templatekw.keywords[b"precursors"] = _sp
>
>
> def closestsuccessors(repo, nodeid):
> @@ -71,14 +73,16 @@
> return directsuccessorssets(repo, nodeid)
>
> _ss = templatekw.showsuccessorssets
> -if util.safehasattr(_ss, '_requires'):
> +if util.safehasattr(_ss, b'_requires'):
> def showsuccessors(context, mapping):
> return _ss(context, mapping)
> showsuccessors.__doc__ = _ss._origdoc
> - _tk = templatekw.templatekeyword("successors", requires=_ss._requires)
> + if isinstance(showsuccessors.__doc__, bytes):
> + showsuccessors.__doc__ = _ss._origdoc.decode(u"utf-8")
> + _tk = templatekw.templatekeyword(b"successors", requires=_ss._requires)
> _tk(showsuccessors)
> else:
> - templatekw.keywords["successors"] = _ss
> + templatekw.keywords[b"successors"] = _ss
>
> def _getusername(ui):
> """the default username in the config or None"""
> @@ -91,24 +95,24 @@
> """ Returns a dict with the default templates for obs fate
> """
> # Prepare templates
> - verbtempl = '{verb}'
> - usertempl = '{if(users, " by {join(users, ", ")}")}'
> - succtempl = '{if(successors, " as ")}{successors}' # Bypass if limitation
> - datetempleq = ' (at {min_date|isodate})'
> - datetemplnoteq = ' (between {min_date|isodate} and {max_date|isodate})'
> - datetempl = '{if(max_date, "{ifeq(min_date, max_date, "%s", "%s")}")}' % (datetempleq, datetemplnoteq)
> + verbtempl = b'{verb}'
> + usertempl = b'{if(users, " by {join(users, ", ")}")}'
> + succtempl = b'{if(successors, " as ")}{successors}' # Bypass if limitation
> + datetempleq = b' (at {min_date|isodate})'
> + datetemplnoteq = b' (between {min_date|isodate} and {max_date|isodate})'
> + datetempl = b'{if(max_date, "{ifeq(min_date, max_date, "%s", "%s")}")}' % (datetempleq, datetemplnoteq)
>
> optionalusertempl = usertempl
> username = _getusername(ui)
> if username is not None:
> - optionalusertempl = ('{ifeq(join(users, "\0"), "%s", "", "%s")}'
> + optionalusertempl = (b'{ifeq(join(users, "\0"), "%s", "", "%s")}'
> % (username, usertempl))
>
> # Assemble them
> return {
> - 'obsfate_quiet': verbtempl + succtempl,
> - 'obsfate': verbtempl + succtempl + optionalusertempl,
> - 'obsfate_verbose': verbtempl + succtempl + usertempl + datetempl,
> + b'obsfate_quiet': verbtempl + succtempl,
> + b'obsfate': verbtempl + succtempl + optionalusertempl,
> + b'obsfate_verbose': verbtempl + succtempl + usertempl + datetempl,
> }
>
> def obsfatedata(repo, ctx):
> @@ -158,18 +162,18 @@
> line = []
>
> # Verb
> - line.append(obsfateline['verb'])
> + line.append(obsfateline[b'verb'])
>
> # Successors
> - successors = obsfateline["successors"]
> + successors = obsfateline[b"successors"]
>
> if successors:
> fmtsuccessors = map(lambda s: s[:12], successors)
> - line.append(" as %s" % ", ".join(fmtsuccessors))
> + line.append(b" as %s" % b", ".join(fmtsuccessors))
>
> # Users
> - if (verbose or normal) and 'users' in obsfateline:
> - users = obsfateline['users']
> + if (verbose or normal) and b'users' in obsfateline:
> + users = obsfateline[b'users']
>
> if not verbose:
> # If current user is the only user, do not show anything if not in
> @@ -179,24 +183,24 @@
> users = None
>
> if users:
> - line.append(" by %s" % ", ".join(users))
> + line.append(b" by %s" % b", ".join(users))
>
> # Date
> if verbose:
> - min_date = obsfateline['min_date']
> - max_date = obsfateline['max_date']
> + min_date = obsfateline[b'min_date']
> + max_date = obsfateline[b'max_date']
>
> if min_date == max_date:
> - fmtmin_date = util.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
> - line.append(" (at %s)" % fmtmin_date)
> + fmtmin_date = util.datestr(min_date, b'%Y-%m-%d %H:%M %1%2')
> + line.append(b" (at %s)" % fmtmin_date)
> else:
> - fmtmin_date = util.datestr(min_date, '%Y-%m-%d %H:%M %1%2')
> - fmtmax_date = util.datestr(max_date, '%Y-%m-%d %H:%M %1%2')
> - line.append(" (between %s and %s)" % (fmtmin_date, fmtmax_date))
> + fmtmin_date = util.datestr(min_date, b'%Y-%m-%d %H:%M %1%2')
> + fmtmax_date = util.datestr(max_date, b'%Y-%m-%d %H:%M %1%2')
> + line.append(b" (between %s and %s)" % (fmtmin_date, fmtmax_date))
>
> - return "".join(line)
> + return b"".join(line)
>
> -def obsfateprinter(obsfate, ui, prefix=""):
> +def obsfateprinter(obsfate, ui, prefix=b""):
> lines = []
> for raw in obsfate:
> lines.append(obsfatelineprinter(raw, ui))
> @@ -204,16 +208,16 @@
> if prefix:
> lines = [prefix + line for line in lines]
>
> - return "\n".join(lines)
> + return b"\n".join(lines)
>
> -if not util.safehasattr(templatekw, 'obsfateverb'): # <= hg-4.5
> - @eh.templatekeyword("obsfatedata")
> +if not util.safehasattr(templatekw, b'obsfateverb'): # <= hg-4.5
> + @eh.templatekeyword(b"obsfatedata")
> def showobsfatedata(repo, ctx, **args):
> # Get the needed obsfate data
> values = obsfatedata(repo, ctx)
>
> if values is None:
> - return templatekw.showlist("obsfatedata", [], args)
> + return templatekw.showlist(b"obsfatedata", [], args)
>
> return _showobsfatedata(repo, ctx, values, **args)
>
> @@ -224,36 +228,36 @@
> # As we can't do something like
> # "{join(map(nodeshort, successors), ', '}" in template, manually
> # create a correct textual representation
> - gen = ', '.join(n[:12] for n in raw['successors'])
> + gen = b', '.join(n[:12] for n in raw[b'successors'])
>
> - makemap = lambda x: {'successor': x}
> - joinfmt = lambda d: "%s" % d['successor']
> - raw['successors'] = templatekw._hybrid(gen, raw['successors'], makemap,
> - joinfmt)
> + makemap = lambda x: {b'successor': x}
> + joinfmt = lambda d: b"%s" % d[b'successor']
> + raw[b'successors'] = templatekw._hybrid(gen, raw[b'successors'], makemap,
> + joinfmt)
>
> # And then format them
> # Insert default obsfate templates
> - args['templ'].cache.update(obsfatedefaulttempl(repo.ui))
> + args[b'templ'].cache.update(obsfatedefaulttempl(repo.ui))
>
> if repo.ui.quiet:
> - name = "obsfate_quiet"
> + name = b"obsfate_quiet"
> elif repo.ui.verbose:
> - name = "obsfate_verbose"
> + name = b"obsfate_verbose"
> elif repo.ui.debugflag:
> - name = "obsfate_debug"
> + name = b"obsfate_debug"
> else:
> - name = "obsfate"
> + name = b"obsfate"
>
> # Format a single value
> def fmt(d):
> nargs = args.copy()
> nargs.update(d[name])
> - templ = args['templ']
> + templ = args[b'templ']
> # HG 4.6
> if hasattr(templ, "generate"):
> return templ.generate(name, nargs)
> else:
> - return args['templ'](name, **nargs)
> + return args[b'templ'](name, **nargs)
>
> # Generate a good enough string representation using templater
> gen = []
> @@ -268,8 +272,8 @@
> except StopIteration:
> pass
>
> - gen.append("".join(chunkstr))
> - gen = "; ".join(gen)
> + gen.append(b"".join(chunkstr))
> + gen = b"; ".join(gen)
>
> return templatekw._hybrid(gen, values, lambda x: {name: x}, fmt)
>
> diff --git a/hgext3rd/evolve/thirdparty/cbor.py b/hgext3rd/evolve/thirdparty/cbor.py
> --- a/hgext3rd/evolve/thirdparty/cbor.py
> +++ b/hgext3rd/evolve/thirdparty/cbor.py
> @@ -79,23 +79,23 @@
> CBOR_TAG_MIME = 36 # following text is MIME message, headers, separators and all
> CBOR_TAG_CBOR_FILEHEADER = 55799 # can open a file with 0xd9d9f7
>
> -_CBOR_TAG_BIGNUM_BYTES = struct.pack('B', CBOR_TAG | CBOR_TAG_BIGNUM)
> +_CBOR_TAG_BIGNUM_BYTES = struct.pack(b'B', CBOR_TAG | CBOR_TAG_BIGNUM)
>
>
> def dumps_int(val):
> - "return bytes representing int val in CBOR"
> + b"return bytes representing int val in CBOR"
> if val >= 0:
> # CBOR_UINT is 0, so I'm lazy/efficient about not OR-ing it in.
> if val <= 23:
> - return struct.pack('B', val)
> + return struct.pack(b'B', val)
> if val <= 0x0ff:
> - return struct.pack('BB', CBOR_UINT8_FOLLOWS, val)
> + return struct.pack(b'BB', CBOR_UINT8_FOLLOWS, val)
> if val <= 0x0ffff:
> - return struct.pack('!BH', CBOR_UINT16_FOLLOWS, val)
> + return struct.pack(b'!BH', CBOR_UINT16_FOLLOWS, val)
> if val <= 0x0ffffffff:
> - return struct.pack('!BI', CBOR_UINT32_FOLLOWS, val)
> + return struct.pack(b'!BI', CBOR_UINT32_FOLLOWS, val)
> if val <= 0x0ffffffffffffffff:
> - return struct.pack('!BQ', CBOR_UINT64_FOLLOWS, val)
> + return struct.pack(b'!BQ', CBOR_UINT64_FOLLOWS, val)
> outb = _dumps_bignum_to_bytearray(val)
> return _CBOR_TAG_BIGNUM_BYTES + _encode_type_num(CBOR_BYTES, len(outb)) + outb
> val = -1 - val
> @@ -119,28 +119,28 @@
>
>
> def dumps_float(val):
> - return struct.pack("!Bd", CBOR_FLOAT64, val)
> + return struct.pack(b"!Bd", CBOR_FLOAT64, val)
>
>
> -_CBOR_TAG_NEGBIGNUM_BYTES = struct.pack('B', CBOR_TAG | CBOR_TAG_NEGBIGNUM)
> +_CBOR_TAG_NEGBIGNUM_BYTES = struct.pack(b'B', CBOR_TAG | CBOR_TAG_NEGBIGNUM)
>
>
> def _encode_type_num(cbor_type, val):
> """For some CBOR primary type [0..7] and an auxiliary unsigned number, return CBOR encoded bytes"""
> assert val >= 0
> if val <= 23:
> - return struct.pack('B', cbor_type | val)
> + return struct.pack(b'B', cbor_type | val)
> if val <= 0x0ff:
> - return struct.pack('BB', cbor_type | CBOR_UINT8_FOLLOWS, val)
> + return struct.pack(b'BB', cbor_type | CBOR_UINT8_FOLLOWS, val)
> if val <= 0x0ffff:
> - return struct.pack('!BH', cbor_type | CBOR_UINT16_FOLLOWS, val)
> + return struct.pack(b'!BH', cbor_type | CBOR_UINT16_FOLLOWS, val)
> if val <= 0x0ffffffff:
> - return struct.pack('!BI', cbor_type | CBOR_UINT32_FOLLOWS, val)
> + return struct.pack(b'!BI', cbor_type | CBOR_UINT32_FOLLOWS, val)
> if (((cbor_type == CBOR_NEGINT) and (val <= 0x07fffffffffffffff)) or
> ((cbor_type != CBOR_NEGINT) and (val <= 0x0ffffffffffffffff))):
> - return struct.pack('!BQ', cbor_type | CBOR_UINT64_FOLLOWS, val)
> + return struct.pack(b'!BQ', cbor_type | CBOR_UINT64_FOLLOWS, val)
> if cbor_type != CBOR_NEGINT:
> - raise Exception("value too big for CBOR unsigned number: {0!r}".format(val))
> + raise Exception(b"value too big for CBOR unsigned number: {0!r}".format(val))
> outb = _dumps_bignum_to_bytearray(val)
> return _CBOR_TAG_NEGBIGNUM_BYTES + _encode_type_num(CBOR_BYTES, len(outb)) + outb
>
> @@ -193,7 +193,7 @@
> parts.append(dumps(k, sort_keys=sort_keys))
> parts.append(dumps(v, sort_keys=sort_keys))
> else:
> - for k,v in d.iteritems():
> + for k,v in d.items():
> parts.append(dumps(k, sort_keys=sort_keys))
> parts.append(dumps(v, sort_keys=sort_keys))
> return b''.join(parts)
> @@ -201,8 +201,8 @@
>
> def dumps_bool(b):
> if b:
> - return struct.pack('B', CBOR_TRUE)
> - return struct.pack('B', CBOR_FALSE)
> + return struct.pack(b'B', CBOR_TRUE)
> + return struct.pack(b'B', CBOR_FALSE)
>
>
> def dumps_tag(t, sort_keys=False):
> @@ -223,7 +223,7 @@
>
> def dumps(ob, sort_keys=False):
> if ob is None:
> - return struct.pack('B', CBOR_NULL)
> + return struct.pack(b'B', CBOR_NULL)
> if isinstance(ob, bool):
> return dumps_bool(ob)
> if _is_stringish(ob):
> @@ -239,7 +239,7 @@
> return dumps_int(ob)
> if isinstance(ob, Tag):
> return dumps_tag(ob, sort_keys=sort_keys)
> - raise Exception("don't know how to cbor serialize object of type %s", type(ob))
> + raise Exception(b"don't know how to cbor serialize object of type %s", type(ob))
>
>
> # same basic signature as json.dump, but with no options (yet)
> @@ -260,7 +260,7 @@
> self.value = value
>
> def __repr__(self):
> - return "Tag({0!r}, {1!r})".format(self.tag, self.value)
> + return b"Tag({0!r}, {1!r})".format(self.tag, self.value)
>
> def __eq__(self, other):
> if not isinstance(other, Tag):
> @@ -273,7 +273,7 @@
> Parse CBOR bytes and return Python objects.
> """
> if data is None:
> - raise ValueError("got None for buffer to decode in loads")
> + raise ValueError(b"got None for buffer to decode in loads")
> fp = StringIO(data)
> return _loads(fp)[0]
>
> @@ -296,22 +296,22 @@
> aux = tag_aux
> elif tag_aux == CBOR_UINT8_FOLLOWS:
> data = fp.read(1)
> - aux = struct.unpack_from("!B", data, 0)[0]
> + aux = struct.unpack_from(b"!B", data, 0)[0]
> bytes_read += 1
> elif tag_aux == CBOR_UINT16_FOLLOWS:
> data = fp.read(2)
> - aux = struct.unpack_from("!H", data, 0)[0]
> + aux = struct.unpack_from(b"!H", data, 0)[0]
> bytes_read += 2
> elif tag_aux == CBOR_UINT32_FOLLOWS:
> data = fp.read(4)
> - aux = struct.unpack_from("!I", data, 0)[0]
> + aux = struct.unpack_from(b"!I", data, 0)[0]
> bytes_read += 4
> elif tag_aux == CBOR_UINT64_FOLLOWS:
> data = fp.read(8)
> - aux = struct.unpack_from("!Q", data, 0)[0]
> + aux = struct.unpack_from(b"!Q", data, 0)[0]
> bytes_read += 8
> else:
> - assert tag_aux == CBOR_VAR_FOLLOWS, "bogus tag {0:02x}".format(tb)
> + assert tag_aux == CBOR_VAR_FOLLOWS, b"bogus tag {0:02x}".format(tb)
> aux = None
>
> return tag, tag_aux, aux, bytes_read
> @@ -385,9 +385,9 @@
> return ob, bytes_read
>
> def _loads(fp, limit=None, depth=0, returntags=False):
> - "return (object, bytes read)"
> + b"return (object, bytes read)"
> if depth > _MAX_DEPTH:
> - raise Exception("hit CBOR loads recursion depth limit")
> + raise Exception(b"hit CBOR loads recursion depth limit")
>
> tb = _read_byte(fp)
>
> @@ -397,16 +397,16 @@
> # Some special cases of CBOR_7 best handled by special struct.unpack logic here
> if tb == CBOR_FLOAT16:
> data = fp.read(2)
> - hibyte, lowbyte = struct.unpack_from("BB", data, 0)
> + hibyte, lowbyte = struct.unpack_from(b"BB", data, 0)
> exp = (hibyte >> 2) & 0x1F
> mant = ((hibyte & 0x03) << 8) | lowbyte
> if exp == 0:
> val = mant * (2.0 ** -24)
> elif exp == 31:
> if mant == 0:
> - val = float('Inf')
> + val = float(b'Inf')
> else:
> - val = float('NaN')
> + val = float(b'NaN')
> else:
> val = (mant + 1024.0) * (2 ** (exp - 25))
> if hibyte & 0x80:
> @@ -414,11 +414,11 @@
> return (val, 3)
> elif tb == CBOR_FLOAT32:
> data = fp.read(4)
> - pf = struct.unpack_from("!f", data, 0)
> + pf = struct.unpack_from(b"!f", data, 0)
> return (pf[0], 5)
> elif tb == CBOR_FLOAT64:
> data = fp.read(8)
> - pf = struct.unpack_from("!d", data, 0)
> + pf = struct.unpack_from(b"!d", data, 0)
> return (pf[0], 9)
>
> tag, tag_aux, aux, bytes_read = _tag_aux(fp, tb)
> @@ -461,7 +461,7 @@
> return (None, bytes_read)
> if tb == CBOR_UNDEFINED:
> return (None, bytes_read)
> - raise ValueError("unknown cbor tag 7 byte: {:02x}".format(tb))
> + raise ValueError(b"unknown cbor tag 7 byte: {:02x}".format(tb))
>
>
> def loads_bytes(fp, aux, btag=CBOR_BYTES):
> @@ -481,7 +481,7 @@
> total_bytes_read += 1
> break
> tag, tag_aux, aux, bytes_read = _tag_aux(fp, tb)
> - assert tag == btag, 'variable length value contains unexpected component'
> + assert tag == btag, b'variable length value contains unexpected component'
> ob = fp.read(aux)
> chunklist.append(ob)
> total_bytes_read += bytes_read + aux
> diff --git a/hgext3rd/evolve/utility.py b/hgext3rd/evolve/utility.py
> --- a/hgext3rd/evolve/utility.py
> +++ b/hgext3rd/evolve/utility.py
> @@ -17,20 +17,20 @@
> compat,
> )
>
> -shorttemplate = "[{label('evolve.rev', rev)}] {desc|firstline}\n"
> +shorttemplate = b"[{label('evolve.rev', rev)}] {desc|firstline}\n"
> stacktemplate = """[{label('evolve.rev', if(topicidx, "s{topicidx}", rev))}] {desc|firstline}\n"""
>
> def obsexcmsg(ui, message, important=False):
> - verbose = ui.configbool('experimental', 'verbose-obsolescence-exchange')
> + verbose = ui.configbool(b'experimental', b'verbose-obsolescence-exchange')
> if verbose:
> - message = 'OBSEXC: ' + message
> + message = b'OBSEXC: ' + message
> if important or verbose:
> ui.status(message)
>
> def obsexcprg(ui, *args, **kwargs):
> - topic = 'obsmarkers exchange'
> - if ui.configbool('experimental', 'verbose-obsolescence-exchange'):
> - topic = 'OBSEXC'
> + topic = b'obsmarkers exchange'
> + if ui.configbool(b'experimental', b'verbose-obsolescence-exchange'):
> + topic = b'OBSEXC'
> compat.progress(ui, topic, *args, **kwargs)
>
> def filterparents(parents):
> @@ -50,28 +50,28 @@
> def shouldwarmcache(repo, tr):
> configbool = repo.ui.configbool
> config = repo.ui.config
> - desc = getattr(tr, 'desc', '')
> + desc = getattr(tr, 'desc', b'')
>
> autocase = False
> if tr is None and not getattr(repo, '_destroying', False):
> autocase = True
> - elif desc.startswith('serve'):
> + elif desc.startswith(b'serve'):
> autocase = True
> - elif desc.startswith('push') and not desc.startswith('push-response'):
> + elif desc.startswith(b'push') and not desc.startswith(b'push-response'):
> autocase = True
>
> - autocache = config('experimental', 'obshashrange.warm-cache',
> - 'auto') == 'auto'
> + autocache = config(b'experimental', b'obshashrange.warm-cache',
> + b'auto') == b'auto'
> if autocache:
> warm = autocase
> else:
> # note: we should not get to the default case
> - warm = configbool('experimental', 'obshashrange.warm-cache')
> - if not configbool('experimental', 'obshashrange'):
> + warm = configbool(b'experimental', b'obshashrange.warm-cache')
> + if not configbool(b'experimental', b'obshashrange'):
> return False
> if not warm:
> return False
> - maxrevs = repo.ui.configint('experimental', 'obshashrange.max-revs')
> + maxrevs = repo.ui.configint(b'experimental', b'obshashrange.max-revs')
> if maxrevs is not None and maxrevs < len(repo.unfiltered()):
> return False
> return True
> @@ -123,8 +123,8 @@
> newer = obsutil.successorssets(repo, obs.node())
> # search of a parent which is not killed
> while not newer:
> - ui.debug("stabilize target %s is plain dead,"
> - " trying to stabilize on its parent\n" %
> + ui.debug(b"stabilize target %s is plain dead,"
> + b" trying to stabilize on its parent\n" %
> obs)
> obs = obs.parents()[0]
> newer = obsutil.successorssets(repo, obs.node())
> @@ -141,7 +141,7 @@
> for successorsset in exc.successorssets
> for node in successorsset}
>
> -def revselectionprompt(ui, repo, revs, customheader=""):
> +def revselectionprompt(ui, repo, revs, customheader=b""):
> """function to prompt user to choose a revision from all the revs and return
> that revision for further tasks
>
> @@ -161,29 +161,29 @@
> if not ui.interactive():
> return None
>
> - promptmsg = customheader + "\n"
> + promptmsg = customheader + b"\n"
> for idx, rev in enumerate(revs):
> curctx = repo[rev]
> - revmsg = "%d: [%s] %s\n" % (idx + 1, curctx,
> - curctx.description().split("\n")[0])
> + revmsg = b"%d: [%s] %s\n" % (idx + 1, curctx,
> + curctx.description().split(b"\n")[0])
> promptmsg += revmsg
>
> - promptmsg += _("q: quit the prompt\n")
> - promptmsg += _("enter the index of the revision you want to select:")
> + promptmsg += _(b"q: quit the prompt\n")
> + promptmsg += _(b"enter the index of the revision you want to select:")
> idxselected = ui.prompt(promptmsg)
>
> intidx = None
> try:
> intidx = int(idxselected)
> except ValueError:
> - if idxselected == 'q':
> + if idxselected == b'q':
> return None
> - ui.write_err(_("invalid value '%s' entered for index\n") % idxselected)
> + ui.write_err(_(b"invalid value '%s' entered for index\n") % idxselected)
> return None
>
> if intidx > len(revs) or intidx <= 0:
> # we can make this error message better
> - ui.write_err(_("invalid value '%d' entered for index\n") % intidx)
> + ui.write_err(_(b"invalid value '%d' entered for index\n") % intidx)
> return None
>
> return revs[intidx - 1]
> @@ -206,7 +206,7 @@
> # all three are different, lets concatenate the two authors
> # XXX: should we let the user know about concatenation of authors
> # by printing some message (or maybe in verbose mode)
> - users = set(divuser.split(', '))
> - users.update(othuser.split(', '))
> - user = ', '.join(sorted(users))
> + users = set(divuser.split(b', '))
> + users.update(othuser.split(b', '))
> + user = b', '.join(sorted(users))
> return user
> diff --git a/hgext3rd/pullbundle.py b/hgext3rd/pullbundle.py
> --- a/hgext3rd/pullbundle.py
> +++ b/hgext3rd/pullbundle.py
> @@ -92,10 +92,10 @@
>
> from mercurial.i18n import _
>
> -__version__ = '0.1.1'
> -testedwith = '4.4 4.5 4.6 4.7.1'
> -minimumhgversion = '4.4'
> -buglink = 'https://bz.mercurial-scm.org/'
> +__version__ = b'0.1.1'
> +testedwith = b'4.4 4.5 4.6 4.7.1'
> +minimumhgversion = b'4.4'
> +buglink = b'https://bz.mercurial-scm.org/'
>
> cmdtable = {}
> command = registrar.command(cmdtable)
> @@ -103,14 +103,14 @@
> configtable = {}
> configitem = registrar.configitem(configtable)
>
> -configitem('pullbundle', 'cache-directory',
> +configitem(b'pullbundle', b'cache-directory',
> default=None,
> )
>
> # generic wrapping
>
> def uisetup(ui):
> - exchange.getbundle2partsmapping['changegroup'] = _getbundlechangegrouppart
> + exchange.getbundle2partsmapping[b'changegroup'] = _getbundlechangegrouppart
>
> def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
> b2caps=None, heads=None, common=None, **kwargs):
> @@ -118,13 +118,13 @@
> if not kwargs.get(r'cg', True):
> return
>
> - version = '01'
> - cgversions = b2caps.get('changegroup')
> + version = b'01'
> + cgversions = b2caps.get(b'changegroup')
> if cgversions: # 3.1 and 3.2 ship with an empty value
> cgversions = [v for v in cgversions
> if v in changegroup.supportedoutgoingversions(repo)]
> if not cgversions:
> - raise ValueError(_('no common changegroup version'))
> + raise ValueError(_(b'no common changegroup version'))
> version = max(cgversions)
>
> outgoing = exchange._computeoutgoing(repo, heads, common)
> @@ -145,20 +145,20 @@
> # END OF ALTERED PART
>
> if kwargs.get(r'narrow', False) and (include or exclude):
> - narrowspecpart = bundler.newpart('narrow:spec')
> + narrowspecpart = bundler.newpart(b'narrow:spec')
> if include:
> narrowspecpart.addparam(
> - 'include', '\n'.join(include), mandatory=True)
> + b'include', b'\n'.join(include), mandatory=True)
> if exclude:
> narrowspecpart.addparam(
> - 'exclude', '\n'.join(exclude), mandatory=True)
> + b'exclude', b'\n'.join(exclude), mandatory=True)
>
> def makeallcgpart(newpart, repo, outgoing, version, source,
> bundlecaps, filematcher, cgversions):
>
> pullbundle = not filematcher
> - if pullbundle and not util.safehasattr(repo, 'stablerange'):
> - repo.ui.warn('pullbundle: required extension "evolve" are missing, skipping pullbundle\n')
> + if pullbundle and not util.safehasattr(repo, b'stablerange'):
> + repo.ui.warn(b'pullbundle: required extension "evolve" are missing, skipping pullbundle\n')
> pullbundle = False
> if filematcher:
> makeonecgpart(newpart, repo, None, outgoing, version, source, bundlecaps,
> @@ -167,8 +167,8 @@
> start = util.timer()
> slices = sliceoutgoing(repo, outgoing)
> end = util.timer()
> - msg = _('pullbundle-cache: "missing" set sliced into %d subranges '
> - 'in %s seconds\n')
> + msg = _(b'pullbundle-cache: "missing" set sliced into %d subranges '
> + b'in %s seconds\n')
> repo.ui.write(msg % (len(slices), end - start))
> for sliceid, sliceout in slices:
> makeonecgpart(newpart, repo, sliceid, sliceout, version, source, bundlecaps,
> @@ -192,7 +192,7 @@
> missingheads = [rev(n) for n in sorted(outgoing.missingheads, reverse=True)]
> for head in missingheads:
> localslices = []
> - localmissing = set(repo.revs('%ld and ::%d', missingrevs, head))
> + localmissing = set(repo.revs(b'%ld and ::%d', missingrevs, head))
> thisrunmissing = localmissing.copy()
> while localmissing:
> slicerevs = []
> @@ -207,11 +207,11 @@
> missingrevs.difference_update(slicerevs)
> localmissing.difference_update(slicerevs)
> if localmissing:
> - heads = list(repo.revs('heads(%ld)', localmissing))
> + heads = list(repo.revs(b'heads(%ld)', localmissing))
> heads.sort(key=node)
> head = heads.pop()
> if heads:
> - thisrunmissing = repo.revs('%ld and only(%d, %ld)',
> + thisrunmissing = repo.revs(b'%ld and only(%d, %ld)',
> localmissing,
> head,
> heads)
> @@ -220,15 +220,15 @@
> if DEBUG:
> for s in reversed(ss):
> ms -= set(s)
> - missingbase = repo.revs('parents(%ld) and %ld', s, ms)
> + missingbase = repo.revs(b'parents(%ld) and %ld', s, ms)
> if missingbase:
> - repo.ui.write_err('!!! rev bundled while parents missing\n')
> - repo.ui.write_err(' parent: %s\n' % list(missingbase))
> - pb = repo.revs('%ld and children(%ld)', s, missingbase)
> - repo.ui.write_err(' children: %s\n' % list(pb))
> - h = repo.revs('heads(%ld)', s)
> - repo.ui.write_err(' heads: %s\n' % list(h))
> - raise error.ProgrammingError('issuing a range before its parents')
> + repo.ui.write_err(b'!!! rev bundled while parents missing\n')
> + repo.ui.write_err(b' parent: %s\n' % list(missingbase))
> + pb = repo.revs(b'%ld and children(%ld)', s, missingbase)
> + repo.ui.write_err(b' children: %s\n' % list(pb))
> + h = repo.revs(b'heads(%ld)', s)
> + repo.ui.write_err(b' heads: %s\n' % list(h))
> + raise error.ProgrammingError(b'issuing a range before its parents')
>
> for s in reversed(localslices):
> allslices.extend(s)
> @@ -381,8 +381,8 @@
> # changegroup part construction
>
> def _changegroupinfo(repo, nodes, source):
> - if repo.ui.verbose or source == 'bundle':
> - repo.ui.status(_("%d changesets found\n") % len(nodes))
> + if repo.ui.verbose or source == b'bundle':
> + repo.ui.status(_(b"%d changesets found\n") % len(nodes))
>
> def _makenewstream(newpart, repo, outgoing, version, source,
> bundlecaps, filematcher, cgversions):
> @@ -408,23 +408,23 @@
> def _makepartfromstream(newpart, repo, cgstream, nbchanges, version):
> # same as upstream code
>
> - part = newpart('changegroup', data=cgstream)
> + part = newpart(b'changegroup', data=cgstream)
> if version:
> - part.addparam('version', version)
> + part.addparam(b'version', version)
>
> - part.addparam('nbchanges', '%d' % nbchanges,
> + part.addparam(b'nbchanges', b'%d' % nbchanges,
> mandatory=False)
>
> - if 'treemanifest' in repo.requirements:
> - part.addparam('treemanifest', '1')
> + if b'treemanifest' in repo.requirements:
> + part.addparam(b'treemanifest', b'1')
>
> # cache management
>
> def cachedir(repo):
> - cachedir = repo.ui.config('pullbundle', 'cache-directory')
> + cachedir = repo.ui.config(b'pullbundle', b'cache-directory')
> if cachedir is not None:
> return cachedir
> - return repo.cachevfs.join('pullbundles')
> + return repo.cachevfs.join(b'pullbundles')
>
> def getcache(repo, bundlename):
> cdir = cachedir(repo)
> @@ -436,7 +436,7 @@
> # opening too many file will not work.
>
> def data():
> - with open(bundlepath, 'rb') as fd:
> + with open(bundlepath, b'rb') as fd:
> for chunk in util.filechunkiter(fd):
> yield chunk
> return data()
> @@ -454,7 +454,7 @@
> cachefile.write(chunk)
> yield chunk
>
> -BUNDLEMASK = "%s-%s-%010iskip-%010isize.hg"
> +BUNDLEMASK = b"%s-%s-%010iskip-%010isize.hg"
>
> def makeonecgpart(newpart, repo, rangeid, outgoing, version, source,
> bundlecaps, filematcher, cgversions):
> @@ -472,19 +472,19 @@
> cgstream = cachewriter(repo, bundlename, partdata[0])
> partdata = (cgstream,) + partdata[1:]
> else:
> - if repo.ui.verbose or source == 'bundle':
> - repo.ui.status(_("%d changesets found in caches\n") % nbchanges)
> + if repo.ui.verbose or source == b'bundle':
> + repo.ui.status(_(b"%d changesets found in caches\n") % nbchanges)
> pversion = None
> if cgversions:
> pversion = version
> partdata = (cachedata, nbchanges, pversion)
> return _makepartfromstream(newpart, repo, *partdata)
>
> -@command('debugpullbundlecacheoverlap',
> - [('', 'count', 100, _('of "client" pulling')),
> - ('', 'min-cache', 1, _('minimum size of cached bundle')),
> - ],
> - _('hg debugpullbundlecacheoverlap [--client 100] REVSET'))
> +@command(b'debugpullbundlecacheoverlap',
> + [(b'', b'count', 100, _(b'of "client" pulling')),
> + (b'', b'min-cache', 1, _(b'minimum size of cached bundle')),
> + ],
> + _(b'hg debugpullbundlecacheoverlap [--client 100] REVSET'))
> def debugpullbundlecacheoverlap(ui, repo, *revs, **opts):
> '''Display statistic on bundle cache hit
>
> @@ -494,21 +494,21 @@
> '''
> actionrevs = scmutil.revrange(repo, revs)
> if not revs:
> - raise error.Abort('No revision selected')
> - count = opts['count']
> - min_cache = opts['min_cache']
> + raise error.Abort(b'No revision selected')
> + count = opts[b'count']
> + min_cache = opts[b'min_cache']
>
> bundlehits = collections.defaultdict(lambda: 0)
> pullstats = []
>
> rlen = lambda rangeid: repo.stablerange.rangelength(repo, rangeid)
>
> - repo.ui.write("gathering %d sample pulls within %d revisions\n"
> + repo.ui.write(b"gathering %d sample pulls within %d revisions\n"
> % (count, len(actionrevs)))
> if 1 < min_cache:
> - repo.ui.write(" not caching ranges smaller than %d changesets\n" % min_cache)
> + repo.ui.write(b" not caching ranges smaller than %d changesets\n" % min_cache)
> for i in xrange(count):
> - repo.ui.progress('gathering data', i, total=count)
> + repo.ui.progress(b'gathering data', i, total=count)
> outgoing = takeonesample(repo, actionrevs)
> ranges = sliceoutgoing(repo, outgoing)
> hitranges = 0
> @@ -532,7 +532,7 @@
> hitranges,
> )
> pullstats.append(stats)
> - repo.ui.progress('gathering data', None)
> + repo.ui.progress(b'gathering data', None)
>
> sizes = []
> changesmissing = []
> @@ -563,36 +563,36 @@
> cachedhits.append(hits)
>
> sizesdist = distribution(sizes)
> - repo.ui.write(fmtdist('pull size', sizesdist))
> + repo.ui.write(fmtdist(b'pull size', sizesdist))
>
> changesmissingdist = distribution(changesmissing)
> - repo.ui.write(fmtdist('non-cached changesets', changesmissingdist))
> + repo.ui.write(fmtdist(b'non-cached changesets', changesmissingdist))
>
> changesratiodist = distribution(changesratio)
> - repo.ui.write(fmtdist('ratio of cached changesets', changesratiodist))
> + repo.ui.write(fmtdist(b'ratio of cached changesets', changesratiodist))
>
> bundlecountdist = distribution(bundlecount)
> - repo.ui.write(fmtdist('bundle count', bundlecountdist))
> + repo.ui.write(fmtdist(b'bundle count', bundlecountdist))
>
> rangesratiodist = distribution(rangesratio)
> - repo.ui.write(fmtdist('ratio of cached bundles', rangesratiodist))
> + repo.ui.write(fmtdist(b'ratio of cached bundles', rangesratiodist))
>
> - repo.ui.write('changesets served:\n')
> - repo.ui.write(' total: %7d\n' % totalchanges)
> - repo.ui.write(' from cache: %7d (%2d%%)\n'
> + repo.ui.write(b'changesets served:\n')
> + repo.ui.write(b' total: %7d\n' % totalchanges)
> + repo.ui.write(b' from cache: %7d (%2d%%)\n'
> % (totalcached, (totalcached * 100 // totalchanges)))
> - repo.ui.write(' bundle: %7d\n' % sum(bundlecount))
> + repo.ui.write(b' bundle: %7d\n' % sum(bundlecount))
>
> cachedsizesdist = distribution(cachedsizes)
> - repo.ui.write(fmtdist('size of cached bundles', cachedsizesdist))
> + repo.ui.write(fmtdist(b'size of cached bundles', cachedsizesdist))
>
> cachedhitsdist = distribution(cachedhits)
> - repo.ui.write(fmtdist('hit on cached bundles', cachedhitsdist))
> + repo.ui.write(fmtdist(b'hit on cached bundles', cachedhitsdist))
>
> def takeonesample(repo, revs):
> node = repo.changelog.node
> pulled = random.sample(revs, max(4, len(revs) // 1000))
> - pulled = repo.revs('%ld::%ld', pulled, pulled)
> + pulled = repo.revs(b'%ld::%ld', pulled, pulled)
> nodes = [node(r) for r in pulled]
> return outgoingfromnodes(repo, nodes)
>
> @@ -600,14 +600,14 @@
> data.sort()
> length = len(data)
> return {
> - 'min': data[0],
> - '10%': data[length // 10],
> - '25%': data[length // 4],
> - '50%': data[length // 2],
> - '75%': data[(length // 4) * 3],
> - '90%': data[(length // 10) * 9],
> - '95%': data[(length // 20) * 19],
> - 'max': data[-1],
> + b'min': data[0],
> + b'10%': data[length // 10],
> + b'25%': data[length // 4],
> + b'50%': data[length // 2],
> + b'75%': data[(length // 4) * 3],
> + b'90%': data[(length // 10) * 9],
> + b'95%': data[(length // 20) * 19],
> + b'max': data[-1],
> }
>
> STATSFORMAT = """{name}:
> diff --git a/hgext3rd/serverminitopic.py b/hgext3rd/serverminitopic.py
> --- a/hgext3rd/serverminitopic.py
> +++ b/hgext3rd/serverminitopic.py
> @@ -29,11 +29,11 @@
> except ImportError: # <= hg-4.5
> from mercurial import wireprotov1server as wireproto
>
> -if util.safehasattr(registrar, 'configitem'):
> +if util.safehasattr(registrar, b'configitem'):
>
> configtable = {}
> configitem = registrar.configitem(configtable)
> - configitem('experimental', 'server-mini-topic',
> + configitem(b'experimental', b'server-mini-topic',
> default=False,
> )
>
> @@ -44,7 +44,7 @@
> """
> enabled = getattr(repo, '_hasminitopic', None)
> if enabled is None:
> - enabled = (repo.ui.configbool('experimental', 'server-mini-topic')
> + enabled = (repo.ui.configbool(b'experimental', b'server-mini-topic')
> and not repo.publishing())
> repo._hasminitopic = enabled
> return enabled
> @@ -54,10 +54,10 @@
> def topicbranch(orig, self):
> branch = orig(self)
> if hasminitopic(self._repo) and self.phase():
> - topic = self._changeset.extra.get('topic')
> + topic = self._changeset.extra.get(b'topic')
> if topic is not None:
> topic = encoding.tolocal(topic)
> - branch = '%s:%s' % (branch, topic)
> + branch = b'%s:%s' % (branch, topic)
> return branch
>
> ### avoid caching topic data in rev-branch-cache
> @@ -67,7 +67,7 @@
>
> def _init__(self, *args, **kwargs):
> super(revbranchcacheoverlay, self).__init__(*args, **kwargs)
> - if 'branchinfo' in vars(self):
> + if b'branchinfo' in vars(self):
> del self.branchinfo
>
> def branchinfo(self, rev, changelog=None):
> @@ -95,7 +95,7 @@
> class topicawarerbc(revbranchcacheoverlay, cache.__class__):
> pass
> cache.__class__ = topicawarerbc
> - if 'branchinfo' in vars(cache):
> + if b'branchinfo' in vars(cache):
> del cache.branchinfo
> self._revbranchcache = cache
> return self._revbranchcache
> @@ -120,7 +120,7 @@
> if revs:
> s = hashlib.sha1()
> for rev in revs:
> - s.update('%s;' % rev)
> + s.update(b'%s;' % rev)
> key = s.digest()
> return key
>
> @@ -138,8 +138,8 @@
> branchmap.branchcache = previous
>
> _publiconly = set([
> - 'base',
> - 'immutable',
> + b'base',
> + b'immutable',
> ])
>
> def mighttopic(repo):
> @@ -159,7 +159,7 @@
>
> def copy(self):
> """return an deep copy of the branchcache object"""
> - if util.safehasattr(self, '_entries'):
> + if util.safehasattr(self, b'_entries'):
> _entries = self._entries
> else:
> # hg <= 4.9 (624d6683c705, b137a6793c51)
> @@ -216,7 +216,7 @@
> def wireprotocaps(orig, repo, proto):
> caps = orig(repo, proto)
> if hasminitopic(repo):
> - caps.append('topics')
> + caps.append(b'topics')
> return caps
>
> # wrap the necessary bit
> @@ -234,13 +234,13 @@
> assert issubclass(current, new), (current, new, targetclass)
>
> def uisetup(ui):
> - wrapclass(branchmap, 'branchcache', _topiccache)
> + wrapclass(branchmap, b'branchcache', _topiccache)
> try:
> # Mercurial 4.8 and older
> - extensions.wrapfunction(branchmap, 'read', wrapread)
> + extensions.wrapfunction(branchmap, b'read', wrapread)
> except AttributeError:
> # Mercurial 4.9; branchcache.fromfile now takes care of this
> # which is alredy defined on _topiccache
> pass
> - extensions.wrapfunction(wireproto, '_capabilities', wireprotocaps)
> - extensions.wrapfunction(context.changectx, 'branch', topicbranch)
> + extensions.wrapfunction(wireproto, b'_capabilities', wireprotocaps)
> + extensions.wrapfunction(context.changectx, b'branch', topicbranch)
> diff --git a/hgext3rd/topic/__init__.py b/hgext3rd/topic/__init__.py
> --- a/hgext3rd/topic/__init__.py
> +++ b/hgext3rd/topic/__init__.py
> @@ -159,65 +159,65 @@
>
> cmdtable = {}
> command = registrar.command(cmdtable)
> -colortable = {'topic.active': 'green',
> - 'topic.list.unstablecount': 'red',
> - 'topic.list.headcount.multiple': 'yellow',
> - 'topic.list.behindcount': 'cyan',
> - 'topic.list.behinderror': 'red',
> - 'stack.index': 'yellow',
> - 'stack.index.base': 'none dim',
> - 'stack.desc.base': 'none dim',
> - 'stack.shortnode.base': 'none dim',
> - 'stack.state.base': 'dim',
> - 'stack.state.clean': 'green',
> - 'stack.index.current': 'cyan', # random pick
> - 'stack.state.current': 'cyan bold', # random pick
> - 'stack.desc.current': 'cyan', # random pick
> - 'stack.shortnode.current': 'cyan', # random pick
> - 'stack.state.orphan': 'red',
> - 'stack.state.content-divergent': 'red',
> - 'stack.state.phase-divergent': 'red',
> - 'stack.summary.behindcount': 'cyan',
> - 'stack.summary.behinderror': 'red',
> - 'stack.summary.headcount.multiple': 'yellow',
> +colortable = {b'topic.active': b'green',
> + b'topic.list.unstablecount': b'red',
> + b'topic.list.headcount.multiple': b'yellow',
> + b'topic.list.behindcount': b'cyan',
> + b'topic.list.behinderror': b'red',
> + b'stack.index': b'yellow',
> + b'stack.index.base': b'none dim',
> + b'stack.desc.base': b'none dim',
> + b'stack.shortnode.base': b'none dim',
> + b'stack.state.base': b'dim',
> + b'stack.state.clean': b'green',
> + b'stack.index.current': b'cyan', # random pick
> + b'stack.state.current': b'cyan bold', # random pick
> + b'stack.desc.current': b'cyan', # random pick
> + b'stack.shortnode.current': b'cyan', # random pick
> + b'stack.state.orphan': b'red',
> + b'stack.state.content-divergent': b'red',
> + b'stack.state.phase-divergent': b'red',
> + b'stack.summary.behindcount': b'cyan',
> + b'stack.summary.behinderror': b'red',
> + b'stack.summary.headcount.multiple': b'yellow',
> # default color to help log output and thg
> # (first pick I could think off, update as needed
> - 'log.topic': 'green_background',
> - 'topic.active': 'green',
> - }
> + b'log.topic': b'green_background',
> + b'topic.active': b'green',
> + }
>
> -__version__ = '0.15.0'
> +__version__ = b'0.15.0'
>
> -testedwith = '4.5.2 4.6.2 4.7 4.8 4.9 5.0'
> -minimumhgversion = '4.5'
> -buglink = 'https://bz.mercurial-scm.org/'
> +testedwith = b'4.5.2 4.6.2 4.7 4.8 4.9 5.0'
> +minimumhgversion = b'4.5'
> +buglink = b'https://bz.mercurial-scm.org/'
>
> -if util.safehasattr(registrar, 'configitem'):
> +if util.safehasattr(registrar, b'configitem'):
>
> from mercurial import configitems
>
> configtable = {}
> configitem = registrar.configitem(configtable)
>
> - configitem('experimental', 'enforce-topic',
> + configitem(b'experimental', b'enforce-topic',
> default=False,
> )
> - configitem('experimental', 'enforce-single-head',
> + configitem(b'experimental', b'enforce-single-head',
> default=False,
> )
> - configitem('experimental', 'topic-mode',
> + configitem(b'experimental', b'topic-mode',
> default=None,
> )
> - configitem('experimental', 'topic.publish-bare-branch',
> + configitem(b'experimental', b'topic.publish-bare-branch',
> default=False,
> )
> - configitem('experimental', 'topic.allow-publish',
> + configitem(b'experimental', b'topic.allow-publish',
> default=configitems.dynamicdefault,
> )
> - configitem('_internal', 'keep-topic',
> + configitem(b'_internal', b'keep-topic',
> default=False,
> )
> - configitem('experimental', 'topic-mode.server',
> + configitem(b'experimental', b'topic-mode.server',
> default=configitems.dynamicdefault,
> )
>
> @@ -228,25 +228,25 @@
> # nobody else did so far.
> from mercurial import configitems
> extraitem = functools.partial(configitems._register, ui._knownconfig)
> - if ('experimental' not in ui._knownconfig
> - or not ui._knownconfig['experimental'].get('thg.displaynames')):
> - extraitem('experimental', 'thg.displaynames',
> + if (b'experimental' not in ui._knownconfig
> + or not ui._knownconfig[b'experimental'].get(b'thg.displaynames')):
> + extraitem(b'experimental', b'thg.displaynames',
> default=None,
> )
> - if ('devel' not in ui._knownconfig
> - or not ui._knownconfig['devel'].get('random')):
> - extraitem('devel', 'randomseed',
> + if (b'devel' not in ui._knownconfig
> + or not ui._knownconfig[b'devel'].get(b'random')):
> + extraitem(b'devel', b'randomseed',
> default=None,
> )
>
> # we need to do old style declaration for <= 4.5
> templatekeyword = registrar.templatekeyword()
> -post45template = 'requires=' in templatekeyword.__doc__
> +post45template = b'requires=' in templatekeyword.__doc__
>
> def _contexttopic(self, force=False):
> if not (force or self.mutable()):
> - return ''
> - return self.extra().get(constants.extrakey, '')
> + return b''
> + return self.extra().get(constants.extrakey, b'')
> context.basectx.topic = _contexttopic
>
> def _contexttopicidx(self):
> @@ -274,38 +274,38 @@
> idx = int(name[1:])
> tname = topic = repo.currenttopic
> if topic:
> - ttype = 'topic'
> + ttype = b'topic'
> revs = list(stack.stack(repo, topic=topic))
> else:
> - ttype = 'branch'
> + ttype = b'branch'
> tname = branch = repo[None].branch()
> revs = list(stack.stack(repo, branch=branch))
> elif topicrev.match(name):
> idx = int(name[1:])
> - ttype = 'topic'
> + ttype = b'topic'
> tname = topic = repo.currenttopic
> if not tname:
> - raise error.Abort(_('cannot resolve "%s": no active topic') % name)
> + raise error.Abort(_(b'cannot resolve "%s": no active topic') % name)
> revs = list(stack.stack(repo, topic=topic))
>
> if revs is not None:
> try:
> r = revs[idx]
> except IndexError:
> - if ttype == 'topic':
> - msg = _('cannot resolve "%s": %s "%s" has only %d changesets')
> - elif ttype == 'branch':
> - msg = _('cannot resolve "%s": %s "%s" has only %d non-public changesets')
> + if ttype == b'topic':
> + msg = _(b'cannot resolve "%s": %s "%s" has only %d changesets')
> + elif ttype == b'branch':
> + msg = _(b'cannot resolve "%s": %s "%s" has only %d non-public changesets')
> raise error.Abort(msg % (name, ttype, tname, len(revs) - 1))
> # t0 or s0 can be None
> if r == -1 and idx == 0:
> - msg = _('the %s "%s" has no %s')
> + msg = _(b'the %s "%s" has no %s')
> raise error.Abort(msg % (ttype, tname, name))
> return [repo[r].node()]
> if name not in repo.topics:
> return []
> node = repo.changelog.node
> - return [node(rev) for rev in repo.revs('topic(%s)', name)]
> + return [node(rev) for rev in repo.revs(b'topic(%s)', name)]
>
> def _nodemap(repo, node):
> ctx = repo[node]
> @@ -320,47 +320,47 @@
> topicmap.modsetup(ui)
> setupimportexport(ui)
>
> - extensions.afterloaded('rebase', _fixrebase)
> + extensions.afterloaded(b'rebase', _fixrebase)
>
> flow.installpushflag(ui)
>
> - entry = extensions.wrapcommand(commands.table, 'commit', commitwrap)
> - entry[1].append(('t', 'topic', '',
> - _("use specified topic"), _('TOPIC')))
> + entry = extensions.wrapcommand(commands.table, b'commit', commitwrap)
> + entry[1].append((b't', b'topic', b'',
> + _(b"use specified topic"), _(b'TOPIC')))
>
> - entry = extensions.wrapcommand(commands.table, 'push', pushoutgoingwrap)
> - entry[1].append(('t', 'topic', '',
> - _("topic to push"), _('TOPIC')))
> + entry = extensions.wrapcommand(commands.table, b'push', pushoutgoingwrap)
> + entry[1].append((b't', b'topic', b'',
> + _(b"topic to push"), _(b'TOPIC')))
>
> - entry = extensions.wrapcommand(commands.table, 'outgoing',
> + entry = extensions.wrapcommand(commands.table, b'outgoing',
> pushoutgoingwrap)
> - entry[1].append(('t', 'topic', '',
> - _("topic to push"), _('TOPIC')))
> + entry[1].append((b't', b'topic', b'',
> + _(b"topic to push"), _(b'TOPIC')))
>
> - extensions.wrapfunction(cmdutil, 'buildcommittext', committextwrap)
> - extensions.wrapfunction(merge, 'update', mergeupdatewrap)
> + extensions.wrapfunction(cmdutil, b'buildcommittext', committextwrap)
> + extensions.wrapfunction(merge, b'update', mergeupdatewrap)
> # We need to check whether t0 or b0 or s0 is passed to override the default update
> # behaviour of changing topic and I can't find a better way
> # to do that as scmutil.revsingle returns the rev number and hence we can't
> # plug into logic for this into mergemod.update().
> - extensions.wrapcommand(commands.table, 'update', checkt0)
> + extensions.wrapcommand(commands.table, b'update', checkt0)
>
> try:
> - evolve = extensions.find('evolve')
> - extensions.wrapfunction(evolve.rewriteutil, "presplitupdate",
> + evolve = extensions.find(b'evolve')
> + extensions.wrapfunction(evolve.rewriteutil, b"presplitupdate",
> presplitupdatetopic)
> except (KeyError, AttributeError):
> pass
>
> - cmdutil.summaryhooks.add('topic', summaryhook)
> + cmdutil.summaryhooks.add(b'topic', summaryhook)
>
> if not post45template:
> - templatekw.keywords['topic'] = topickw
> - templatekw.keywords['topicidx'] = topicidxkw
> + templatekw.keywords[b'topic'] = topickw
> + templatekw.keywords[b'topicidx'] = topicidxkw
> # Wrap workingctx extra to return the topic name
> - extensions.wrapfunction(context.workingctx, '__init__', wrapinit)
> + extensions.wrapfunction(context.workingctx, b'__init__', wrapinit)
> # Wrap changelog.add to drop empty topic
> - extensions.wrapfunction(changelog.changelog, 'add', wrapadd)
> + extensions.wrapfunction(changelog.changelog, b'add', wrapadd)
>
> def reposetup(ui, repo):
> if not isinstance(repo, localrepo.localrepository):
> @@ -368,9 +368,9 @@
>
> repo = repo.unfiltered()
>
> - if repo.ui.config('experimental', 'thg.displaynames') is None:
> - repo.ui.setconfig('experimental', 'thg.displaynames', 'topics',
> - source='topic-extension')
> + if repo.ui.config(b'experimental', b'thg.displaynames') is None:
> + repo.ui.setconfig(b'experimental', b'thg.displaynames', b'topics',
> + source=b'topic-extension')
>
> class topicrepo(repo.__class__):
>
> @@ -379,15 +379,15 @@
>
> def _restrictcapabilities(self, caps):
> caps = super(topicrepo, self)._restrictcapabilities(caps)
> - caps.add('topics')
> + caps.add(b'topics')
> return caps
>
> def commit(self, *args, **kwargs):
> - backup = self.ui.backupconfig('ui', 'allowemptycommit')
> + backup = self.ui.backupconfig(b'ui', b'allowemptycommit')
> try:
> - if self.currenttopic != self['.'].topic():
> + if self.currenttopic != self[b'.'].topic():
> # bypass the core "nothing changed" logic
> - self.ui.setconfig('ui', 'allowemptycommit', True)
> + self.ui.setconfig(b'ui', b'allowemptycommit', True)
> return super(topicrepo, self).commit(*args, **kwargs)
> finally:
> self.ui.restoreconfig(backup)
> @@ -403,7 +403,7 @@
> if current:
> ctx.extra()[constants.extrakey] = current
> if (isinstance(ctx, context.memctx)
> - and ctx.extra().get('amend_source')
> + and ctx.extra().get(b'amend_source')
> and ctx.topic()
> and not self.currenttopic):
> # we are amending and need to remove a topic
> @@ -414,16 +414,16 @@
> def topics(self):
> if self._topics is not None:
> return self._topics
> - topics = set(['', self.currenttopic])
> - for c in self.set('not public()'):
> + topics = set([b'', self.currenttopic])
> + for c in self.set(b'not public()'):
> topics.add(c.topic())
> - topics.remove('')
> + topics.remove(b'')
> self._topics = topics
> return topics
>
> @property
> def currenttopic(self):
> - return self.vfs.tryread('topic')
> + return self.vfs.tryread(b'topic')
>
> # overwritten at the instance level by topicmap.py
> _autobranchmaptopic = True
> @@ -454,12 +454,12 @@
> def transaction(self, desc, *a, **k):
> ctr = self.currenttransaction()
> tr = super(topicrepo, self).transaction(desc, *a, **k)
> - if desc in ('strip', 'repair') or ctr is not None:
> + if desc in (b'strip', b'repair') or ctr is not None:
> return tr
>
> reporef = weakref.ref(self)
> - if self.ui.configbool('experimental', 'enforce-single-head'):
> - if util.safehasattr(tr, 'validator'): # hg <= 4.7
> + if self.ui.configbool(b'experimental', b'enforce-single-head'):
> + if util.safehasattr(tr, b'validator'): # hg <= 4.7
> origvalidator = tr.validator
> else:
> origvalidator = tr._validator
> @@ -469,16 +469,16 @@
> flow.enforcesinglehead(repo, tr2)
> origvalidator(tr2)
>
> - if util.safehasattr(tr, 'validator'): # hg <= 4.7
> + if util.safehasattr(tr, b'validator'): # hg <= 4.7
> tr.validator = validator
> else:
> tr._validator = validator
>
> - topicmodeserver = self.ui.config('experimental',
> - 'topic-mode.server', 'ignore')
> - ispush = (desc.startswith('push') or desc.startswith('serve'))
> - if (topicmodeserver != 'ignore' and ispush):
> - if util.safehasattr(tr, 'validator'): # hg <= 4.7
> + topicmodeserver = self.ui.config(b'experimental',
> + b'topic-mode.server', b'ignore')
> + ispush = (desc.startswith(b'push') or desc.startswith(b'serve'))
> + if (topicmodeserver != b'ignore' and ispush):
> + if util.safehasattr(tr, b'validator'): # hg <= 4.7
> origvalidator = tr.validator
> else:
> origvalidator = tr._validator
> @@ -487,14 +487,14 @@
> repo = reporef()
> flow.rejectuntopicedchangeset(repo, tr2)
> return origvalidator(tr2)
> - if util.safehasattr(tr, 'validator'): # hg <= 4.7
> + if util.safehasattr(tr, b'validator'): # hg <= 4.7
> tr.validator = validator
> else:
> tr._validator = validator
>
> - elif (self.ui.configbool('experimental', 'topic.publish-bare-branch')
> - and (desc.startswith('push')
> - or desc.startswith('serve'))
> + elif (self.ui.configbool(b'experimental', b'topic.publish-bare-branch')
> + and (desc.startswith(b'push')
> + or desc.startswith(b'serve'))
> ):
> origclose = tr.close
> trref = weakref.ref(tr)
> @@ -505,11 +505,11 @@
> flow.publishbarebranch(repo, tr2)
> origclose()
> tr.close = close
> - allow_publish = self.ui.configbool('experimental',
> - 'topic.allow-publish',
> + allow_publish = self.ui.configbool(b'experimental',
> + b'topic.allow-publish',
> True)
> if not allow_publish:
> - if util.safehasattr(tr, 'validator'): # hg <= 4.7
> + if util.safehasattr(tr, b'validator'): # hg <= 4.7
> origvalidator = tr.validator
> else:
> origvalidator = tr._validator
> @@ -518,7 +518,7 @@
> repo = reporef()
> flow.reject_publish(repo, tr2)
> return origvalidator(tr2)
> - if util.safehasattr(tr, 'validator'): # hg <= 4.7
> + if util.safehasattr(tr, b'validator'): # hg <= 4.7
> tr.validator = validator
> else:
> tr._validator = validator
> @@ -537,62 +537,62 @@
> csetcount = stack.stack(repo, topic=ct).changesetcount
> empty = csetcount == 0
> if empty and not ctwasempty:
> - ui.status('active topic %r is now empty\n' % ct)
> + ui.status(b'active topic %r is now empty\n' % ct)
> trnames = getattr(tr, 'names', getattr(tr, '_names', ()))
> - if ('phase' in trnames
> - or any(n.startswith('push-response')
> + if (b'phase' in trnames
> + or any(n.startswith(b'push-response')
> for n in trnames)):
> - ui.status(_("(use 'hg topic --clear' to clear it if needed)\n"))
> - hint = _("(see 'hg help topics' for more information)\n")
> + ui.status(_(b"(use 'hg topic --clear' to clear it if needed)\n"))
> + hint = _(b"(see 'hg help topics' for more information)\n")
> if ctwasempty and not empty:
> if csetcount == 1:
> - msg = _('active topic %r grew its first changeset\n%s')
> + msg = _(b'active topic %r grew its first changeset\n%s')
> ui.status(msg % (ct, hint))
> else:
> - msg = _('active topic %r grew its %s first changesets\n%s')
> + msg = _(b'active topic %r grew its %s first changesets\n%s')
> ui.status(msg % (ct, csetcount, hint))
>
> - tr.addpostclose('signalcurrenttopicempty', currenttopicempty)
> + tr.addpostclose(b'signalcurrenttopicempty', currenttopicempty)
> return tr
>
> repo.__class__ = topicrepo
> repo._topics = None
> - if util.safehasattr(repo, 'names'):
> + if util.safehasattr(repo, b'names'):
> repo.names.addnamespace(namespaces.namespace(
> - 'topics', 'topic', namemap=_namemap, nodemap=_nodemap,
> + b'topics', b'topic', namemap=_namemap, nodemap=_nodemap,
> listnames=lambda repo: repo.topics))
>
> if post45template:
> - @templatekeyword('topic', requires={'ctx'})
> + @templatekeyword(b'topic', requires={b'ctx'})
> def topickw(context, mapping):
> """:topic: String. The topic of the changeset"""
> - ctx = context.resource(mapping, 'ctx')
> + ctx = context.resource(mapping, b'ctx')
> return ctx.topic()
>
> - @templatekeyword('topicidx', requires={'ctx'})
> + @templatekeyword(b'topicidx', requires={b'ctx'})
> def topicidxkw(context, mapping):
> """:topicidx: Integer. Index of the changeset as a stack alias"""
> - ctx = context.resource(mapping, 'ctx')
> + ctx = context.resource(mapping, b'ctx')
> return ctx.topicidx()
> else:
> def topickw(**args):
> """:topic: String. The topic of the changeset"""
> - return args['ctx'].topic()
> + return args[b'ctx'].topic()
>
> def topicidxkw(**args):
> """:topicidx: Integer. Index of the changeset as a stack alias"""
> - return args['ctx'].topicidx()
> + return args[b'ctx'].topicidx()
>
> def wrapinit(orig, self, repo, *args, **kwargs):
> orig(self, repo, *args, **kwargs)
> if not hastopicext(repo):
> return
> if constants.extrakey not in self._extra:
> - if getattr(repo, 'currenttopic', ''):
> + if getattr(repo, 'currenttopic', b''):
> self._extra[constants.extrakey] = repo.currenttopic
> else:
> # Empty key will be dropped from extra by another hack at the changegroup level
> - self._extra[constants.extrakey] = ''
> + self._extra[constants.extrakey] = b''
>
> def wrapadd(orig, cl, manifest, files, desc, transaction, p1, p2, user,
> date=None, extra=None, p1copies=None, p2copies=None,
> @@ -603,28 +603,28 @@
> # hg <= 4.9 (0e41f40b01cc)
> kwargs = {}
> if p1copies is not None:
> - kwargs['p1copies'] = p1copies
> + kwargs[b'p1copies'] = p1copies
> if p2copies is not None:
> - kwargs['p2copies'] = p2copies
> + kwargs[b'p2copies'] = p2copies
> # hg <= 5.0 (f385ba70e4af)
> if filesadded is not None:
> - kwargs['filesadded'] = filesadded
> + kwargs[b'filesadded'] = filesadded
> if filesremoved is not None:
> - kwargs['filesremoved'] = filesremoved
> + kwargs[b'filesremoved'] = filesremoved
> return orig(cl, manifest, files, desc, transaction, p1, p2, user,
> date=date, extra=extra, **kwargs)
>
> # revset predicates are automatically registered at loading via this symbol
> revsetpredicate = topicrevset.revsetpredicate
>
> - at command('topics', [
> - ('', 'clear', False, 'clear active topic if any'),
> - ('r', 'rev', [], 'revset of existing revisions', _('REV')),
> - ('l', 'list', False, 'show the stack of changeset in the topic'),
> - ('', 'age', False, 'show when you last touched the topics'),
> - ('', 'current', None, 'display the current topic only'),
> + at command(b'topics', [
> + (b'', b'clear', False, b'clear active topic if any'),
> + (b'r', b'rev', [], b'revset of existing revisions', _(b'REV')),
> + (b'l', b'list', False, b'show the stack of changeset in the topic'),
> + (b'', b'age', False, b'show when you last touched the topics'),
> + (b'', b'current', None, b'display the current topic only'),
> ] + commands.formatteropts,
> - _('hg topics [TOPIC]'))
> + _(b'hg topics [TOPIC]'))
> def topics(ui, repo, topic=None, **opts):
> """View current topic, set current topic, change topic for a set of revisions, or see all topics.
>
> @@ -662,20 +662,20 @@
>
> The --verbose version of this command display various information on the state of each topic."""
>
> - clear = opts.get('clear')
> - list = opts.get('list')
> - rev = opts.get('rev')
> - current = opts.get('current')
> - age = opts.get('age')
> + clear = opts.get(b'clear')
> + list = opts.get(b'list')
> + rev = opts.get(b'rev')
> + current = opts.get(b'current')
> + age = opts.get(b'age')
>
> if current and topic:
> - raise error.Abort(_("cannot use --current when setting a topic"))
> + raise error.Abort(_(b"cannot use --current when setting a topic"))
> if current and clear:
> - raise error.Abort(_("cannot use --current and --clear"))
> + raise error.Abort(_(b"cannot use --current and --clear"))
> if clear and topic:
> - raise error.Abort(_("cannot use --clear when setting a topic"))
> + raise error.Abort(_(b"cannot use --clear when setting a topic"))
> if age and topic:
> - raise error.Abort(_("cannot use --age while setting a topic"))
> + raise error.Abort(_(b"cannot use --age while setting a topic"))
>
> touchedrevs = set()
> if rev:
> @@ -684,49 +684,49 @@
> if topic:
> topic = topic.strip()
> if not topic:
> - raise error.Abort(_("topic name cannot consist entirely of whitespaces"))
> + raise error.Abort(_(b"topic name cannot consist entirely of whitespaces"))
> # Have some restrictions on the topic name just like bookmark name
> - scmutil.checknewlabel(repo, topic, 'topic')
> + scmutil.checknewlabel(repo, topic, b'topic')
>
> rmatch = re.match(br'[-_.\w]+', topic)
> if not rmatch or rmatch.group(0) != topic:
> - helptxt = _("topic names can only consist of alphanumeric, '-'"
> - " '_' and '.' characters")
> - raise error.Abort(_("invalid topic name: '%s'") % topic, hint=helptxt)
> + helptxt = _(b"topic names can only consist of alphanumeric, '-'"
> + b" '_' and '.' characters")
> + raise error.Abort(_(b"invalid topic name: '%s'") % topic, hint=helptxt)
>
> if list:
> - ui.pager('topics')
> + ui.pager(b'topics')
> if clear or rev:
> - raise error.Abort(_("cannot use --clear or --rev with --list"))
> + raise error.Abort(_(b"cannot use --clear or --rev with --list"))
> if not topic:
> topic = repo.currenttopic
> if not topic:
> - raise error.Abort(_('no active topic to list'))
> + raise error.Abort(_(b'no active topic to list'))
> return stack.showstack(ui, repo, topic=topic, opts=opts)
>
> if touchedrevs:
> if not obsolete.isenabled(repo, obsolete.createmarkersopt):
> - raise error.Abort(_('must have obsolete enabled to change topics'))
> + raise error.Abort(_(b'must have obsolete enabled to change topics'))
> if clear:
> topic = None
> - elif opts.get('current'):
> + elif opts.get(b'current'):
> topic = repo.currenttopic
> elif not topic:
> - raise error.Abort('changing topic requires a topic name or --clear')
> - if repo.revs('%ld and public()', touchedrevs):
> - raise error.Abort("can't change topic of a public change")
> + raise error.Abort(b'changing topic requires a topic name or --clear')
> + if repo.revs(b'%ld and public()', touchedrevs):
> + raise error.Abort(b"can't change topic of a public change")
> wl = lock = txn = None
> try:
> wl = repo.wlock()
> lock = repo.lock()
> - txn = repo.transaction('rewrite-topics')
> + txn = repo.transaction(b'rewrite-topics')
> rewrote = _changetopics(ui, repo, touchedrevs, topic)
> txn.close()
> if topic is None:
> - ui.status('cleared topic on %d changesets\n' % rewrote)
> + ui.status(b'cleared topic on %d changesets\n' % rewrote)
> else:
> - ui.status('changed topic on %d changesets to "%s"\n' % (rewrote,
> - topic))
> + ui.status(b'changed topic on %d changesets to "%s"\n' % (rewrote,
> + topic))
> finally:
> lockmod.release(txn, lock, wl)
> repo.invalidate()
> @@ -737,37 +737,37 @@
> if ct:
> st = stack.stack(repo, topic=ct)
> if not st:
> - ui.status(_('clearing empty topic "%s"\n') % ct)
> + ui.status(_(b'clearing empty topic "%s"\n') % ct)
> return _changecurrenttopic(repo, None)
>
> if topic:
> if not ct:
> - ui.status(_('marked working directory as topic: %s\n') % topic)
> + ui.status(_(b'marked working directory as topic: %s\n') % topic)
> return _changecurrenttopic(repo, topic)
>
> - ui.pager('topics')
> + ui.pager(b'topics')
> # `hg topic --current`
> ret = 0
> if current and not ct:
> - ui.write_err(_('no active topic\n'))
> + ui.write_err(_(b'no active topic\n'))
> ret = 1
> elif current:
> - fm = ui.formatter('topic', opts)
> - namemask = '%s\n'
> - label = 'topic.active'
> + fm = ui.formatter(b'topic', opts)
> + namemask = b'%s\n'
> + label = b'topic.active'
> fm.startitem()
> - fm.write('topic', namemask, ct, label=label)
> + fm.write(b'topic', namemask, ct, label=label)
> fm.end()
> else:
> _listtopics(ui, repo, opts)
> return ret
>
> - at command('stack', [
> - ('c', 'children', None,
> - _('display data about children outside of the stack'))
> + at command(b'stack', [
> + (b'c', b'children', None,
> + _(b'display data about children outside of the stack'))
> ] + commands.formatteropts,
> - _('hg stack [TOPIC]'))
> -def cmdstack(ui, repo, topic='', **opts):
> + _(b'hg stack [TOPIC]'))
> +def cmdstack(ui, repo, topic=b'', **opts):
> """list all changesets in a topic and other information
>
> List the current topic by default.
> @@ -781,30 +781,30 @@
> topic = repo.currenttopic
> if topic is None:
> branch = repo[None].branch()
> - ui.pager('stack')
> + ui.pager(b'stack')
> return stack.showstack(ui, repo, branch=branch, topic=topic, opts=opts)
>
> - at command('debugcb|debugconvertbookmark', [
> - ('b', 'bookmark', '', _('bookmark to convert to topic')),
> - ('', 'all', None, _('convert all bookmarks to topics')),
> + at command(b'debugcb|debugconvertbookmark', [
> + (b'b', b'bookmark', b'', _(b'bookmark to convert to topic')),
> + (b'', b'all', None, _(b'convert all bookmarks to topics')),
> ],
> - _('[-b BOOKMARK] [--all]'))
> + _(b'[-b BOOKMARK] [--all]'))
> def debugconvertbookmark(ui, repo, **opts):
> """Converts a bookmark to a topic with the same name.
> """
>
> - bookmark = opts.get('bookmark')
> - convertall = opts.get('all')
> + bookmark = opts.get(b'bookmark')
> + convertall = opts.get(b'all')
>
> if convertall and bookmark:
> - raise error.Abort(_("cannot use '--all' and '-b' together"))
> + raise error.Abort(_(b"cannot use '--all' and '-b' together"))
> if not (convertall or bookmark):
> - raise error.Abort(_("you must specify either '--all' or '-b'"))
> + raise error.Abort(_(b"you must specify either '--all' or '-b'"))
>
> bmstore = repo._bookmarks
>
> nodetobook = {}
> - for book, revnode in bmstore.iteritems():
> + for book, revnode in bmstore.items():
> if nodetobook.get(revnode):
> nodetobook[revnode].append(book)
> else:
> @@ -824,28 +824,28 @@
> try:
> node = bmstore[bookmark]
> except KeyError:
> - raise error.Abort(_("no such bookmark exists: '%s'") % bookmark)
> + raise error.Abort(_(b"no such bookmark exists: '%s'") % bookmark)
>
> revnum = repo[node].rev()
> if len(nodetobook[node]) > 1:
> - ui.status(_("skipping revision '%d' as it has multiple bookmarks "
> - "on it\n") % revnum)
> + ui.status(_(b"skipping revision '%d' as it has multiple bookmarks "
> + b"on it\n") % revnum)
> return
> targetrevs = _findconvertbmarktopic(repo, bookmark)
> if targetrevs:
> actions[(bookmark, revnum)] = targetrevs
>
> elif convertall:
> - for bmark, revnode in sorted(bmstore.iteritems()):
> + for bmark, revnode in sorted(bmstore.items()):
> revnum = repo[revnode].rev()
> if revnum in skipped:
> continue
> if len(nodetobook[revnode]) > 1:
> - ui.status(_("skipping '%d' as it has multiple bookmarks on"
> - " it\n") % revnum)
> + ui.status(_(b"skipping '%d' as it has multiple bookmarks on"
> + b" it\n") % revnum)
> skipped.append(revnum)
> continue
> - if bmark == '@':
> + if bmark == b'@':
> continue
> targetrevs = _findconvertbmarktopic(repo, bmark)
> if targetrevs:
> @@ -853,8 +853,8 @@
>
> if actions:
> try:
> - tr = repo.transaction('debugconvertbookmark')
> - for ((bmark, revnum), targetrevs) in sorted(actions.iteritems()):
> + tr = repo.transaction(b'debugconvertbookmark')
> + for ((bmark, revnum), targetrevs) in sorted(actions.items()):
> _applyconvertbmarktopic(ui, repo, targetrevs, revnum, bmark, tr)
> tr.close()
> finally:
> @@ -901,9 +901,9 @@
> # changeset
> if rewrote == 0:
> return
> - ui.status(_('changed topic to "%s" on %d revisions\n') % (bmark,
> + ui.status(_(b'changed topic to "%s" on %d revisions\n') % (bmark,
> rewrote))
> - ui.debug('removing bookmark "%s" from "%d"' % (bmark, old))
> + ui.debug(b'removing bookmark "%s" from "%d"' % (bmark, old))
> bookmarks.delete(repo, tr, [bmark])
>
> def _changecurrenttopic(repo, newtopic):
> @@ -911,11 +911,11 @@
>
> if newtopic:
> with repo.wlock():
> - with repo.vfs.open('topic', 'w') as f:
> + with repo.vfs.open(b'topic', b'w') as f:
> f.write(newtopic)
> else:
> - if repo.vfs.exists('topic'):
> - repo.vfs.unlink('topic')
> + if repo.vfs.exists(b'topic'):
> + repo.vfs.unlink(b'topic')
>
> def _changetopics(ui, repo, revs, newtopic):
> """ Changes topic to newtopic of all the revisions in the revset and return
> @@ -934,8 +934,8 @@
> except error.ManifestLookupError:
> return None
> fixedextra = dict(c.extra())
> - ui.debug('old node id is %s\n' % node.hex(c.node()))
> - ui.debug('origextra: %r\n' % fixedextra)
> + ui.debug(b'old node id is %s\n' % node.hex(c.node()))
> + ui.debug(b'origextra: %r\n' % fixedextra)
> oldtopic = fixedextra.get(constants.extrakey, None)
> if oldtopic == newtopic:
> continue
> @@ -944,16 +944,16 @@
> else:
> fixedextra[constants.extrakey] = newtopic
> fixedextra[constants.changekey] = c.hex()
> - if 'amend_source' in fixedextra:
> + if b'amend_source' in fixedextra:
> # TODO: right now the commitctx wrapper in
> # topicrepo overwrites the topic in extra if
> # amend_source is set to support 'hg commit
> # --amend'. Support for amend should be adjusted
> # to not be so invasive.
> - del fixedextra['amend_source']
> - ui.debug('changing topic of %s from %s to %s\n' % (
> + del fixedextra[b'amend_source']
> + ui.debug(b'changing topic of %s from %s to %s\n' % (
> c, oldtopic, newtopic))
> - ui.debug('fixedextra: %r\n' % fixedextra)
> + ui.debug(b'fixedextra: %r\n' % fixedextra)
> # While changing topic of set of linear commits, make sure that
> # we base our commits on new parent rather than old parent which
> # was obsoleted while changing the topic
> @@ -974,18 +974,18 @@
>
> # phase handling
> commitphase = c.phase()
> - overrides = {('phases', 'new-commit'): commitphase}
> - with repo.ui.configoverride(overrides, 'changetopic'):
> + overrides = {(b'phases', b'new-commit'): commitphase}
> + with repo.ui.configoverride(overrides, b'changetopic'):
> newnode = repo.commitctx(mc)
>
> successors[c.node()] = (newnode,)
> - ui.debug('new node id is %s\n' % node.hex(newnode))
> + ui.debug(b'new node id is %s\n' % node.hex(newnode))
> rewrote += 1
>
> # create obsmarkers and move bookmarks
> # XXX we should be creating marker as we go instead of only at the end,
> # this makes the operations more modulars
> - scmutil.cleanupnodes(repo, successors, 'changetopics')
> + scmutil.cleanupnodes(repo, successors, b'changetopics')
>
> # move the working copy too
> wctx = repo[None]
> @@ -997,13 +997,13 @@
> return rewrote
>
> def _listtopics(ui, repo, opts):
> - fm = ui.formatter('topics', opts)
> + fm = ui.formatter(b'topics', opts)
> activetopic = repo.currenttopic
> - namemask = '%s'
> + namemask = b'%s'
> if repo.topics:
> maxwidth = max(len(t) for t in repo.topics)
> - namemask = '%%-%is' % maxwidth
> - if opts.get('age'):
> + namemask = b'%%-%is' % maxwidth
> + if opts.get(b'age'):
> # here we sort by age and topic name
> topicsdata = sorted(_getlasttouched(repo, repo.topics))
> else:
> @@ -1014,70 +1014,70 @@
> )
> for age, topic, date, user in topicsdata:
> fm.startitem()
> - marker = ' '
> - label = 'topic'
> + marker = b' '
> + label = b'topic'
> active = (topic == activetopic)
> if active:
> - marker = '*'
> - label = 'topic.active'
> + marker = b'*'
> + label = b'topic.active'
> if not ui.quiet:
> # registering the active data is made explicitly later
> - fm.plain(' %s ' % marker, label=label)
> - fm.write('topic', namemask, topic, label=label)
> + fm.plain(b' %s ' % marker, label=label)
> + fm.write(b'topic', namemask, topic, label=label)
> fm.data(active=active)
>
> if ui.quiet:
> - fm.plain('\n')
> + fm.plain(b'\n')
> continue
> - fm.plain(' (')
> + fm.plain(b' (')
> if date:
> if age == -1:
> - timestr = 'empty and active'
> + timestr = b'empty and active'
> else:
> timestr = templatefilters.age(date)
> - fm.write('lasttouched', '%s', timestr, label='topic.list.time')
> + fm.write(b'lasttouched', b'%s', timestr, label=b'topic.list.time')
> if user:
> - fm.write('usertouched', ' by %s', user, label='topic.list.user')
> + fm.write(b'usertouched', b' by %s', user, label=b'topic.list.user')
> if date:
> - fm.plain(', ')
> + fm.plain(b', ')
> data = stack.stack(repo, topic=topic)
> if ui.verbose:
> - fm.write('branches+', 'on branch: %s',
> - '+'.join(data.branches), # XXX use list directly after 4.0 is released
> - label='topic.list.branches')
> + fm.write(b'branches+', b'on branch: %s',
> + b'+'.join(data.branches), # XXX use list directly after 4.0 is released
> + label=b'topic.list.branches')
>
> - fm.plain(', ')
> - fm.write('changesetcount', '%d changesets', data.changesetcount,
> - label='topic.list.changesetcount')
> + fm.plain(b', ')
> + fm.write(b'changesetcount', b'%d changesets', data.changesetcount,
> + label=b'topic.list.changesetcount')
>
> if data.unstablecount:
> - fm.plain(', ')
> - fm.write('unstablecount', '%d unstable',
> + fm.plain(b', ')
> + fm.write(b'unstablecount', b'%d unstable',
> data.unstablecount,
> - label='topic.list.unstablecount')
> + label=b'topic.list.unstablecount')
>
> headcount = len(data.heads)
> if 1 < headcount:
> - fm.plain(', ')
> - fm.write('headcount', '%d heads',
> + fm.plain(b', ')
> + fm.write(b'headcount', b'%d heads',
> headcount,
> - label='topic.list.headcount.multiple')
> + label=b'topic.list.headcount.multiple')
>
> if ui.verbose:
> # XXX we should include the data even when not verbose
>
> behindcount = data.behindcount
> if 0 < behindcount:
> - fm.plain(', ')
> - fm.write('behindcount', '%d behind',
> + fm.plain(b', ')
> + fm.write(b'behindcount', b'%d behind',
> behindcount,
> - label='topic.list.behindcount')
> + label=b'topic.list.behindcount')
> elif -1 == behindcount:
> - fm.plain(', ')
> - fm.write('behinderror', '%s',
> - _('ambiguous destination: %s') % data.behinderror,
> - label='topic.list.behinderror')
> - fm.plain(')\n')
> + fm.plain(b', ')
> + fm.write(b'behinderror', b'%s',
> + _(b'ambiguous destination: %s') % data.behinderror,
> + label=b'topic.list.behinderror')
> + fm.plain(b')\n')
> fm.end()
>
> def _getlasttouched(repo, topics):
> @@ -1090,7 +1090,7 @@
> age = -1
> user = None
> maxtime = (0, 0)
> - trevs = repo.revs("topic(%s)", topic)
> + trevs = repo.revs(b"topic(%s)", topic)
> # Need to check for the time of all changesets in the topic, whether
> # they are obsolete of non-heads
> # XXX: can we just rely on the max rev number for this
> @@ -1107,7 +1107,7 @@
> for marker in obsmarkers:
> rt = marker.date()
> if rt[0] > maxtime[0]:
> - user = marker.metadata().get('user', user)
> + user = marker.metadata().get(b'user', user)
> maxtime = rt
>
> username = stack.parseusername(user)
> @@ -1117,31 +1117,31 @@
> yield (age, topic, maxtime, username)
>
> def summaryhook(ui, repo):
> - t = getattr(repo, 'currenttopic', '')
> + t = getattr(repo, 'currenttopic', b'')
> if not t:
> return
> # i18n: column positioning for "hg summary"
> - ui.write(_("topic: %s\n") % ui.label(t, 'topic.active'))
> + ui.write(_(b"topic: %s\n") % ui.label(t, b'topic.active'))
>
> _validmode = [
> - 'ignore',
> - 'warning',
> - 'enforce',
> - 'enforce-all',
> - 'random',
> - 'random-all',
> + b'ignore',
> + b'warning',
> + b'enforce',
> + b'enforce-all',
> + b'random',
> + b'random-all',
> ]
>
> def _configtopicmode(ui):
> """ Parse the config to get the topicmode
> """
> - topicmode = ui.config('experimental', 'topic-mode')
> + topicmode = ui.config(b'experimental', b'topic-mode')
>
> # Fallback to read enforce-topic
> if topicmode is None:
> - enforcetopic = ui.configbool('experimental', 'enforce-topic')
> + enforcetopic = ui.configbool(b'experimental', b'enforce-topic')
> if enforcetopic:
> - topicmode = "enforce"
> + topicmode = b"enforce"
> if topicmode not in _validmode:
> topicmode = _validmode[0]
>
> @@ -1155,37 +1155,37 @@
> ismergecommit = len(repo[None].parents()) == 2
>
> notopic = not repo.currenttopic
> - mayabort = (topicmode == "enforce" and not ismergecommit)
> - maywarn = (topicmode == "warning"
> - or (topicmode == "enforce" and ismergecommit))
> + mayabort = (topicmode == b"enforce" and not ismergecommit)
> + maywarn = (topicmode == b"warning"
> + or (topicmode == b"enforce" and ismergecommit))
>
> mayrandom = False
> - if topicmode == "random":
> + if topicmode == b"random":
> mayrandom = not ismergecommit
> - elif topicmode == "random-all":
> + elif topicmode == b"random-all":
> mayrandom = True
>
> - if topicmode == 'enforce-all':
> + if topicmode == b'enforce-all':
> ismergecommit = False
> mayabort = True
> maywarn = False
>
> - hint = _("see 'hg help -e topic.topic-mode' for details")
> - if opts.get('topic'):
> - t = opts['topic']
> - with repo.vfs.open('topic', 'w') as f:
> + hint = _(b"see 'hg help -e topic.topic-mode' for details")
> + if opts.get(b'topic'):
> + t = opts[b'topic']
> + with repo.vfs.open(b'topic', b'w') as f:
> f.write(t)
> - elif opts.get('amend'):
> + elif opts.get(b'amend'):
> pass
> elif notopic and mayabort:
> - msg = _("no active topic")
> + msg = _(b"no active topic")
> raise error.Abort(msg, hint=hint)
> elif notopic and maywarn:
> - ui.warn(_("warning: new draft commit without topic\n"))
> + ui.warn(_(b"warning: new draft commit without topic\n"))
> if not ui.quiet:
> - ui.warn(("(%s)\n") % hint)
> + ui.warn((b"(%s)\n") % hint)
> elif notopic and mayrandom:
> - with repo.vfs.open('topic', 'w') as f:
> + with repo.vfs.open(b'topic', b'w') as f:
> f.write(randomname.randomtopicname(ui))
> return orig(ui, repo, *args, **opts)
>
> @@ -1194,18 +1194,18 @@
> if hastopicext(repo):
> t = repo.currenttopic
> if t:
> - ret = ret.replace("\nHG: branch",
> - "\nHG: topic '%s'\nHG: branch" % t)
> + ret = ret.replace(b"\nHG: branch",
> + b"\nHG: topic '%s'\nHG: branch" % t)
> return ret
>
> def pushoutgoingwrap(orig, ui, repo, *args, **opts):
> - if opts.get('topic'):
> - topicrevs = repo.revs('topic(%s) - obsolete()', opts['topic'])
> - opts.setdefault('rev', []).extend(topicrevs)
> + if opts.get(b'topic'):
> + topicrevs = repo.revs(b'topic(%s) - obsolete()', opts[b'topic'])
> + opts.setdefault(b'rev', []).extend(topicrevs)
> return orig(ui, repo, *args, **opts)
>
> def mergeupdatewrap(orig, repo, node, branchmerge, force, *args, **kwargs):
> - matcher = kwargs.get('matcher')
> + matcher = kwargs.get(b'matcher')
> partial = not (matcher is None or matcher.always())
> wlock = repo.wlock()
> isrebase = False
> @@ -1220,37 +1220,37 @@
> # rebased commit. We have explicitly stored in config if rebase is
> # running.
> ot = repo.currenttopic
> - if repo.ui.hasconfig('experimental', 'topicrebase'):
> + if repo.ui.hasconfig(b'experimental', b'topicrebase'):
> isrebase = True
> - if repo.ui.configbool('_internal', 'keep-topic'):
> + if repo.ui.configbool(b'_internal', b'keep-topic'):
> ist0 = True
> if ((not partial and not branchmerge) or isrebase) and not ist0:
> - t = ''
> + t = b''
> pctx = repo[node]
> if pctx.phase() > phases.public:
> t = pctx.topic()
> - with repo.vfs.open('topic', 'w') as f:
> + with repo.vfs.open(b'topic', b'w') as f:
> f.write(t)
> if t and t != ot:
> - repo.ui.status(_("switching to topic %s\n") % t)
> + repo.ui.status(_(b"switching to topic %s\n") % t)
> if ot and not t:
> st = stack.stack(repo, topic=ot)
> if not st:
> - repo.ui.status(_('clearing empty topic "%s"\n') % ot)
> + repo.ui.status(_(b'clearing empty topic "%s"\n') % ot)
> elif ist0:
> - repo.ui.status(_("preserving the current topic '%s'\n") % ot)
> + repo.ui.status(_(b"preserving the current topic '%s'\n") % ot)
> return ret
> finally:
> wlock.release()
>
> def checkt0(orig, ui, repo, node=None, rev=None, *args, **kwargs):
>
> - thezeros = set(['t0', 'b0', 's0'])
> - backup = repo.ui.backupconfig('_internal', 'keep-topic')
> + thezeros = set([b't0', b'b0', b's0'])
> + backup = repo.ui.backupconfig(b'_internal', b'keep-topic')
> try:
> if node in thezeros or rev in thezeros:
> - repo.ui.setconfig('_internal', 'keep-topic', 'yes',
> - source='topic-extension')
> + repo.ui.setconfig(b'_internal', b'keep-topic', b'yes',
> + source=b'topic-extension')
> return orig(ui, repo, node=node, rev=rev, *args, **kwargs)
> finally:
> repo.ui.restoreconfig(backup)
> @@ -1264,25 +1264,25 @@
> extra[constants.extrakey] = ctx.topic()
>
> def setrebaseconfig(orig, ui, repo, **opts):
> - repo.ui.setconfig('experimental', 'topicrebase', 'yes',
> - source='topic-extension')
> + repo.ui.setconfig(b'experimental', b'topicrebase', b'yes',
> + source=b'topic-extension')
> return orig(ui, repo, **opts)
>
> def new_init(orig, *args, **kwargs):
> runtime = orig(*args, **kwargs)
>
> - if util.safehasattr(runtime, 'extrafns'):
> + if util.safehasattr(runtime, b'extrafns'):
> runtime.extrafns.append(savetopic)
>
> return runtime
>
> try:
> - rebase = extensions.find("rebase")
> - extensions.wrapfunction(rebase.rebaseruntime, '__init__', new_init)
> + rebase = extensions.find(b"rebase")
> + extensions.wrapfunction(rebase.rebaseruntime, b'__init__', new_init)
> # This exists to store in the config that rebase is running so that we can
> # update the topic according to rebase. This is a hack and should be removed
> # when we have better options.
> - extensions.wrapcommand(rebase.cmdtable, 'rebase', setrebaseconfig)
> + extensions.wrapcommand(rebase.cmdtable, b'rebase', setrebaseconfig)
> except KeyError:
> pass
>
> @@ -1291,27 +1291,27 @@
> def _exporttopic(seq, ctx):
> topic = ctx.topic()
> if topic:
> - return 'EXP-Topic %s' % topic
> + return b'EXP-Topic %s' % topic
> return None
>
> def _importtopic(repo, patchdata, extra, opts):
> - if 'topic' in patchdata:
> - extra['topic'] = patchdata['topic']
> + if b'topic' in patchdata:
> + extra[b'topic'] = patchdata[b'topic']
>
> def setupimportexport(ui):
> """run at ui setup time to install import/export logic"""
> - cmdutil.extraexport.append('topic')
> - cmdutil.extraexportmap['topic'] = _exporttopic
> - cmdutil.extrapreimport.append('topic')
> - cmdutil.extrapreimportmap['topic'] = _importtopic
> - patch.patchheadermap.append(('EXP-Topic', 'topic'))
> + cmdutil.extraexport.append(b'topic')
> + cmdutil.extraexportmap[b'topic'] = _exporttopic
> + cmdutil.extrapreimport.append(b'topic')
> + cmdutil.extrapreimportmap[b'topic'] = _importtopic
> + patch.patchheadermap.append((b'EXP-Topic', b'topic'))
>
> ## preserve topic during split
>
> def presplitupdatetopic(original, repo, ui, prev, ctx):
> # Save topic of revision
> topic = None
> - if util.safehasattr(ctx, 'topic'):
> + if util.safehasattr(ctx, b'topic'):
> topic = ctx.topic()
>
> # Update the working directory
> diff --git a/hgext3rd/topic/constants.py b/hgext3rd/topic/constants.py
> --- a/hgext3rd/topic/constants.py
> +++ b/hgext3rd/topic/constants.py
> @@ -1,2 +1,2 @@
> -extrakey = 'topic'
> -changekey = '_rewrite_noise'
> +extrakey = b'topic'
> +changekey = b'_rewrite_noise'
> diff --git a/hgext3rd/topic/destination.py b/hgext3rd/topic/destination.py
> --- a/hgext3rd/topic/destination.py
> +++ b/hgext3rd/topic/destination.py
> @@ -13,11 +13,11 @@
> )
> from .evolvebits import builddependencies
>
> -def _destmergebranch(orig, repo, action='merge', sourceset=None,
> +def _destmergebranch(orig, repo, action=b'merge', sourceset=None,
> onheadcheck=True, destspace=None):
> # XXX: take destspace into account
> if sourceset is None:
> - p1 = repo['.']
> + p1 = repo[b'.']
> else:
> # XXX: using only the max here is flacky. That code should eventually
> # be updated to take care of the whole sourceset.
> @@ -26,11 +26,11 @@
> if common.hastopicext(repo):
> top = p1.topic()
> if top:
> - revs = repo.revs('topic(%s) - obsolete()', top)
> + revs = repo.revs(b'topic(%s) - obsolete()', top)
> deps, rdeps = builddependencies(repo, revs)
> heads = [r for r in revs if not rdeps[r]]
> if onheadcheck and p1.rev() not in heads:
> - raise error.Abort(_("not at topic head, update or explicit"))
> + raise error.Abort(_(b"not at topic head, update or explicit"))
>
> # prune heads above the source
> otherheads = set(heads)
> @@ -43,20 +43,20 @@
> # nothing to do at the topic level
> bhead = ngtip(repo, p1.branch(), all=True)
> if not bhead:
> - raise error.NoMergeDestAbort(_("nothing to merge"))
> + raise error.NoMergeDestAbort(_(b"nothing to merge"))
> elif 1 == len(bhead):
> return bhead[0]
> else:
> - msg = _("branch '%s' has %d heads "
> - "- please merge with an explicit rev")
> - hint = _("run 'hg heads .' to see heads")
> + msg = _(b"branch '%s' has %d heads "
> + b"- please merge with an explicit rev")
> + hint = _(b"run 'hg heads .' to see heads")
> raise error.ManyMergeDestAbort(msg % (p1.branch(), len(bhead)),
> hint=hint)
> elif len(otherheads) == 1:
> return otherheads.pop()
> else:
> - msg = _("topic '%s' has %d heads "
> - "- please merge with an explicit rev") % (top, len(heads))
> + msg = _(b"topic '%s' has %d heads "
> + b"- please merge with an explicit rev") % (top, len(heads))
> raise error.ManyMergeDestAbort(msg)
> return orig(repo, action, sourceset, onheadcheck, destspace=destspace)
>
> @@ -67,23 +67,23 @@
> movemark = node = None
> topic = repo.currenttopic
> if topic:
> - revs = repo.revs('.::topic(%s)', topic)
> + revs = repo.revs(b'.::topic(%s)', topic)
> else:
> revs = []
> if not revs:
> return None, None, None
> node = revs.last()
> if bookmarks.isactivewdirparent(repo):
> - movemark = repo['.'].node()
> + movemark = repo[b'.'].node()
> return node, movemark, None
>
> def desthistedit(orig, ui, repo):
> if not common.hastopicext(repo):
> return None
> - if not (ui.config('histedit', 'defaultrev', None) is None
> + if not (ui.config(b'histedit', b'defaultrev', None) is None
> and repo.currenttopic):
> return orig(ui, repo)
> - revs = repo.revs('::. and stack()')
> + revs = repo.revs(b'::. and stack()')
> if revs:
> return revs.min()
> return None
> @@ -106,8 +106,8 @@
>
> def modsetup(ui):
> """run a uisetup time to install all destinations wrapping"""
> - extensions.wrapfunction(destutil, '_destmergebranch', _destmergebranch)
> - bridx = destutil.destupdatesteps.index('branch')
> - destutil.destupdatesteps.insert(bridx, 'topic')
> - destutil.destupdatestepmap['topic'] = _destupdatetopic
> - extensions.wrapfunction(destutil, 'desthistedit', desthistedit)
> + extensions.wrapfunction(destutil, b'_destmergebranch', _destmergebranch)
> + bridx = destutil.destupdatesteps.index(b'branch')
> + destutil.destupdatesteps.insert(bridx, b'topic')
> + destutil.destupdatestepmap[b'topic'] = _destupdatetopic
> + extensions.wrapfunction(destutil, b'desthistedit', desthistedit)
> diff --git a/hgext3rd/topic/discovery.py b/hgext3rd/topic/discovery.py
> --- a/hgext3rd/topic/discovery.py
> +++ b/hgext3rd/topic/discovery.py
> @@ -27,12 +27,12 @@
> repo = pushop.repo.unfiltered()
> remote = pushop.remote
>
> - publishing = ('phases' not in remote.listkeys('namespaces')
> - or bool(remote.listkeys('phases').get('publishing', False)))
> + publishing = (b'phases' not in remote.listkeys(b'namespaces')
> + or bool(remote.listkeys(b'phases').get(b'publishing', False)))
>
> if not common.hastopicext(pushop.repo):
> return orig(pushop, *args, **kwargs)
> - elif ((publishing or not remote.capable('topics'))
> + elif ((publishing or not remote.capable(b'topics'))
> and not getattr(pushop, 'publish', False)):
> return orig(pushop, *args, **kwargs)
>
> @@ -40,7 +40,7 @@
> remotebranchmap = None
> origremotebranchmap = remote.branchmap
> publishednode = [c.node() for c in pushop.outdatedphases]
> - publishedset = repo.revs('ancestors(%ln + %ln)',
> + publishedset = repo.revs(b'ancestors(%ln + %ln)',
> publishednode,
> pushop.remotephases.publicheads)
>
> @@ -49,18 +49,18 @@
> def remotebranchmap():
> # drop topic information from changeset about to be published
> result = collections.defaultdict(list)
> - for branch, heads in origremotebranchmap().iteritems():
> - if ':' not in branch:
> + for branch, heads in origremotebranchmap().items():
> + if b':' not in branch:
> result[branch].extend(heads)
> else:
> - namedbranch = branch.split(':', 1)[0]
> + namedbranch = branch.split(b':', 1)[0]
> for h in heads:
> r = rev(h)
> if r is not None and r in publishedset:
> result[namedbranch].append(h)
> else:
> result[branch].append(h)
> - for heads in result.itervalues():
> + for heads in result.values():
> heads.sort()
> return result
>
> @@ -77,7 +77,7 @@
> return branch
> topic = ctx.topic()
> if topic:
> - branch = "%s:%s" % (branch, topic)
> + branch = b"%s:%s" % (branch, topic)
> return branch
>
> ctx.branch = branch
> @@ -95,7 +95,7 @@
> return branch, close
> topic = repo[rev].topic()
> if topic:
> - branch = "%s:%s" % (branch, topic)
> + branch = b"%s:%s" % (branch, topic)
> return branch, close
>
> rbc.branchinfo = branchinfo
> @@ -106,17 +106,17 @@
> repo.__class__ = repocls
> if remotebranchmap is not None:
> remote.branchmap = remotebranchmap
> - unxx = repo.filtered('unfiltered-topic')
> + unxx = repo.filtered(b'unfiltered-topic')
> repo.unfiltered = lambda: unxx
> pushop.repo = repo
> summary = orig(pushop)
> - for key, value in summary.iteritems():
> - if ':' in key: # This is a topic
> + for key, value in summary.items():
> + if b':' in key: # This is a topic
> if value[0] is None and value[1]:
> summary[key] = ([value[1][0]], ) + value[1:]
> return summary
> finally:
> - if 'unfiltered' in vars(repo):
> + if b'unfiltered' in vars(repo):
> del repo.unfiltered
> repo.__class__ = oldrepocls
> if remotebranchmap is not None:
> @@ -146,7 +146,7 @@
> def _nbheads(repo):
> data = {}
> for b in repo.branchmap().iterbranches():
> - if ':' in b[0]:
> + if b':' in b[0]:
> continue
> data[b[0]] = len(b[1])
> return data
> @@ -157,11 +157,11 @@
> if not common.hastopicext(op.repo) or op.repo.publishing():
> return
> tr = op.gettransaction()
> - if tr.hookargs['source'] not in ('push', 'serve'): # not a push
> + if tr.hookargs[b'source'] not in (b'push', b'serve'): # not a push
> return
> tr._prepushheads = _nbheads(op.repo)
> reporef = weakref.ref(op.repo)
> - if util.safehasattr(tr, 'validator'): # hg <= 4.7
> + if util.safehasattr(tr, b'validator'): # hg <= 4.7
> oldvalidator = tr.validator
> else:
> oldvalidator = tr._validator
> @@ -171,18 +171,18 @@
> if repo is not None:
> repo.invalidatecaches()
> finalheads = _nbheads(repo)
> - for branch, oldnb in tr._prepushheads.iteritems():
> + for branch, oldnb in tr._prepushheads.items():
> newnb = finalheads.pop(branch, 0)
> if oldnb < newnb:
> - msg = _('push create a new head on branch "%s"' % branch)
> + msg = _(b'push create a new head on branch "%s"' % branch)
> raise error.Abort(msg)
> - for branch, newnb in finalheads.iteritems():
> + for branch, newnb in finalheads.items():
> if 1 < newnb:
> - msg = _('push create more than 1 head on new branch "%s"'
> + msg = _(b'push create more than 1 head on new branch "%s"'
> % branch)
> raise error.Abort(msg)
> return oldvalidator(tr)
> - if util.safehasattr(tr, 'validator'): # hg <= 4.7
> + if util.safehasattr(tr, b'validator'): # hg <= 4.7
> tr.validator = validator
> else:
> tr._validator = validator
> @@ -190,7 +190,7 @@
>
> def _pushb2phases(orig, pushop, bundler):
> if common.hastopicext(pushop.repo):
> - checktypes = ('check:heads', 'check:updated-heads')
> + checktypes = (b'check:heads', b'check:updated-heads')
> hascheck = any(p.type in checktypes for p in bundler._parts)
> if not hascheck and pushop.outdatedphases:
> exchange._pushb2ctxcheckheads(pushop, bundler)
> @@ -198,23 +198,23 @@
>
> def wireprotocaps(orig, repo, proto):
> caps = orig(repo, proto)
> - if common.hastopicext(repo) and repo.peer().capable('topics'):
> - caps.append('topics')
> + if common.hastopicext(repo) and repo.peer().capable(b'topics'):
> + caps.append(b'topics')
> return caps
>
> def modsetup(ui):
> """run at uisetup time to install all destinations wrapping"""
> - extensions.wrapfunction(discovery, '_headssummary', _headssummary)
> - extensions.wrapfunction(wireproto, 'branchmap', wireprotobranchmap)
> - extensions.wrapfunction(wireproto, '_capabilities', wireprotocaps)
> + extensions.wrapfunction(discovery, b'_headssummary', _headssummary)
> + extensions.wrapfunction(wireproto, b'branchmap', wireprotobranchmap)
> + extensions.wrapfunction(wireproto, b'_capabilities', wireprotocaps)
> # we need a proper wrap b2 part stuff
> - extensions.wrapfunction(bundle2, 'handlecheckheads', handlecheckheads)
> + extensions.wrapfunction(bundle2, b'handlecheckheads', handlecheckheads)
> bundle2.handlecheckheads.params = frozenset()
> - bundle2.parthandlermapping['check:heads'] = bundle2.handlecheckheads
> - if util.safehasattr(bundle2, 'handlecheckupdatedheads'):
> + bundle2.parthandlermapping[b'check:heads'] = bundle2.handlecheckheads
> + if util.safehasattr(bundle2, b'handlecheckupdatedheads'):
> # we still need a proper wrap b2 part stuff
> - extensions.wrapfunction(bundle2, 'handlecheckupdatedheads', handlecheckheads)
> + extensions.wrapfunction(bundle2, b'handlecheckupdatedheads', handlecheckheads)
> bundle2.handlecheckupdatedheads.params = frozenset()
> - bundle2.parthandlermapping['check:updated-heads'] = bundle2.handlecheckupdatedheads
> - extensions.wrapfunction(exchange, '_pushb2phases', _pushb2phases)
> - exchange.b2partsgenmapping['phase'] = exchange._pushb2phases
> + bundle2.parthandlermapping[b'check:updated-heads'] = bundle2.handlecheckupdatedheads
> + extensions.wrapfunction(exchange, b'_pushb2phases', _pushb2phases)
> + exchange.b2partsgenmapping[b'phase'] = exchange._pushb2phases
> diff --git a/hgext3rd/topic/evolvebits.py b/hgext3rd/topic/evolvebits.py
> --- a/hgext3rd/topic/evolvebits.py
> +++ b/hgext3rd/topic/evolvebits.py
> @@ -78,8 +78,8 @@
> newer = obsutil.successorssets(repo, obs.node())
> # search of a parent which is not killed
> while not newer:
> - ui.debug("stabilize target %s is plain dead,"
> - " trying to stabilize on its parent\n" %
> + ui.debug(b"stabilize target %s is plain dead,"
> + b" trying to stabilize on its parent\n" %
> obs)
> obs = obs.parents()[0]
> newer = obsutil.successorssets(repo, obs.node())
> @@ -88,7 +88,7 @@
> # we should pick as arbitrary one
> raise MultipleSuccessorsError(newer)
> elif 1 < len(newer[0]):
> - splitheads = list(repo.revs('heads(%ln::%ln)', newer[0], newer[0]))
> + splitheads = list(repo.revs(b'heads(%ln::%ln)', newer[0], newer[0]))
> if 1 < len(splitheads):
> # split case, See if we can make sense of it.
> raise MultipleSuccessorsError(newer)
> diff --git a/hgext3rd/topic/flow.py b/hgext3rd/topic/flow.py
> --- a/hgext3rd/topic/flow.py
> +++ b/hgext3rd/topic/flow.py
> @@ -12,18 +12,18 @@
> from mercurial.i18n import _
>
> def enforcesinglehead(repo, tr):
> - for name, heads in repo.filtered('visible').branchmap().iteritems():
> + for name, heads in repo.filtered(b'visible').branchmap().items():
> if len(heads) > 1:
> hexs = [node.short(n) for n in heads]
> - raise error.Abort(_('%d heads on "%s"') % (len(heads), name),
> - hint=(', '.join(hexs)))
> + raise error.Abort(_(b'%d heads on "%s"') % (len(heads), name),
> + hint=(b', '.join(hexs)))
>
> def publishbarebranch(repo, tr):
> """Publish changeset without topic"""
> - if 'node' not in tr.hookargs: # no new node
> + if b'node' not in tr.hookargs: # no new node
> return
> - startnode = node.bin(tr.hookargs['node'])
> - topublish = repo.revs('not public() and (%n:) - hidden() - topic()', startnode)
> + startnode = node.bin(tr.hookargs[b'node'])
> + topublish = repo.revs(b'not public() and (%n:) - hidden() - topic()', startnode)
> if topublish:
> cl = repo.changelog
> nodes = [cl.node(r) for r in topublish]
> @@ -31,56 +31,56 @@
>
> def rejectuntopicedchangeset(repo, tr):
> """Reject the push if there are changeset without topic"""
> - if 'node' not in tr.hookargs: # no new revs
> + if b'node' not in tr.hookargs: # no new revs
> return
>
> - startnode = node.bin(tr.hookargs['node'])
> + startnode = node.bin(tr.hookargs[b'node'])
>
> - mode = repo.ui.config('experimental', 'topic-mode.server', 'ignore')
> + mode = repo.ui.config(b'experimental', b'topic-mode.server', b'ignore')
>
> - untopiced = repo.revs('not public() and (%n:) - hidden() - topic()', startnode)
> + untopiced = repo.revs(b'not public() and (%n:) - hidden() - topic()', startnode)
> if untopiced:
> num = len(untopiced)
> fnode = repo[untopiced.first()].hex()[:10]
> if num == 1:
> - msg = _("%s") % fnode
> + msg = _(b"%s") % fnode
> else:
> - msg = _("%s and %d more") % (fnode, num - 1)
> - if mode == 'warning':
> - fullmsg = _("pushed draft changeset without topic: %s\n")
> + msg = _(b"%s and %d more") % (fnode, num - 1)
> + if mode == b'warning':
> + fullmsg = _(b"pushed draft changeset without topic: %s\n")
> repo.ui.warn(fullmsg % msg)
> - elif mode == 'enforce':
> - fullmsg = _("rejecting draft changesets: %s")
> + elif mode == b'enforce':
> + fullmsg = _(b"rejecting draft changesets: %s")
> raise error.Abort(fullmsg % msg)
> else:
> - repo.ui.warn(_("unknown 'topic-mode.server': %s\n" % mode))
> + repo.ui.warn(_(b"unknown 'topic-mode.server': %s\n" % mode))
>
> def reject_publish(repo, tr):
> """prevent a transaction to be publish anything"""
> published = set()
> - for r, (o, n) in tr.changes['phases'].items():
> + for r, (o, n) in tr.changes[b'phases'].items():
> if n == phases.public:
> published.add(r)
> if published:
> r = min(published)
> - msg = "rejecting publishing of changeset %s" % repo[r]
> + msg = b"rejecting publishing of changeset %s" % repo[r]
> if len(published) > 1:
> - msg += ' and %d others' % (len(published) - 1)
> + msg += b' and %d others' % (len(published) - 1)
> raise error.Abort(msg)
>
> def wrappush(orig, repo, remote, *args, **kwargs):
> """interpret the --publish flag and pass it to the push operation"""
> newargs = kwargs.copy()
> - if kwargs.pop('publish', False):
> - opargs = kwargs.get('opargs')
> + if kwargs.pop(b'publish', False):
> + opargs = kwargs.get(b'opargs')
> if opargs is None:
> opargs = {}
> - newargs['opargs'] = opargs.copy()
> - newargs['opargs']['publish'] = True
> + newargs[b'opargs'] = opargs.copy()
> + newargs[b'opargs'][b'publish'] = True
> return orig(repo, remote, *args, **newargs)
>
> def extendpushoperation(orig, self, *args, **kwargs):
> - publish = kwargs.pop('publish', False)
> + publish = kwargs.pop(b'publish', False)
> orig(self, *args, **kwargs)
> self.publish = publish
>
> @@ -90,16 +90,16 @@
> if not pushop.remotephases.publishing:
> unfi = pushop.repo.unfiltered()
> droots = pushop.remotephases.draftroots
> - revset = '%ln and (not public() or %ln::)'
> + revset = b'%ln and (not public() or %ln::)'
> future = list(unfi.set(revset, pushop.futureheads, droots))
> pushop.outdatedphases = future
>
> def installpushflag(ui):
> - entry = extensions.wrapcommand(commands.table, 'push', wrappush)
> - if not any(opt for opt in entry[1] if opt[1] == 'publish'): # hg <= 4.9
> - entry[1].append(('', 'publish', False,
> - _('push the changeset as public')))
> - extensions.wrapfunction(exchange.pushoperation, '__init__',
> + entry = extensions.wrapcommand(commands.table, b'push', wrappush)
> + if not any(opt for opt in entry[1] if opt[1] == b'publish'): # hg <= 4.9
> + entry[1].append((b'', b'publish', False,
> + _(b'push the changeset as public')))
> + extensions.wrapfunction(exchange.pushoperation, b'__init__',
> extendpushoperation)
> - extensions.wrapfunction(exchange, '_pushdiscoveryphase', wrapphasediscovery)
> - exchange.pushdiscoverymapping['phase'] = exchange._pushdiscoveryphase
> + extensions.wrapfunction(exchange, b'_pushdiscoveryphase', wrapphasediscovery)
> + exchange.pushdiscoverymapping[b'phase'] = exchange._pushdiscoveryphase
> diff --git a/hgext3rd/topic/randomname.py b/hgext3rd/topic/randomname.py
> --- a/hgext3rd/topic/randomname.py
> +++ b/hgext3rd/topic/randomname.py
> @@ -8,1003 +8,1003 @@
> import random
>
> animals = [
> - 'aardvark',
> - 'albatross',
> - 'alligator',
> - 'alpaca',
> - 'ant',
> - 'anteater',
> - 'antelope',
> - 'ape',
> - 'armadillo',
> - 'baboon',
> - 'badger',
> - 'barracuda',
> - 'bat',
> - 'bear',
> - 'beaver',
> - 'bee',
> - 'beetle',
> - 'bison',
> - 'boar',
> - 'buffalo',
> - 'bushbaby',
> - 'bustard',
> - 'butterfly',
> - 'camel',
> - 'capuchin',
> - 'carabao',
> - 'caribou',
> - 'cat',
> - 'caterpillar',
> - 'cattle',
> - 'chameleon',
> - 'chamois',
> - 'cheetah',
> - 'chicken',
> - 'chimpanzee',
> - 'chinchilla',
> - 'chipmunk',
> - 'chough',
> - 'cicada',
> - 'clam',
> - 'cobra',
> - 'cockroach',
> - 'cod',
> - 'cormorant',
> - 'coyote',
> - 'crab',
> - 'crane',
> - 'cricket',
> - 'crocodile',
> - 'crow',
> - 'curlew',
> - 'deer',
> - 'dinosaur',
> - 'dog',
> - 'dogfish',
> - 'dolphin',
> - 'donkey',
> - 'dotterel',
> - 'dove',
> - 'dragon',
> - 'dragonfly',
> - 'duck',
> - 'dugong',
> - 'dunlin',
> - 'eagle',
> - 'echidna',
> - 'eel',
> - 'eland',
> - 'elephant',
> - 'elk',
> - 'emu',
> - 'falcon',
> - 'ferret',
> - 'finch',
> - 'fish',
> - 'flamingo',
> - 'fly',
> - 'fox',
> - 'frog',
> - 'gaur',
> - 'gazelle',
> - 'gecko',
> - 'gerbil',
> - 'giraffe',
> - 'gnat',
> - 'gnu',
> - 'goat',
> - 'goldfish',
> - 'goose',
> - 'gorilla',
> - 'goshawk',
> - 'grasshopper',
> - 'grouse',
> - 'guanaco',
> - 'guinea',
> - 'gull',
> - 'hamster',
> - 'hare',
> - 'hawk',
> - 'hedgehog',
> - 'heron',
> - 'herring',
> - 'hippopotamus',
> - 'hornet',
> - 'horse',
> - 'horsecrab',
> - 'hound',
> - 'hummingbird',
> - 'hyena',
> - 'hyrax',
> - 'ibex',
> - 'ibis',
> - 'iguana',
> - 'impala',
> - 'insect',
> - 'jackal',
> - 'jaguar',
> - 'jay',
> - 'jellyfish',
> - 'kangaroo',
> - 'koala',
> - 'kouprey',
> - 'kudu',
> - 'lapwing',
> - 'lark',
> - 'lemming',
> - 'lemur',
> - 'leopard',
> - 'lion',
> - 'lizard',
> - 'llama',
> - 'lobster',
> - 'locust',
> - 'loris',
> - 'louse',
> - 'lynx',
> - 'lyrebird',
> - 'magpie',
> - 'mallard',
> - 'mammoth',
> - 'manatee',
> - 'marten',
> - 'meerkat',
> - 'mink',
> - 'minnow',
> - 'mole',
> - 'mongoose',
> - 'monkey',
> - 'moose',
> - 'mosquito',
> - 'mouse',
> - 'mule',
> - 'muskrat',
> - 'narwhal',
> - 'newt',
> - 'nightingale',
> - 'numbat',
> - 'octopus',
> - 'okapi',
> - 'opossum',
> - 'oryx',
> - 'ostrich',
> - 'otter',
> - 'owl',
> - 'ox',
> - 'oyster',
> - 'panda',
> - 'panther',
> - 'parrot',
> - 'partridge',
> - 'peacock',
> - 'peafowl',
> - 'pelican',
> - 'penguin',
> - 'pheasant',
> - 'pig',
> - 'pigeon',
> - 'platypus',
> - 'pony',
> - 'porcupine',
> - 'porpoise',
> - 'puffin',
> - 'pug',
> - 'quagga',
> - 'quail',
> - 'quelea',
> - 'rabbit',
> - 'raccoon',
> - 'ram',
> - 'rat',
> - 'raven',
> - 'reindeer',
> - 'rhea',
> - 'rhinoceros',
> - 'rook',
> - 'ruff',
> - 'salamander',
> - 'salmon',
> - 'sambar',
> - 'sandpiper',
> - 'sardine',
> - 'scorpion',
> - 'seahorse',
> - 'seal',
> - 'serval',
> - 'shark',
> - 'sheep',
> - 'shrew',
> - 'shrimp',
> - 'skink',
> - 'skunk',
> - 'snail',
> - 'snake',
> - 'spider',
> - 'squid',
> - 'squirrel',
> - 'starling',
> - 'stinkbug',
> - 'stork',
> - 'swan',
> - 'tapir',
> - 'tarsier',
> - 'termite',
> - 'tern',
> - 'tiger',
> - 'toad',
> - 'trout',
> - 'turkey',
> - 'turtle',
> - 'unicorn',
> - 'viper',
> - 'vulture',
> - 'wallaby',
> - 'walrus',
> - 'wasp',
> - 'weasel',
> - 'whale',
> - 'wolf',
> - 'wolverine',
> - 'wombat',
> - 'woodchuck',
> - 'woodcock',
> - 'woodpecker',
> - 'worm',
> - 'wren',
> - 'yak',
> - 'zebra',
> - 'zorilla'
> + b'aardvark',
> + b'albatross',
> + b'alligator',
> + b'alpaca',
> + b'ant',
> + b'anteater',
> + b'antelope',
> + b'ape',
> + b'armadillo',
> + b'baboon',
> + b'badger',
> + b'barracuda',
> + b'bat',
> + b'bear',
> + b'beaver',
> + b'bee',
> + b'beetle',
> + b'bison',
> + b'boar',
> + b'buffalo',
> + b'bushbaby',
> + b'bustard',
> + b'butterfly',
> + b'camel',
> + b'capuchin',
> + b'carabao',
> + b'caribou',
> + b'cat',
> + b'caterpillar',
> + b'cattle',
> + b'chameleon',
> + b'chamois',
> + b'cheetah',
> + b'chicken',
> + b'chimpanzee',
> + b'chinchilla',
> + b'chipmunk',
> + b'chough',
> + b'cicada',
> + b'clam',
> + b'cobra',
> + b'cockroach',
> + b'cod',
> + b'cormorant',
> + b'coyote',
> + b'crab',
> + b'crane',
> + b'cricket',
> + b'crocodile',
> + b'crow',
> + b'curlew',
> + b'deer',
> + b'dinosaur',
> + b'dog',
> + b'dogfish',
> + b'dolphin',
> + b'donkey',
> + b'dotterel',
> + b'dove',
> + b'dragon',
> + b'dragonfly',
> + b'duck',
> + b'dugong',
> + b'dunlin',
> + b'eagle',
> + b'echidna',
> + b'eel',
> + b'eland',
> + b'elephant',
> + b'elk',
> + b'emu',
> + b'falcon',
> + b'ferret',
> + b'finch',
> + b'fish',
> + b'flamingo',
> + b'fly',
> + b'fox',
> + b'frog',
> + b'gaur',
> + b'gazelle',
> + b'gecko',
> + b'gerbil',
> + b'giraffe',
> + b'gnat',
> + b'gnu',
> + b'goat',
> + b'goldfish',
> + b'goose',
> + b'gorilla',
> + b'goshawk',
> + b'grasshopper',
> + b'grouse',
> + b'guanaco',
> + b'guinea',
> + b'gull',
> + b'hamster',
> + b'hare',
> + b'hawk',
> + b'hedgehog',
> + b'heron',
> + b'herring',
> + b'hippopotamus',
> + b'hornet',
> + b'horse',
> + b'horsecrab',
> + b'hound',
> + b'hummingbird',
> + b'hyena',
> + b'hyrax',
> + b'ibex',
> + b'ibis',
> + b'iguana',
> + b'impala',
> + b'insect',
> + b'jackal',
> + b'jaguar',
> + b'jay',
> + b'jellyfish',
> + b'kangaroo',
> + b'koala',
> + b'kouprey',
> + b'kudu',
> + b'lapwing',
> + b'lark',
> + b'lemming',
> + b'lemur',
> + b'leopard',
> + b'lion',
> + b'lizard',
> + b'llama',
> + b'lobster',
> + b'locust',
> + b'loris',
> + b'louse',
> + b'lynx',
> + b'lyrebird',
> + b'magpie',
> + b'mallard',
> + b'mammoth',
> + b'manatee',
> + b'marten',
> + b'meerkat',
> + b'mink',
> + b'minnow',
> + b'mole',
> + b'mongoose',
> + b'monkey',
> + b'moose',
> + b'mosquito',
> + b'mouse',
> + b'mule',
> + b'muskrat',
> + b'narwhal',
> + b'newt',
> + b'nightingale',
> + b'numbat',
> + b'octopus',
> + b'okapi',
> + b'opossum',
> + b'oryx',
> + b'ostrich',
> + b'otter',
> + b'owl',
> + b'ox',
> + b'oyster',
> + b'panda',
> + b'panther',
> + b'parrot',
> + b'partridge',
> + b'peacock',
> + b'peafowl',
> + b'pelican',
> + b'penguin',
> + b'pheasant',
> + b'pig',
> + b'pigeon',
> + b'platypus',
> + b'pony',
> + b'porcupine',
> + b'porpoise',
> + b'puffin',
> + b'pug',
> + b'quagga',
> + b'quail',
> + b'quelea',
> + b'rabbit',
> + b'raccoon',
> + b'ram',
> + b'rat',
> + b'raven',
> + b'reindeer',
> + b'rhea',
> + b'rhinoceros',
> + b'rook',
> + b'ruff',
> + b'salamander',
> + b'salmon',
> + b'sambar',
> + b'sandpiper',
> + b'sardine',
> + b'scorpion',
> + b'seahorse',
> + b'seal',
> + b'serval',
> + b'shark',
> + b'sheep',
> + b'shrew',
> + b'shrimp',
> + b'skink',
> + b'skunk',
> + b'snail',
> + b'snake',
> + b'spider',
> + b'squid',
> + b'squirrel',
> + b'starling',
> + b'stinkbug',
> + b'stork',
> + b'swan',
> + b'tapir',
> + b'tarsier',
> + b'termite',
> + b'tern',
> + b'tiger',
> + b'toad',
> + b'trout',
> + b'turkey',
> + b'turtle',
> + b'unicorn',
> + b'viper',
> + b'vulture',
> + b'wallaby',
> + b'walrus',
> + b'wasp',
> + b'weasel',
> + b'whale',
> + b'wolf',
> + b'wolverine',
> + b'wombat',
> + b'woodchuck',
> + b'woodcock',
> + b'woodpecker',
> + b'worm',
> + b'wren',
> + b'yak',
> + b'zebra',
> + b'zorilla'
> ]
>
> adjectives = [
> - 'abiding',
> - 'abject',
> - 'ablaze',
> - 'able',
> - 'aboard',
> - 'abounding',
> - 'absorbed',
> - 'absorbing',
> - 'abstracted',
> - 'abundant',
> - 'acceptable',
> - 'accessible',
> - 'accurate',
> - 'acoustic',
> - 'adamant',
> - 'adaptable',
> - 'adhesive',
> - 'adjoining',
> - 'adorable',
> - 'adventurous',
> - 'affable',
> - 'affectionate',
> - 'agreeable',
> - 'alert',
> - 'alive',
> - 'alluring',
> - 'amazing',
> - 'ambiguous',
> - 'ambitious',
> - 'amiable',
> - 'amicable',
> - 'amused',
> - 'amusing',
> - 'ancient',
> - 'animated',
> - 'apricot',
> - 'aquatic',
> - 'arctic',
> - 'arenaceous',
> - 'aromatic',
> - 'aspiring',
> - 'assiduous',
> - 'assorted',
> - 'astonishing',
> - 'attractive',
> - 'auspicious',
> - 'automatic',
> - 'available',
> - 'average',
> - 'awake',
> - 'aware',
> - 'awesome',
> - 'axiomatic',
> - 'bashful',
> - 'bawdy',
> - 'beautiful',
> - 'beefy',
> - 'befitting',
> - 'beneficial',
> - 'benevolent',
> - 'bent',
> - 'best',
> - 'better',
> - 'bewildered',
> - 'bewitching',
> - 'big',
> - 'billowy',
> - 'bizarre',
> - 'black',
> - 'blithe',
> - 'blue',
> - 'blushing',
> - 'bouncy',
> - 'boundless',
> - 'brainy',
> - 'brash',
> - 'brave',
> - 'brawny',
> - 'brazen',
> - 'breezy',
> - 'brief',
> - 'bright',
> - 'brilliant',
> - 'broad',
> - 'brown',
> - 'bucolic',
> - 'bulky',
> - 'bumpy',
> - 'burgundy',
> - 'burly',
> - 'bustling',
> - 'busy',
> - 'calm',
> - 'capable',
> - 'capricious',
> - 'captivating',
> - 'carefree',
> - 'careful',
> - 'caring',
> - 'carrot',
> - 'ceaseless',
> - 'cerise',
> - 'certain',
> - 'challenging',
> - 'changeable',
> - 'charming',
> - 'cheerful',
> - 'chief',
> - 'chilly',
> - 'chipper',
> - 'classy',
> - 'clean',
> - 'clear',
> - 'clever',
> - 'cloudy',
> - 'coherent',
> - 'colorful',
> - 'colossal',
> - 'comfortable',
> - 'common',
> - 'communicative',
> - 'compassionate',
> - 'complete',
> - 'complex',
> - 'compulsive',
> - 'confused',
> - 'conscientious',
> - 'conscious',
> - 'conservative',
> - 'considerate',
> - 'convivial',
> - 'cooing',
> - 'cool',
> - 'cooperative',
> - 'coordinated',
> - 'courageous',
> - 'courteous',
> - 'crazy',
> - 'creative',
> - 'crispy',
> - 'crooked',
> - 'crowded',
> - 'cuddly',
> - 'cultured',
> - 'cunning',
> - 'curious',
> - 'curly',
> - 'curved',
> - 'curvy',
> - 'cut',
> - 'cute',
> - 'daily',
> - 'damp',
> - 'dapper',
> - 'dashing',
> - 'dazzling',
> - 'dear',
> - 'debonair',
> - 'decisive',
> - 'decorous',
> - 'deep',
> - 'defiant',
> - 'delicate',
> - 'delicious',
> - 'delighted',
> - 'delightful',
> - 'delirious',
> - 'descriptive',
> - 'detached',
> - 'detailed',
> - 'determined',
> - 'different',
> - 'diligent',
> - 'diminutive',
> - 'diplomatic',
> - 'discreet',
> - 'distinct',
> - 'distinctive',
> - 'dramatic',
> - 'dry',
> - 'dynamic',
> - 'dynamite',
> - 'eager',
> - 'early',
> - 'earthy',
> - 'easy',
> - 'easygoing',
> - 'eatable',
> - 'economic',
> - 'ecstatic',
> - 'educated',
> - 'efficacious',
> - 'efficient',
> - 'effortless',
> - 'eight',
> - 'elastic',
> - 'elated',
> - 'electric',
> - 'elegant',
> - 'elfin',
> - 'elite',
> - 'eminent',
> - 'emotional',
> - 'enchanted',
> - 'enchanting',
> - 'encouraging',
> - 'endless',
> - 'energetic',
> - 'enormous',
> - 'entertaining',
> - 'enthusiastic',
> - 'envious',
> - 'epicurean',
> - 'equable',
> - 'equal',
> - 'eternal',
> - 'ethereal',
> - 'evanescent',
> - 'even',
> - 'excellent',
> - 'excited',
> - 'exciting',
> - 'exclusive',
> - 'exotic',
> - 'expensive',
> - 'exquisite',
> - 'extroverted',
> - 'exuberant',
> - 'exultant',
> - 'fabulous',
> - 'fair',
> - 'faithful',
> - 'familiar',
> - 'famous',
> - 'fancy',
> - 'fantastic',
> - 'far',
> - 'fascinated',
> - 'fast',
> - 'fearless',
> - 'female',
> - 'fertile',
> - 'festive',
> - 'few',
> - 'fine',
> - 'first',
> - 'five',
> - 'fixed',
> - 'flamboyant',
> - 'flashy',
> - 'flat',
> - 'flawless',
> - 'flirtatious',
> - 'florid',
> - 'flowery',
> - 'fluffy',
> - 'fluttering',
> - 'foamy',
> - 'foolish',
> - 'foregoing',
> - 'fortunate',
> - 'four',
> - 'frank',
> - 'free',
> - 'frequent',
> - 'fresh',
> - 'friendly',
> - 'full',
> - 'functional',
> - 'funny',
> - 'furry',
> - 'future',
> - 'futuristic',
> - 'fuzzy',
> - 'gabby',
> - 'gainful',
> - 'garrulous',
> - 'general',
> - 'generous',
> - 'gentle',
> - 'giant',
> - 'giddy',
> - 'gifted',
> - 'gigantic',
> - 'gilded',
> - 'glamorous',
> - 'gleaming',
> - 'glorious',
> - 'glossy',
> - 'glowing',
> - 'godly',
> - 'good',
> - 'goofy',
> - 'gorgeous',
> - 'graceful',
> - 'grandiose',
> - 'grateful',
> - 'gratis',
> - 'gray',
> - 'great',
> - 'green',
> - 'gregarious',
> - 'grey',
> - 'groovy',
> - 'guiltless',
> - 'gusty',
> - 'guttural',
> - 'habitual',
> - 'half',
> - 'hallowed',
> - 'halting',
> - 'handsome',
> - 'happy',
> - 'hard',
> - 'hardworking',
> - 'harmonious',
> - 'heady',
> - 'healthy',
> - 'heavenly',
> - 'helpful',
> - 'hilarious',
> - 'historical',
> - 'holistic',
> - 'hollow',
> - 'honest',
> - 'honorable',
> - 'hopeful',
> - 'hospitable',
> - 'hot',
> - 'huge',
> - 'humorous',
> - 'hungry',
> - 'hushed',
> - 'hypnotic',
> - 'illustrious',
> - 'imaginary',
> - 'imaginative',
> - 'immense',
> - 'imminent',
> - 'impartial',
> - 'important',
> - 'imported',
> - 'impossible',
> - 'incandescent',
> - 'inconclusive',
> - 'incredible',
> - 'independent',
> - 'industrious',
> - 'inexpensive',
> - 'innate',
> - 'innocent',
> - 'inquisitive',
> - 'instinctive',
> - 'intellectual',
> - 'intelligent',
> - 'intense',
> - 'interesting',
> - 'internal',
> - 'intuitive',
> - 'inventive',
> - 'invincible',
> - 'jazzy',
> - 'jolly',
> - 'joyful',
> - 'joyous',
> - 'judicious',
> - 'juicy',
> - 'jumpy',
> - 'keen',
> - 'kind',
> - 'kindhearted',
> - 'kindly',
> - 'knotty',
> - 'knowing',
> - 'knowledgeable',
> - 'known',
> - 'laconic',
> - 'large',
> - 'lavish',
> - 'lean',
> - 'learned',
> - 'left',
> - 'legal',
> - 'level',
> - 'light',
> - 'likeable',
> - 'literate',
> - 'little',
> - 'lively',
> - 'living',
> - 'long',
> - 'longing',
> - 'loud',
> - 'lovely',
> - 'loving',
> - 'loyal',
> - 'lucky',
> - 'luminous',
> - 'lush',
> - 'luxuriant',
> - 'luxurious',
> - 'lyrical',
> - 'magenta',
> - 'magical',
> - 'magnificent',
> - 'majestic',
> - 'male',
> - 'mammoth',
> - 'many',
> - 'marvelous',
> - 'massive',
> - 'material',
> - 'mature',
> - 'meandering',
> - 'meaty',
> - 'medical',
> - 'mellow',
> - 'melodic',
> - 'melted',
> - 'merciful',
> - 'mighty',
> - 'miniature',
> - 'miniscule',
> - 'minor',
> - 'minute',
> - 'misty',
> - 'modern',
> - 'modest',
> - 'momentous',
> - 'motionless',
> - 'mountainous',
> - 'mute',
> - 'mysterious',
> - 'narrow',
> - 'natural',
> - 'near',
> - 'neat',
> - 'nebulous',
> - 'necessary',
> - 'neighborly',
> - 'new',
> - 'next',
> - 'nice',
> - 'nifty',
> - 'nimble',
> - 'nine',
> - 'nippy',
> - 'noiseless',
> - 'noisy',
> - 'nonchalant',
> - 'normal',
> - 'numberless',
> - 'numerous',
> - 'nutritious',
> - 'obedient',
> - 'observant',
> - 'obtainable',
> - 'oceanic',
> - 'omniscient',
> - 'one',
> - 'open',
> - 'opposite',
> - 'optimal',
> - 'optimistic',
> - 'opulent',
> - 'orange',
> - 'ordinary',
> - 'organic',
> - 'outgoing',
> - 'outrageous',
> - 'outstanding',
> - 'oval',
> - 'overjoyed',
> - 'overt',
> - 'palatial',
> - 'panoramic',
> - 'parallel',
> - 'passionate',
> - 'past',
> - 'pastoral',
> - 'patient',
> - 'peaceful',
> - 'perfect',
> - 'periodic',
> - 'permissible',
> - 'perpetual',
> - 'persistent',
> - 'petite',
> - 'philosophical',
> - 'physical',
> - 'picturesque',
> - 'pink',
> - 'pioneering',
> - 'piquant',
> - 'plausible',
> - 'pleasant',
> - 'plucky',
> - 'poised',
> - 'polite',
> - 'possible',
> - 'powerful',
> - 'practical',
> - 'precious',
> - 'premium',
> - 'present',
> - 'pretty',
> - 'previous',
> - 'private',
> - 'probable',
> - 'productive',
> - 'profound',
> - 'profuse',
> - 'protective',
> - 'proud',
> - 'psychedelic',
> - 'public',
> - 'pumped',
> - 'purple',
> - 'purring',
> - 'puzzled',
> - 'puzzling',
> - 'quaint',
> - 'quick',
> - 'quicker',
> - 'quickest',
> - 'quiet',
> - 'quirky',
> - 'quixotic',
> - 'quizzical',
> - 'rainy',
> - 'rapid',
> - 'rare',
> - 'rational',
> - 'ready',
> - 'real',
> - 'rebel',
> - 'receptive',
> - 'red',
> - 'reflective',
> - 'regular',
> - 'relaxed',
> - 'reliable',
> - 'relieved',
> - 'remarkable',
> - 'reminiscent',
> - 'reserved',
> - 'resolute',
> - 'resonant',
> - 'resourceful',
> - 'responsible',
> - 'rich',
> - 'ridiculous',
> - 'right',
> - 'rightful',
> - 'ripe',
> - 'ritzy',
> - 'roasted',
> - 'robust',
> - 'romantic',
> - 'roomy',
> - 'round',
> - 'royal',
> - 'ruddy',
> - 'rural',
> - 'rustic',
> - 'sable',
> - 'safe',
> - 'salty',
> - 'same',
> - 'satisfying',
> - 'savory',
> - 'scientific',
> - 'scintillating',
> - 'scrumptious',
> - 'second',
> - 'secret',
> - 'secretive',
> - 'seemly',
> - 'selective',
> - 'sensible',
> - 'separate',
> - 'shaggy',
> - 'shaky',
> - 'shining',
> - 'shiny',
> - 'short',
> - 'shy',
> - 'silent',
> - 'silky',
> - 'silly',
> - 'simple',
> - 'simplistic',
> - 'sincere',
> - 'six',
> - 'sizzling',
> - 'skillful',
> - 'sleepy',
> - 'slick',
> - 'slim',
> - 'smart',
> - 'smiling',
> - 'smooth',
> - 'soaring',
> - 'sociable',
> - 'soft',
> - 'solid',
> - 'sophisticated',
> - 'sparkling',
> - 'special',
> - 'spectacular',
> - 'speedy',
> - 'spicy',
> - 'spiffy',
> - 'spiritual',
> - 'splendid',
> - 'spooky',
> - 'spotless',
> - 'spotted',
> - 'square',
> - 'standing',
> - 'statuesque',
> - 'steadfast',
> - 'steady',
> - 'steep',
> - 'stimulating',
> - 'straight',
> - 'straightforward',
> - 'striking',
> - 'striped',
> - 'strong',
> - 'stunning',
> - 'stupendous',
> - 'sturdy',
> - 'subsequent',
> - 'substantial',
> - 'subtle',
> - 'successful',
> - 'succinct',
> - 'sudden',
> - 'super',
> - 'superb',
> - 'supreme',
> - 'swanky',
> - 'sweet',
> - 'swift',
> - 'sympathetic',
> - 'synonymous',
> - 'talented',
> - 'tall',
> - 'tame',
> - 'tan',
> - 'tangible',
> - 'tangy',
> - 'tasteful',
> - 'tasty',
> - 'telling',
> - 'temporary',
> - 'tempting',
> - 'ten',
> - 'tender',
> - 'terrific',
> - 'tested',
> - 'thankful',
> - 'therapeutic',
> - 'thin',
> - 'thinkable',
> - 'third',
> - 'thoughtful',
> - 'three',
> - 'thrifty',
> - 'tidy',
> - 'tiny',
> - 'toothsome',
> - 'towering',
> - 'tranquil',
> - 'tremendous',
> - 'tricky',
> - 'true',
> - 'truthful',
> - 'two',
> - 'typical',
> - 'ubiquitous',
> - 'ultra',
> - 'unassuming',
> - 'unbiased',
> - 'uncovered',
> - 'understanding',
> - 'understood',
> - 'unequaled',
> - 'unique',
> - 'unusual',
> - 'unwritten',
> - 'upbeat',
> - 'useful',
> - 'utopian',
> - 'utter',
> - 'uttermost',
> - 'valuable',
> - 'various',
> - 'vast',
> - 'verdant',
> - 'vermilion',
> - 'versatile',
> - 'versed',
> - 'victorious',
> - 'vigorous',
> - 'violet',
> - 'vivacious',
> - 'voiceless',
> - 'voluptuous',
> - 'wacky',
> - 'waiting',
> - 'wakeful',
> - 'wandering',
> - 'warm',
> - 'warmhearted',
> - 'wealthy',
> - 'whimsical',
> - 'whispering',
> - 'white',
> - 'whole',
> - 'wholesale',
> - 'whopping',
> - 'wide',
> - 'wiggly',
> - 'wild',
> - 'willing',
> - 'windy',
> - 'winsome',
> - 'wiry',
> - 'wise',
> - 'wistful',
> - 'witty',
> - 'womanly',
> - 'wonderful',
> - 'workable',
> - 'young',
> - 'youthful',
> - 'yummy',
> - 'zany',
> - 'zealous',
> - 'zesty',
> - 'zippy'
> + b'abiding',
> + b'abject',
> + b'ablaze',
> + b'able',
> + b'aboard',
> + b'abounding',
> + b'absorbed',
> + b'absorbing',
> + b'abstracted',
> + b'abundant',
> + b'acceptable',
> + b'accessible',
> + b'accurate',
> + b'acoustic',
> + b'adamant',
> + b'adaptable',
> + b'adhesive',
> + b'adjoining',
> + b'adorable',
> + b'adventurous',
> + b'affable',
> + b'affectionate',
> + b'agreeable',
> + b'alert',
> + b'alive',
> + b'alluring',
> + b'amazing',
> + b'ambiguous',
> + b'ambitious',
> + b'amiable',
> + b'amicable',
> + b'amused',
> + b'amusing',
> + b'ancient',
> + b'animated',
> + b'apricot',
> + b'aquatic',
> + b'arctic',
> + b'arenaceous',
> + b'aromatic',
> + b'aspiring',
> + b'assiduous',
> + b'assorted',
> + b'astonishing',
> + b'attractive',
> + b'auspicious',
> + b'automatic',
> + b'available',
> + b'average',
> + b'awake',
> + b'aware',
> + b'awesome',
> + b'axiomatic',
> + b'bashful',
> + b'bawdy',
> + b'beautiful',
> + b'beefy',
> + b'befitting',
> + b'beneficial',
> + b'benevolent',
> + b'bent',
> + b'best',
> + b'better',
> + b'bewildered',
> + b'bewitching',
> + b'big',
> + b'billowy',
> + b'bizarre',
> + b'black',
> + b'blithe',
> + b'blue',
> + b'blushing',
> + b'bouncy',
> + b'boundless',
> + b'brainy',
> + b'brash',
> + b'brave',
> + b'brawny',
> + b'brazen',
> + b'breezy',
> + b'brief',
> + b'bright',
> + b'brilliant',
> + b'broad',
> + b'brown',
> + b'bucolic',
> + b'bulky',
> + b'bumpy',
> + b'burgundy',
> + b'burly',
> + b'bustling',
> + b'busy',
> + b'calm',
> + b'capable',
> + b'capricious',
> + b'captivating',
> + b'carefree',
> + b'careful',
> + b'caring',
> + b'carrot',
> + b'ceaseless',
> + b'cerise',
> + b'certain',
> + b'challenging',
> + b'changeable',
> + b'charming',
> + b'cheerful',
> + b'chief',
> + b'chilly',
> + b'chipper',
> + b'classy',
> + b'clean',
> + b'clear',
> + b'clever',
> + b'cloudy',
> + b'coherent',
> + b'colorful',
> + b'colossal',
> + b'comfortable',
> + b'common',
> + b'communicative',
> + b'compassionate',
> + b'complete',
> + b'complex',
> + b'compulsive',
> + b'confused',
> + b'conscientious',
> + b'conscious',
> + b'conservative',
> + b'considerate',
> + b'convivial',
> + b'cooing',
> + b'cool',
> + b'cooperative',
> + b'coordinated',
> + b'courageous',
> + b'courteous',
> + b'crazy',
> + b'creative',
> + b'crispy',
> + b'crooked',
> + b'crowded',
> + b'cuddly',
> + b'cultured',
> + b'cunning',
> + b'curious',
> + b'curly',
> + b'curved',
> + b'curvy',
> + b'cut',
> + b'cute',
> + b'daily',
> + b'damp',
> + b'dapper',
> + b'dashing',
> + b'dazzling',
> + b'dear',
> + b'debonair',
> + b'decisive',
> + b'decorous',
> + b'deep',
> + b'defiant',
> + b'delicate',
> + b'delicious',
> + b'delighted',
> + b'delightful',
> + b'delirious',
> + b'descriptive',
> + b'detached',
> + b'detailed',
> + b'determined',
> + b'different',
> + b'diligent',
> + b'diminutive',
> + b'diplomatic',
> + b'discreet',
> + b'distinct',
> + b'distinctive',
> + b'dramatic',
> + b'dry',
> + b'dynamic',
> + b'dynamite',
> + b'eager',
> + b'early',
> + b'earthy',
> + b'easy',
> + b'easygoing',
> + b'eatable',
> + b'economic',
> + b'ecstatic',
> + b'educated',
> + b'efficacious',
> + b'efficient',
> + b'effortless',
> + b'eight',
> + b'elastic',
> + b'elated',
> + b'electric',
> + b'elegant',
> + b'elfin',
> + b'elite',
> + b'eminent',
> + b'emotional',
> + b'enchanted',
> + b'enchanting',
> + b'encouraging',
> + b'endless',
> + b'energetic',
> + b'enormous',
> + b'entertaining',
> + b'enthusiastic',
> + b'envious',
> + b'epicurean',
> + b'equable',
> + b'equal',
> + b'eternal',
> + b'ethereal',
> + b'evanescent',
> + b'even',
> + b'excellent',
> + b'excited',
> + b'exciting',
> + b'exclusive',
> + b'exotic',
> + b'expensive',
> + b'exquisite',
> + b'extroverted',
> + b'exuberant',
> + b'exultant',
> + b'fabulous',
> + b'fair',
> + b'faithful',
> + b'familiar',
> + b'famous',
> + b'fancy',
> + b'fantastic',
> + b'far',
> + b'fascinated',
> + b'fast',
> + b'fearless',
> + b'female',
> + b'fertile',
> + b'festive',
> + b'few',
> + b'fine',
> + b'first',
> + b'five',
> + b'fixed',
> + b'flamboyant',
> + b'flashy',
> + b'flat',
> + b'flawless',
> + b'flirtatious',
> + b'florid',
> + b'flowery',
> + b'fluffy',
> + b'fluttering',
> + b'foamy',
> + b'foolish',
> + b'foregoing',
> + b'fortunate',
> + b'four',
> + b'frank',
> + b'free',
> + b'frequent',
> + b'fresh',
> + b'friendly',
> + b'full',
> + b'functional',
> + b'funny',
> + b'furry',
> + b'future',
> + b'futuristic',
> + b'fuzzy',
> + b'gabby',
> + b'gainful',
> + b'garrulous',
> + b'general',
> + b'generous',
> + b'gentle',
> + b'giant',
> + b'giddy',
> + b'gifted',
> + b'gigantic',
> + b'gilded',
> + b'glamorous',
> + b'gleaming',
> + b'glorious',
> + b'glossy',
> + b'glowing',
> + b'godly',
> + b'good',
> + b'goofy',
> + b'gorgeous',
> + b'graceful',
> + b'grandiose',
> + b'grateful',
> + b'gratis',
> + b'gray',
> + b'great',
> + b'green',
> + b'gregarious',
> + b'grey',
> + b'groovy',
> + b'guiltless',
> + b'gusty',
> + b'guttural',
> + b'habitual',
> + b'half',
> + b'hallowed',
> + b'halting',
> + b'handsome',
> + b'happy',
> + b'hard',
> + b'hardworking',
> + b'harmonious',
> + b'heady',
> + b'healthy',
> + b'heavenly',
> + b'helpful',
> + b'hilarious',
> + b'historical',
> + b'holistic',
> + b'hollow',
> + b'honest',
> + b'honorable',
> + b'hopeful',
> + b'hospitable',
> + b'hot',
> + b'huge',
> + b'humorous',
> + b'hungry',
> + b'hushed',
> + b'hypnotic',
> + b'illustrious',
> + b'imaginary',
> + b'imaginative',
> + b'immense',
> + b'imminent',
> + b'impartial',
> + b'important',
> + b'imported',
> + b'impossible',
> + b'incandescent',
> + b'inconclusive',
> + b'incredible',
> + b'independent',
> + b'industrious',
> + b'inexpensive',
> + b'innate',
> + b'innocent',
> + b'inquisitive',
> + b'instinctive',
> + b'intellectual',
> + b'intelligent',
> + b'intense',
> + b'interesting',
> + b'internal',
> + b'intuitive',
> + b'inventive',
> + b'invincible',
> + b'jazzy',
> + b'jolly',
> + b'joyful',
> + b'joyous',
> + b'judicious',
> + b'juicy',
> + b'jumpy',
> + b'keen',
> + b'kind',
> + b'kindhearted',
> + b'kindly',
> + b'knotty',
> + b'knowing',
> + b'knowledgeable',
> + b'known',
> + b'laconic',
> + b'large',
> + b'lavish',
> + b'lean',
> + b'learned',
> + b'left',
> + b'legal',
> + b'level',
> + b'light',
> + b'likeable',
> + b'literate',
> + b'little',
> + b'lively',
> + b'living',
> + b'long',
> + b'longing',
> + b'loud',
> + b'lovely',
> + b'loving',
> + b'loyal',
> + b'lucky',
> + b'luminous',
> + b'lush',
> + b'luxuriant',
> + b'luxurious',
> + b'lyrical',
> + b'magenta',
> + b'magical',
> + b'magnificent',
> + b'majestic',
> + b'male',
> + b'mammoth',
> + b'many',
> + b'marvelous',
> + b'massive',
> + b'material',
> + b'mature',
> + b'meandering',
> + b'meaty',
> + b'medical',
> + b'mellow',
> + b'melodic',
> + b'melted',
> + b'merciful',
> + b'mighty',
> + b'miniature',
> + b'miniscule',
> + b'minor',
> + b'minute',
> + b'misty',
> + b'modern',
> + b'modest',
> + b'momentous',
> + b'motionless',
> + b'mountainous',
> + b'mute',
> + b'mysterious',
> + b'narrow',
> + b'natural',
> + b'near',
> + b'neat',
> + b'nebulous',
> + b'necessary',
> + b'neighborly',
> + b'new',
> + b'next',
> + b'nice',
> + b'nifty',
> + b'nimble',
> + b'nine',
> + b'nippy',
> + b'noiseless',
> + b'noisy',
> + b'nonchalant',
> + b'normal',
> + b'numberless',
> + b'numerous',
> + b'nutritious',
> + b'obedient',
> + b'observant',
> + b'obtainable',
> + b'oceanic',
> + b'omniscient',
> + b'one',
> + b'open',
> + b'opposite',
> + b'optimal',
> + b'optimistic',
> + b'opulent',
> + b'orange',
> + b'ordinary',
> + b'organic',
> + b'outgoing',
> + b'outrageous',
> + b'outstanding',
> + b'oval',
> + b'overjoyed',
> + b'overt',
> + b'palatial',
> + b'panoramic',
> + b'parallel',
> + b'passionate',
> + b'past',
> + b'pastoral',
> + b'patient',
> + b'peaceful',
> + b'perfect',
> + b'periodic',
> + b'permissible',
> + b'perpetual',
> + b'persistent',
> + b'petite',
> + b'philosophical',
> + b'physical',
> + b'picturesque',
> + b'pink',
> + b'pioneering',
> + b'piquant',
> + b'plausible',
> + b'pleasant',
> + b'plucky',
> + b'poised',
> + b'polite',
> + b'possible',
> + b'powerful',
> + b'practical',
> + b'precious',
> + b'premium',
> + b'present',
> + b'pretty',
> + b'previous',
> + b'private',
> + b'probable',
> + b'productive',
> + b'profound',
> + b'profuse',
> + b'protective',
> + b'proud',
> + b'psychedelic',
> + b'public',
> + b'pumped',
> + b'purple',
> + b'purring',
> + b'puzzled',
> + b'puzzling',
> + b'quaint',
> + b'quick',
> + b'quicker',
> + b'quickest',
> + b'quiet',
> + b'quirky',
> + b'quixotic',
> + b'quizzical',
> + b'rainy',
> + b'rapid',
> + b'rare',
> + b'rational',
> + b'ready',
> + b'real',
> + b'rebel',
> + b'receptive',
> + b'red',
> + b'reflective',
> + b'regular',
> + b'relaxed',
> + b'reliable',
> + b'relieved',
> + b'remarkable',
> + b'reminiscent',
> + b'reserved',
> + b'resolute',
> + b'resonant',
> + b'resourceful',
> + b'responsible',
> + b'rich',
> + b'ridiculous',
> + b'right',
> + b'rightful',
> + b'ripe',
> + b'ritzy',
> + b'roasted',
> + b'robust',
> + b'romantic',
> + b'roomy',
> + b'round',
> + b'royal',
> + b'ruddy',
> + b'rural',
> + b'rustic',
> + b'sable',
> + b'safe',
> + b'salty',
> + b'same',
> + b'satisfying',
> + b'savory',
> + b'scientific',
> + b'scintillating',
> + b'scrumptious',
> + b'second',
> + b'secret',
> + b'secretive',
> + b'seemly',
> + b'selective',
> + b'sensible',
> + b'separate',
> + b'shaggy',
> + b'shaky',
> + b'shining',
> + b'shiny',
> + b'short',
> + b'shy',
> + b'silent',
> + b'silky',
> + b'silly',
> + b'simple',
> + b'simplistic',
> + b'sincere',
> + b'six',
> + b'sizzling',
> + b'skillful',
> + b'sleepy',
> + b'slick',
> + b'slim',
> + b'smart',
> + b'smiling',
> + b'smooth',
> + b'soaring',
> + b'sociable',
> + b'soft',
> + b'solid',
> + b'sophisticated',
> + b'sparkling',
> + b'special',
> + b'spectacular',
> + b'speedy',
> + b'spicy',
> + b'spiffy',
> + b'spiritual',
> + b'splendid',
> + b'spooky',
> + b'spotless',
> + b'spotted',
> + b'square',
> + b'standing',
> + b'statuesque',
> + b'steadfast',
> + b'steady',
> + b'steep',
> + b'stimulating',
> + b'straight',
> + b'straightforward',
> + b'striking',
> + b'striped',
> + b'strong',
> + b'stunning',
> + b'stupendous',
> + b'sturdy',
> + b'subsequent',
> + b'substantial',
> + b'subtle',
> + b'successful',
> + b'succinct',
> + b'sudden',
> + b'super',
> + b'superb',
> + b'supreme',
> + b'swanky',
> + b'sweet',
> + b'swift',
> + b'sympathetic',
> + b'synonymous',
> + b'talented',
> + b'tall',
> + b'tame',
> + b'tan',
> + b'tangible',
> + b'tangy',
> + b'tasteful',
> + b'tasty',
> + b'telling',
> + b'temporary',
> + b'tempting',
> + b'ten',
> + b'tender',
> + b'terrific',
> + b'tested',
> + b'thankful',
> + b'therapeutic',
> + b'thin',
> + b'thinkable',
> + b'third',
> + b'thoughtful',
> + b'three',
> + b'thrifty',
> + b'tidy',
> + b'tiny',
> + b'toothsome',
> + b'towering',
> + b'tranquil',
> + b'tremendous',
> + b'tricky',
> + b'true',
> + b'truthful',
> + b'two',
> + b'typical',
> + b'ubiquitous',
> + b'ultra',
> + b'unassuming',
> + b'unbiased',
> + b'uncovered',
> + b'understanding',
> + b'understood',
> + b'unequaled',
> + b'unique',
> + b'unusual',
> + b'unwritten',
> + b'upbeat',
> + b'useful',
> + b'utopian',
> + b'utter',
> + b'uttermost',
> + b'valuable',
> + b'various',
> + b'vast',
> + b'verdant',
> + b'vermilion',
> + b'versatile',
> + b'versed',
> + b'victorious',
> + b'vigorous',
> + b'violet',
> + b'vivacious',
> + b'voiceless',
> + b'voluptuous',
> + b'wacky',
> + b'waiting',
> + b'wakeful',
> + b'wandering',
> + b'warm',
> + b'warmhearted',
> + b'wealthy',
> + b'whimsical',
> + b'whispering',
> + b'white',
> + b'whole',
> + b'wholesale',
> + b'whopping',
> + b'wide',
> + b'wiggly',
> + b'wild',
> + b'willing',
> + b'windy',
> + b'winsome',
> + b'wiry',
> + b'wise',
> + b'wistful',
> + b'witty',
> + b'womanly',
> + b'wonderful',
> + b'workable',
> + b'young',
> + b'youthful',
> + b'yummy',
> + b'zany',
> + b'zealous',
> + b'zesty',
> + b'zippy'
> ]
>
> def randomtopicname(ui):
> - if ui.configint("devel", "randomseed"):
> - random.seed(ui.configint("devel", "randomseed"))
> - return random.choice(adjectives) + "-" + random.choice(animals)
> + if ui.configint(b"devel", b"randomseed"):
> + random.seed(ui.configint(b"devel", b"randomseed"))
> + return random.choice(adjectives) + b"-" + random.choice(animals)
> diff --git a/hgext3rd/topic/revset.py b/hgext3rd/topic/revset.py
> --- a/hgext3rd/topic/revset.py
> +++ b/hgext3rd/topic/revset.py
> @@ -24,11 +24,11 @@
> revsetpredicate = registrar.revsetpredicate()
>
> def getstringstrict(x, err):
> - if x and x[0] == 'string':
> + if x and x[0] == b'string':
> return x[1]
> raise error.ParseError(err)
>
> -@revsetpredicate('topic([string or set])')
> +@revsetpredicate(b'topic([string or set])')
> def topicset(repo, subset, x):
> """All changesets with the specified topic or the topics of the given
> changesets. Without the argument, all changesets with any topic specified.
> @@ -36,7 +36,7 @@
> If `string` starts with `re:` the remainder of the name is treated
> as a regular expression.
> """
> - args = revset.getargs(x, 0, 1, 'topic takes one or no arguments')
> + args = revset.getargs(x, 0, 1, b'topic takes one or no arguments')
>
> mutable = revset._notpublic(repo, revset.fullreposet(repo), ())
>
> @@ -44,15 +44,15 @@
> return (subset & mutable).filter(lambda r: bool(repo[r].topic()))
>
> try:
> - topic = getstringstrict(args[0], '')
> + topic = getstringstrict(args[0], b'')
> except error.ParseError:
> # not a string, but another revset
> pass
> else:
> kind, pattern, matcher = mkmatcher(topic)
>
> - if topic.startswith('literal:') and pattern not in repo.topics:
> - raise error.RepoLookupError("topic '%s' does not exist" % pattern)
> + if topic.startswith(b'literal:') and pattern not in repo.topics:
> + raise error.RepoLookupError(b"topic '%s' does not exist" % pattern)
>
> def matches(r):
> topic = repo[r].topic()
> @@ -64,7 +64,7 @@
>
> s = revset.getset(repo, revset.fullreposet(repo), x)
> topics = {repo[r].topic() for r in s}
> - topics.discard('')
> + topics.discard(b'')
>
> def matches(r):
> if r in s:
> @@ -76,20 +76,20 @@
>
> return (subset & mutable).filter(matches)
>
> -@revsetpredicate('ngtip([branch])')
> +@revsetpredicate(b'ngtip([branch])')
> def ngtipset(repo, subset, x):
> """The untopiced tip.
>
> Name is horrible so that people change it.
> """
> - args = revset.getargs(x, 1, 1, 'ngtip takes one argument')
> + args = revset.getargs(x, 1, 1, b'ngtip takes one argument')
> # match a specific topic
> - branch = revset.getstring(args[0], 'ngtip requires a string')
> - if branch == '.':
> - branch = repo['.'].branch()
> + branch = revset.getstring(args[0], b'ngtip requires a string')
> + if branch == b'.':
> + branch = repo[b'.'].branch()
> return subset & revset.baseset(destination.ngtip(repo, branch))
>
> -@revsetpredicate('stack()')
> +@revsetpredicate(b'stack()')
> def stackset(repo, subset, x):
> """All relevant changes in the current topic,
>
> @@ -97,7 +97,7 @@
> unstable changeset after there future parent (as if evolve where already
> run).
> """
> - err = 'stack takes no arguments, it works on current topic'
> + err = b'stack takes no arguments, it works on current topic'
> revset.getargs(x, 0, 0, err)
> topic = None
> branch = None
> @@ -107,7 +107,7 @@
> branch = repo[None].branch()
> return revset.baseset(stack.stack(repo, branch=branch, topic=topic)[1:]) & subset
>
> -if util.safehasattr(revset, 'subscriptrelations'):
> +if util.safehasattr(revset, b'subscriptrelations'):
> def stackrel(repo, subset, x, rel, z, order):
> """This is a revset-flavored implementation of stack aliases.
>
> @@ -120,8 +120,8 @@
> if isinstance(z, tuple):
> a, b = revset.getintrange(
> z,
> - 'relation subscript must be an integer or a range',
> - 'relation subscript bounds must be integers',
> + b'relation subscript must be an integer or a range',
> + b'relation subscript bounds must be integers',
> None, None)
> else:
> a = b = z
> @@ -159,12 +159,12 @@
>
> return subset & revset.baseset(revs)
>
> - revset.subscriptrelations['stack'] = stackrel
> - revset.subscriptrelations['s'] = stackrel
> + revset.subscriptrelations[b'stack'] = stackrel
> + revset.subscriptrelations[b's'] = stackrel
>
> def topicrel(repo, subset, x, *args):
> subset &= topicset(repo, subset, x)
> return revset.generationsrel(repo, subset, x, *args)
>
> - revset.subscriptrelations['topic'] = topicrel
> - revset.subscriptrelations['t'] = topicrel
> + revset.subscriptrelations[b'topic'] = topicrel
> + revset.subscriptrelations[b't'] = topicrel
> diff --git a/hgext3rd/topic/stack.py b/hgext3rd/topic/stack.py
> --- a/hgext3rd/topic/stack.py
> +++ b/hgext3rd/topic/stack.py
> @@ -25,7 +25,7 @@
> username = None
> if user:
> # user is of form "abc <abc@xyz.com>"
> - username = user.split('<')[0]
> + username = user.split(b'<')[0]
> if not username:
> # assuming user is of form "<abc at xyz.com>"
> if len(user) > 1:
> @@ -44,10 +44,10 @@
> """
> phasesets = repo._phasecache._phasesets
> if not phasesets or None in phasesets[phases.draft:]:
> - return repo.revs('(not public()) - obsolete()')
> + return repo.revs(b'(not public()) - obsolete()')
>
> result = set.union(*phasesets[phases.draft:])
> - result -= obsolete.getrevs(repo, 'obsolete')
> + result -= obsolete.getrevs(repo, b'obsolete')
> return result
>
> class stack(object):
> @@ -62,13 +62,13 @@
> subset = _stackcandidates(repo)
>
> if topic is not None and branch is not None:
> - raise error.ProgrammingError('both branch and topic specified (not defined yet)')
> + raise error.ProgrammingError(b'both branch and topic specified (not defined yet)')
> elif topic is not None:
> - trevs = repo.revs("%ld and topic(%s)", subset, topic)
> + trevs = repo.revs(b"%ld and topic(%s)", subset, topic)
> elif branch is not None:
> - trevs = repo.revs("%ld and branch(%s) - topic()", subset, branch)
> + trevs = repo.revs(b"%ld and branch(%s) - topic()", subset, branch)
> else:
> - raise error.ProgrammingError('neither branch and topic specified (not defined yet)')
> + raise error.ProgrammingError(b'neither branch and topic specified (not defined yet)')
> self._revs = trevs
>
> def __iter__(self):
> @@ -168,7 +168,7 @@
> if revs:
> pt1 = self._repo[revs[0]].p1()
> else:
> - pt1 = self._repo['.']
> + pt1 = self._repo[b'.']
>
> if pt1.obsolete():
> pt1 = self._repo[_singlesuccessor(self._repo, pt1)]
> @@ -196,15 +196,15 @@
> if revs:
> minroot = [min(r for r in revs if not deps[r])]
> try:
> - dest = destutil.destmerge(self._repo, action='rebase',
> + dest = destutil.destmerge(self._repo, action=b'rebase',
> sourceset=minroot,
> onheadcheck=False)
> - return len(self._repo.revs("only(%d, %ld)", dest, minroot))
> + return len(self._repo.revs(b"only(%d, %ld)", dest, minroot))
> except error.NoMergeDestAbort:
> return 0
> except error.ManyMergeDestAbort as exc:
> # XXX we should make it easier for upstream to provide the information
> - self.behinderror = str(exc).split('-', 1)[0].rstrip()
> + self.behinderror = str(exc).split(b'-', 1)[0].rstrip()
> return -1
> return 0
>
> @@ -216,68 +216,68 @@
> return branches
>
> def labelsgen(prefix, parts):
> - fmt = prefix + '.%s'
> - return prefix + ' ' + ' '.join(fmt % p.replace(' ', '-') for p in parts)
> + fmt = prefix + b'.%s'
> + return prefix + b' ' + b' '.join(fmt % p.replace(b' ', b'-') for p in parts)
>
> def showstack(ui, repo, branch=None, topic=None, opts=None):
> if opts is None:
> opts = {}
>
> if topic is not None and branch is not None:
> - msg = 'both branch and topic specified [%s]{%s}(not defined yet)'
> + msg = b'both branch and topic specified [%s]{%s}(not defined yet)'
> msg %= (branch, topic)
> raise error.ProgrammingError(msg)
> elif topic is not None:
> - prefix = 's'
> + prefix = b's'
> if topic not in repo.topics:
> - raise error.Abort(_('cannot resolve "%s": no such topic found') % topic)
> + raise error.Abort(_(b'cannot resolve "%s": no such topic found') % topic)
> elif branch is not None:
> - prefix = 's'
> + prefix = b's'
> else:
> - raise error.ProgrammingError('neither branch and topic specified (not defined yet)')
> + raise error.ProgrammingError(b'neither branch and topic specified (not defined yet)')
>
> - fm = ui.formatter('topicstack', opts)
> + fm = ui.formatter(b'topicstack', opts)
> prev = None
> entries = []
> idxmap = {}
>
> - label = 'topic'
> + label = b'topic'
> if topic == repo.currenttopic:
> - label = 'topic.active'
> + label = b'topic.active'
>
> st = stack(repo, branch, topic)
> if topic is not None:
> - fm.plain(_('### topic: %s')
> + fm.plain(_(b'### topic: %s')
> % ui.label(topic, label),
> - label='stack.summary.topic')
> + label=b'stack.summary.topic')
>
> if 1 < len(st.heads):
> - fm.plain(' (')
> - fm.plain('%d heads' % len(st.heads),
> - label='stack.summary.headcount.multiple')
> - fm.plain(')')
> - fm.plain('\n')
> - fm.plain(_('### target: %s (branch)')
> - % '+'.join(st.branches), # XXX handle multi branches
> - label='stack.summary.branches')
> + fm.plain(b' (')
> + fm.plain(b'%d heads' % len(st.heads),
> + label=b'stack.summary.headcount.multiple')
> + fm.plain(b')')
> + fm.plain(b'\n')
> + fm.plain(_(b'### target: %s (branch)')
> + % b'+'.join(st.branches), # XXX handle multi branches
> + label=b'stack.summary.branches')
> if topic is None:
> if 1 < len(st.heads):
> - fm.plain(' (')
> - fm.plain('%d heads' % len(st.heads),
> - label='stack.summary.headcount.multiple')
> - fm.plain(')')
> + fm.plain(b' (')
> + fm.plain(b'%d heads' % len(st.heads),
> + label=b'stack.summary.headcount.multiple')
> + fm.plain(b')')
> else:
> if st.behindcount == -1:
> - fm.plain(', ')
> - fm.plain('ambiguous rebase destination - %s' % st.behinderror,
> - label='stack.summary.behinderror')
> + fm.plain(b', ')
> + fm.plain(b'ambiguous rebase destination - %s' % st.behinderror,
> + label=b'stack.summary.behinderror')
> elif st.behindcount:
> - fm.plain(', ')
> - fm.plain('%d behind' % st.behindcount, label='stack.summary.behindcount')
> - fm.plain('\n')
> + fm.plain(b', ')
> + fm.plain(b'%d behind' % st.behindcount, label=b'stack.summary.behindcount')
> + fm.plain(b'\n')
>
> if not st:
> - fm.plain(_("(stack is empty)\n"))
> + fm.plain(_(b"(stack is empty)\n"))
>
> st = stack(repo, branch=branch, topic=topic)
> for idx, r in enumerate(st, 0):
> @@ -316,40 +316,40 @@
>
> symbol = None
> states = []
> - if opts.get('children'):
> - expr = 'children(%d) and merge() - %ld'
> + if opts.get(b'children'):
> + expr = b'children(%d) and merge() - %ld'
> revisions = repo.revs(expr, ctx.rev(), st._revs)
> if len(revisions) > 0:
> - states.append('external-children')
> + states.append(b'external-children')
>
> if ctx.orphan():
> - symbol = '$'
> - states.append('orphan')
> + symbol = b'$'
> + states.append(b'orphan')
>
> if ctx.contentdivergent():
> - symbol = '$'
> - states.append('content divergent')
> + symbol = b'$'
> + states.append(b'content divergent')
>
> if ctx.phasedivergent():
> - symbol = '$'
> - states.append('phase divergent')
> + symbol = b'$'
> + states.append(b'phase divergent')
>
> - iscurrentrevision = repo.revs('%d and parents()', ctx.rev())
> + iscurrentrevision = repo.revs(b'%d and parents()', ctx.rev())
> if iscurrentrevision:
> - symbol = '@'
> - states.append('current')
> + symbol = b'@'
> + states.append(b'current')
>
> if not isentry:
> - symbol = '^'
> + symbol = b'^'
> # "base" is kind of a "ghost" entry
> - states.append('base')
> + states.append(b'base')
>
> # none of the above if statments get executed
> if not symbol:
> - symbol = ':'
> + symbol = b':'
>
> if not states:
> - states.append('clean')
> + states.append(b'clean')
>
> states.sort()
>
> @@ -367,22 +367,22 @@
> spacewidth = 2 + 40
> # s# alias width
> spacewidth += 2
> - fm.plain(' ' * spacewidth)
> + fm.plain(b' ' * spacewidth)
> else:
> - fm.write('stack_index', '%s%%d' % prefix, idx,
> - label=labelsgen('stack.index', states))
> + fm.write(b'stack_index', b'%s%%d' % prefix, idx,
> + label=labelsgen(b'stack.index', states))
> if ui.verbose:
> - fm.write('node', '(%s)', fm.hexfunc(ctx.node()),
> - label=labelsgen('stack.shortnode', states))
> + fm.write(b'node', b'(%s)', fm.hexfunc(ctx.node()),
> + label=labelsgen(b'stack.shortnode', states))
> else:
> fm.data(node=fm.hexfunc(ctx.node()))
> - fm.write('symbol', '%s', symbol,
> - label=labelsgen('stack.state', states))
> - fm.plain(' ')
> - fm.write('desc', '%s', ctx.description().splitlines()[0],
> - label=labelsgen('stack.desc', states))
> - fm.condwrite(states != ['clean'] and idx is not None, 'state',
> - ' (%s)', fm.formatlist(states, 'stack.state'),
> - label=labelsgen('stack.state', states))
> - fm.plain('\n')
> + fm.write(b'symbol', b'%s', symbol,
> + label=labelsgen(b'stack.state', states))
> + fm.plain(b' ')
> + fm.write(b'desc', b'%s', ctx.description().splitlines()[0],
> + label=labelsgen(b'stack.desc', states))
> + fm.condwrite(states != [b'clean'] and idx is not None, b'state',
> + b' (%s)', fm.formatlist(states, b'stack.state'),
> + label=labelsgen(b'stack.state', states))
> + fm.plain(b'\n')
> fm.end()
> diff --git a/hgext3rd/topic/topicmap.py b/hgext3rd/topic/topicmap.py
> --- a/hgext3rd/topic/topicmap.py
> +++ b/hgext3rd/topic/topicmap.py
> @@ -16,22 +16,22 @@
> common,
> )
>
> -basefilter = set(['base', 'immutable'])
> +basefilter = set([b'base', b'immutable'])
> def topicfilter(name):
> """return a "topic" version of a filter level"""
> if name in basefilter:
> return name
> elif name is None:
> return None
> - elif name.endswith('-topic'):
> + elif name.endswith(b'-topic'):
> return name
> else:
> - return name + '-topic'
> + return name + b'-topic'
>
> def istopicfilter(filtername):
> if filtername is None:
> return False
> - return filtername.endswith('-topic')
> + return filtername.endswith(b'-topic')
>
> def gettopicrepo(repo):
> if not common.hastopicext(repo):
> @@ -61,8 +61,8 @@
> if newfilter not in funcmap:
> funcmap[newfilter] = revsfunc
> partialmap[newfilter] = base
> - funcmap['unfiltered-topic'] = lambda repo: frozenset()
> - partialmap['unfiltered-topic'] = 'visible-topic'
> + funcmap[b'unfiltered-topic'] = lambda repo: frozenset()
> + partialmap[b'unfiltered-topic'] = b'visible-topic'
>
> def _phaseshash(repo, maxrev):
> """uniq ID for a phase matching a set of rev"""
> @@ -80,7 +80,7 @@
> if revs:
> s = hashlib.sha1()
> for rev in revs:
> - s.update('%s;' % rev)
> + s.update(b'%s;' % rev)
> key = s.digest()
> return key
>
> @@ -88,8 +88,8 @@
> """call at uisetup time to install various wrappings"""
> _setuptopicfilter(ui)
> _wrapbmcache(ui)
> - extensions.wrapfunction(changegroup.cg1unpacker, 'apply', cgapply)
> - extensions.wrapfunction(cmdutil, 'commitstatus', commitstatus)
> + extensions.wrapfunction(changegroup.cg1unpacker, b'apply', cgapply)
> + extensions.wrapfunction(cmdutil, b'commitstatus', commitstatus)
>
> def cgapply(orig, self, repo, *args, **kwargs):
> """make sure a topicmap is used when applying a changegroup"""
> @@ -100,8 +100,8 @@
> # wrap commit status use the topic branch heads
> ctx = repo[node]
> if ctx.topic() and ctx.branch() == branch:
> - subbranch = "%s:%s" % (branch, ctx.topic())
> - bheads = repo.branchheads("%s:%s" % (subbranch, ctx.topic()))
> + subbranch = b"%s:%s" % (branch, ctx.topic())
> + bheads = repo.branchheads(b"%s:%s" % (subbranch, ctx.topic()))
>
> ret = orig(repo, node, branch, bheads=bheads, opts=opts)
>
> @@ -112,10 +112,10 @@
> return ret
> parents = ctx.parents()
>
> - if (not opts.get('amend') and bheads and node not in bheads and not
> + if (not opts.get(b'amend') and bheads and node not in bheads and not
> [x for x in parents if x.node() in bheads and x.branch() == branch]):
> - repo.ui.status(_("(consider using topic for lightweight branches."
> - " See 'hg help topic')\n"))
> + repo.ui.status(_(b"(consider using topic for lightweight branches."
> + b" See 'hg help topic')\n"))
>
> return ret
>
> @@ -133,10 +133,10 @@
> def _wrapupdatebmcachemethod(orig, self, repo):
> # pass in the bound method as the original
> return _wrapupdatebmcache(orig.__get__(self), repo)
> - extensions.wrapfunction(branchmap.BranchMapCache, 'updatecache', _wrapupdatebmcachemethod)
> + extensions.wrapfunction(branchmap.BranchMapCache, b'updatecache', _wrapupdatebmcachemethod)
> except AttributeError:
> # Mercurial 4.8 and before
> - extensions.wrapfunction(branchmap, 'updatecache', _wrapupdatebmcache)
> + extensions.wrapfunction(branchmap, b'updatecache', _wrapupdatebmcache)
>
>
> def _wrapupdatebmcache(orig, repo):
> @@ -170,7 +170,7 @@
>
> def copy(self):
> """return an deep copy of the branchcache object"""
> - if util.safehasattr(self, '_entries'):
> + if util.safehasattr(self, b'_entries'):
> _entries = self._entries
> else:
> # hg <= 4.9 (624d6683c705, b137a6793c51)
> @@ -180,17 +180,17 @@
> new.phaseshash = self.phaseshash
> return new
>
> - def branchtip(self, branch, topic=''):
> + def branchtip(self, branch, topic=b''):
> '''Return the tipmost open head on branch head, otherwise return the
> tipmost closed head on branch.
> Raise KeyError for unknown branch.'''
> if topic:
> - branch = '%s:%s' % (branch, topic)
> + branch = b'%s:%s' % (branch, topic)
> return super(_topiccache, self).branchtip(branch)
>
> - def branchheads(self, branch, closed=False, topic=''):
> + def branchheads(self, branch, closed=False, topic=b''):
> if topic:
> - branch = '%s:%s' % (branch, topic)
> + branch = b'%s:%s' % (branch, topic)
> return super(_topiccache, self).branchheads(branch, closed=closed)
>
> def validfor(self, repo):
> @@ -230,13 +230,13 @@
>
> def branchinfo(r, changelog=None):
> info = oldgetbranchinfo(r)
> - topic = ''
> + topic = b''
> ctx = unfi[r]
> if ctx.mutable():
> topic = ctx.topic()
> branch = info[0]
> if topic:
> - branch = '%s:%s' % (branch, topic)
> + branch = b'%s:%s' % (branch, topic)
> return (branch, info[1])
> try:
> unfi.revbranchcache().branchinfo = branchinfo
> _______________________________________________
> Mercurial-devel mailing list
> Mercurial-devel@mercurial-scm.org
> https://www.mercurial-scm.org/mailman/listinfo/mercurial-devel
>
--
Pierre-Yves David
More information about the Mercurial-devel
mailing list