changeset 4845:6f5d3f58fbe4
branching: merge stable into default
author    Pierre-Yves David <pierre-yves.david@octobus.net>
date      Tue, 24 Sep 2019 12:42:27 +0200
parents   62b60fc1983d (diff), 2488ec1bd22f (current diff)
children  38ce7fe4d3f2
files     CHANGELOG hgext3rd/evolve/cmdrewrite.py hgext3rd/evolve/rewriteutil.py hgext3rd/topic/__init__.py
diffstat  104 files changed, 4317 insertions(+), 3523 deletions(-)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/.hg-format-source	Tue Sep 24 12:42:27 2019 +0200
@@ -0,0 +1,1 @@
+{"pattern": "glob:hgext3rd/**/*.py", "tool": "byteify-strings"}
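The new .hg-format-source file holds one JSON object per line, mapping a file pattern to the formatting tool that keeps those files normalized. As a rough illustration only (this is not format-source's actual code, and fnmatch only approximates Mercurial's glob: syntax), such an entry could be consumed like this:

    import fnmatch
    import json

    def tool_for(path, config_text):
        """Return the tool registered for `path`, if any (illustrative sketch)."""
        for line in config_text.splitlines():
            entry = json.loads(line)
            # drop the "glob:" prefix and approximate the glob with fnmatch
            pattern = entry["pattern"].split(":", 1)[1]
            if fnmatch.fnmatch(path, pattern):
                return entry["tool"]
        return None

    config = '{"pattern": "glob:hgext3rd/**/*.py", "tool": "byteify-strings"}'
    print(tool_for("hgext3rd/evolve/cmdrewrite.py", config))  # -> byteify-strings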
--- a/CHANGELOG	Fri Jul 19 16:26:48 2019 +0200
+++ b/CHANGELOG	Tue Sep 24 12:42:27 2019 +0200
@@ -1,6 +1,16 @@
 Changelog
 =========
 
+9.2.0 - in progress
+-------------------
+
+  * python3: beta support for Python 3.6+
+    (thanks to ludovicchabant, martinvonz and rgomes for their hard work)
+  * prune: clarify error message when no revisions were passed
+  * evolve: avoid possible race conditions by locking earlier
+  * abort: add support for `evolve` and `pick` to `hg abort` (hg-5.1+)
+  * rewind: add --keep flag to preserve working copy
+
 9.1.1 - in progress
 -------------------
 
@@ -10,7 +20,6 @@
   * stack: make sure to preserve dependencies, fixes certain complex cases
   * prune: improve documentation for `--pair`
 
-
 9.1.0 -- 2019-07-29
 -------------------
--- a/MANIFEST.in	Fri Jul 19 16:26:48 2019 +0200
+++ b/MANIFEST.in	Tue Sep 24 12:42:27 2019 +0200
@@ -1,4 +1,5 @@
 exclude contrib
+exclude .hg-format-source
 recursive-exclude contrib *
 exclude hgext3rd/evolve/hack
 recursive-exclude hgext3rd/evolve/hack *
--- a/README	Fri Jul 19 16:26:48 2019 +0200
+++ b/README	Tue Sep 24 12:42:27 2019 +0200
@@ -96,6 +96,15 @@
 needed. During the upstreaming process, we can use this clearer picture to
 clean up the code and upgrade it to an appropriate quality for Mercurial
 core.
 
+Python 3 support
+================
+
+Mercurial announced beta support for Python 3 starting with its 5.0 release.
+Since 9.1.0, ``evolve`` has beta support for Python 3.6+.
+
+Support will stay in beta while Mercurial's support for Python 3 remains in
+beta and until it is a bit more battle-tested.
+
 How to Contribute
 =================
 
@@ -145,3 +154,23 @@
 test output change from a changeset in core should adds the following line to
 their description:
 
     CORE-TEST-OUTPUT-UPDATE: <CORE-NODE-ID>
+
+
+Format-source config
+====================
+
+Format source helps smooth out the pain of merging after auto-formatting.
+Follow the instructions for install here:
+
+.. _`format-source`: https://bitbucket.org/octobus/format-source
+
+Then update both your global and repo config files::
+
+  $ hg config -l   # add the lines below
+  [extensions]
+  formatsource =
+
+  [format-source]
+  byteify-strings = python3 ~/workspace/octobus/mercurial-devel/contrib/byteify-strings.py --dictiter --treat-as-kwargs kwargs opts commitopts TROUBLES --allow-attr-methods
+  byteify-strings:mode.input = file
+  byteify-strings:mode.output = pipe
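The bulk of this changeset is the output of that byteify-strings pass over hgext3rd/. A minimal sketch of the effect it enforces, in plain Python 3 with no Mercurial required (the dict names below are illustrative, not evolve's code): Mercurial keys its config and command tables with bytes, so Python-2-era str literals stop matching under Python 3, while kwargs-style option dicts (the names listed after --treat-as-kwargs, such as opts and commitopts) keep native str keys.

    # Sketch only: why string literals get rewritten to bytes literals.
    config = {b'experimental': {b'evolution': [b'all']}}  # bytes-keyed, like Mercurial internals
    opts = {'dry_run': True}                               # **kwargs-style dict keeps str keys

    assert 'experimental' not in config                    # Python 2 spelling no longer matches
    assert config[b'experimental'][b'evolution'] == [b'all']  # byteified spelling still does
    assert opts.get('dry_run') is True                     # untouched thanks to --treat-as-kwargs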
--- a/docs/test2rst.py	Fri Jul 19 16:26:48 2019 +0200
+++ b/docs/test2rst.py	Tue Sep 24 12:42:27 2019 +0200
@@ -14,10 +14,10 @@
 '''
 
 ignored_patterns = [
-    re.compile('^#if'),
-    re.compile('^#else'),
-    re.compile('^#endif'),
-    re.compile('#rest-ignore$'),
+    re.compile(r'^#if'),
+    re.compile(r'^#else'),
+    re.compile(r'^#endif'),
+    re.compile(r'#rest-ignore$'),
 ]
--- a/hgext3rd/evolve/__init__.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/evolve/__init__.py Tue Sep 24 12:42:27 2019 +0200 @@ -184,7 +184,7 @@ - "markers" the raw list of changesets. """ -evolutionhelptext = """ +evolutionhelptext = b""" Obsolescence markers make it possible to mark changesets that have been deleted or superset in a new version of the changeset. @@ -258,7 +258,7 @@ registrar.templatekeyword # new in hg-3.8 except ImportError: from . import metadata - raise ImportError('evolve needs Mercurial version %s or above' % + raise ImportError(b'evolve needs Mercurial version %s or above' % min(metadata.testedwith.split())) import mercurial @@ -311,19 +311,19 @@ buglink = metadata.buglink # Flags for enabling optional parts of evolve -commandopt = 'allnewcommands' +commandopt = b'allnewcommands' obsexcmsg = utility.obsexcmsg shorttemplate = utility.shorttemplate -colortable = {'evolve.node': 'yellow', - 'evolve.user': 'green', - 'evolve.rev': 'blue', - 'evolve.short_description': '', - 'evolve.date': 'cyan', - 'evolve.current_rev': 'bold', - 'evolve.verb': '', - 'evolve.operation': 'bold' +colortable = {b'evolve.node': b'yellow', + b'evolve.user': b'green', + b'evolve.rev': b'blue', + b'evolve.short_description': b'', + b'evolve.date': b'cyan', + b'evolve.current_rev': b'bold', + b'evolve.verb': b'', + b'evolve.operation': b'bold' } _pack = struct.pack @@ -359,9 +359,9 @@ templatekeyword = eh.templatekeyword # Configuration -eh.configitem('experimental', 'evolutioncommands', []) -eh.configitem('experimental', 'evolution.allnewcommands', None) -eh.configitem('experimental', 'prunestrip', False) +eh.configitem(b'experimental', b'evolutioncommands', []) +eh.configitem(b'experimental', b'evolution.allnewcommands', None) +eh.configitem(b'experimental', b'prunestrip', False) # pre hg 4.0 compat @@ -394,14 +394,14 @@ def _configureoptions(ui, repo): # If no capabilities are specified, enable everything. # This is so existing evolve users don't need to change their config. - evolveopts = repo.ui.configlist('experimental', 'evolution') + evolveopts = repo.ui.configlist(b'experimental', b'evolution') if not evolveopts: - evolveopts = ['all'] - repo.ui.setconfig('experimental', 'evolution', evolveopts, 'evolve') - if obsolete.isenabled(repo, 'exchange'): + evolveopts = [b'all'] + repo.ui.setconfig(b'experimental', b'evolution', evolveopts, b'evolve') + if obsolete.isenabled(repo, b'exchange'): # if no config explicitly set, disable bundle1 - if not isinstance(repo.ui.config('server', 'bundle1'), bytes): - repo.ui.setconfig('server', 'bundle1', False) + if not isinstance(repo.ui.config(b'server', b'bundle1'), bytes): + repo.ui.setconfig(b'server', b'bundle1', False) class trdescrepo(repo.__class__): @@ -419,19 +419,19 @@ # This must be in the same function as the option configuration above to # guarantee it happens after the above configuration, but before the # extsetup functions. 
- evolvecommands = ui.configlist('experimental', 'evolutioncommands') - evolveopts = ui.configlist('experimental', 'evolution') + evolvecommands = ui.configlist(b'experimental', b'evolutioncommands') + evolveopts = ui.configlist(b'experimental', b'evolution') if evolveopts and (commandopt not in evolveopts - and 'all' not in evolveopts): + and b'all' not in evolveopts): # We build whitelist containing the commands we want to enable whitelist = set() for cmd in evolvecommands: matchingevolvecommands = [e for e in cmdtable.keys() if cmd in e] if not matchingevolvecommands: - raise error.Abort(_('unknown command: %s') % cmd) + raise error.Abort(_(b'unknown command: %s') % cmd) elif len(matchingevolvecommands) > 1: - matchstr = ', '.join(matchingevolvecommands) - msg = _("ambiguous command specification: '%s' matches [%s]") + matchstr = b', '.join(matchingevolvecommands) + msg = _(b"ambiguous command specification: '%s' matches [%s]") raise error.Abort(msg % (cmd, matchstr)) else: whitelist.add(matchingevolvecommands[0]) @@ -462,8 +462,8 @@ @eh.uisetup def setupparentcommand(ui): - _alias, statuscmd = cmdutil.findcmd('status', commands.table) - pstatusopts = [o for o in statuscmd[1] if o[1] != 'rev'] + _alias, statuscmd = cmdutil.findcmd(b'status', commands.table) + pstatusopts = [o for o in statuscmd[1] if o[1] != b'rev'] @eh.command(b'pstatus', pstatusopts) def pstatus(ui, repo, *args, **kwargs): @@ -474,11 +474,11 @@ match the content of the commit that a bare :hg:`amend` will creates. See :hg:`help status` for details.""" - kwargs['rev'] = ['.^'] + kwargs['rev'] = [b'.^'] return statuscmd[0](ui, repo, *args, **kwargs) - _alias, diffcmd = cmdutil.findcmd('diff', commands.table) - pdiffopts = [o for o in diffcmd[1] if o[1] != 'rev'] + _alias, diffcmd = cmdutil.findcmd(b'diff', commands.table) + pdiffopts = [o for o in diffcmd[1] if o[1] != b'rev'] @eh.command(b'pdiff', pdiffopts) def pdiff(ui, repo, *args, **kwargs): @@ -489,15 +489,15 @@ match the content of the commit that a bare :hg:`amend` will creates. See :hg:`help diff` for details.""" - kwargs['rev'] = ['.^'] + kwargs['rev'] = [b'.^'] return diffcmd[0](ui, repo, *args, **kwargs) @eh.uisetup def _installalias(ui): - if ui.config('alias', 'odiff', None) is None: - ui.setconfig('alias', 'odiff', - "diff --hidden --rev 'limit(predecessors(.),1)' --rev .", - 'evolve') + if ui.config(b'alias', b'odiff', None) is None: + ui.setconfig(b'alias', b'odiff', + b"diff --hidden --rev 'limit(predecessors(.),1)' --rev .", + b'evolve') ### Unstable revset symbol @@ -505,11 +505,11 @@ def revsetunstable(repo, subset, x): """Changesets with instabilities. """ - revset.getargs(x, 0, 0, 'unstable takes no arguments') + revset.getargs(x, 0, 0, b'unstable takes no arguments') troubled = set() - troubled.update(getrevs(repo, 'orphan')) - troubled.update(getrevs(repo, 'phasedivergent')) - troubled.update(getrevs(repo, 'contentdivergent')) + troubled.update(getrevs(repo, b'orphan')) + troubled.update(getrevs(repo, b'phasedivergent')) + troubled.update(getrevs(repo, b'contentdivergent')) troubled = revset.baseset(troubled) troubled.sort() # set is non-ordered, enforce order return subset & troubled @@ -617,8 +617,8 @@ def revsetsuspended(repo, subset, x): """Obsolete changesets with non-obsolete descendants. 
""" - revset.getargs(x, 0, 0, 'suspended takes no arguments') - suspended = revset.baseset(getrevs(repo, 'suspended')) + revset.getargs(x, 0, 0, b'suspended takes no arguments') + suspended = revset.baseset(getrevs(repo, b'suspended')) suspended.sort() return subset & suspended @@ -679,49 +679,49 @@ # This section take care of issue warning to the user when troubles appear def _warnobsoletewc(ui, repo, prevnode=None, wasobs=None): - rev = repo['.'] + rev = repo[b'.'] if not rev.obsolete(): return if rev.node() == prevnode and wasobs: return - msg = _("working directory parent is obsolete! (%s)\n") + msg = _(b"working directory parent is obsolete! (%s)\n") shortnode = node.short(rev.node()) ui.warn(msg % shortnode) # Check that evolve is activated for performance reasons - evolvecommandenabled = any('evolve' in e for e in cmdtable) + evolvecommandenabled = any(b'evolve' in e for e in cmdtable) if ui.quiet or not evolvecommandenabled: return # Show a warning for helping the user to solve the issue reason, successors = obshistory._getobsfateandsuccs(repo, rev.node()) - if reason == 'pruned': - solvemsg = _("use 'hg evolve' to update to its parent successor") - elif reason == 'diverged': - debugcommand = "hg evolve --list --content-divergent" - basemsg = _("%s has diverged, use '%s' to resolve the issue") + if reason == b'pruned': + solvemsg = _(b"use 'hg evolve' to update to its parent successor") + elif reason == b'diverged': + debugcommand = b"hg evolve --list --content-divergent" + basemsg = _(b"%s has diverged, use '%s' to resolve the issue") solvemsg = basemsg % (shortnode, debugcommand) - elif reason == 'superseed': - msg = _("use 'hg evolve' to update to its successor: %s") + elif reason == b'superseed': + msg = _(b"use 'hg evolve' to update to its successor: %s") solvemsg = msg % successors[0] - elif reason == 'superseed_split': - msg = _("use 'hg evolve' to update to its tipmost successor: %s") + elif reason == b'superseed_split': + msg = _(b"use 'hg evolve' to update to its tipmost successor: %s") if len(successors) <= 2: - solvemsg = msg % ", ".join(successors) + solvemsg = msg % b", ".join(successors) else: - firstsuccessors = ", ".join(successors[:2]) + firstsuccessors = b", ".join(successors[:2]) remainingnumber = len(successors) - 2 - successorsmsg = _("%s and %d more") % (firstsuccessors, remainingnumber) + successorsmsg = _(b"%s and %d more") % (firstsuccessors, remainingnumber) solvemsg = msg % successorsmsg else: raise ValueError(reason) - ui.warn("(%s)\n" % solvemsg) + ui.warn(b"(%s)\n" % solvemsg) if util.safehasattr(context, '_filterederror'): # <= hg-4.5 @eh.wrapfunction(context, '_filterederror') @@ -730,36 +730,36 @@ This is extracted in a function to help extensions (eg: evolve) to experiment with various message variants.""" - if repo.filtername.startswith('visible'): + if repo.filtername.startswith(b'visible'): unfilteredrepo = repo.unfiltered() rev = repo[scmutil.revsingle(unfilteredrepo, changeid)] reason, successors = obshistory._getobsfateandsuccs(unfilteredrepo, rev.node()) # Be more precise in case the revision is superseed - if reason == 'superseed': - reason = _("successor: %s") % successors[0] - elif reason == 'superseed_split': + if reason == b'superseed': + reason = _(b"successor: %s") % successors[0] + elif reason == b'superseed_split': if len(successors) <= 2: - reason = _("successors: %s") % ", ".join(successors) + reason = _(b"successors: %s") % b", ".join(successors) else: - firstsuccessors = ", ".join(successors[:2]) + firstsuccessors = b", 
".join(successors[:2]) remainingnumber = len(successors) - 2 - successorsmsg = _("%s and %d more") % (firstsuccessors, remainingnumber) - reason = _("successors: %s") % successorsmsg + successorsmsg = _(b"%s and %d more") % (firstsuccessors, remainingnumber) + reason = _(b"successors: %s") % successorsmsg - msg = _("hidden revision '%s'") % changeid - hint = _('use --hidden to access hidden revisions; %s') % reason + msg = _(b"hidden revision '%s'") % changeid + hint = _(b'use --hidden to access hidden revisions; %s') % reason return error.FilteredRepoLookupError(msg, hint=hint) - msg = _("filtered revision '%s' (not in '%s' subset)") + msg = _(b"filtered revision '%s' (not in '%s' subset)") msg %= (changeid, repo.filtername) return error.FilteredRepoLookupError(msg) -@eh.wrapcommand("update") -@eh.wrapcommand("pull") +@eh.wrapcommand(b"update") +@eh.wrapcommand(b"pull") def wrapmayobsoletewc(origfn, ui, repo, *args, **opts): """Warn that the working directory parent is an obsolete changeset""" - ctx = repo['.'] + ctx = repo[b'.'] node = ctx.node() isobs = ctx.obsolete() @@ -774,7 +774,7 @@ lockmod.release(wlock) return res -@eh.wrapcommand("parents") +@eh.wrapcommand(b"parents") def wrapparents(origfn, ui, repo, *args, **opts): res = origfn(ui, repo, *args, **opts) _warnobsoletewc(ui, repo) @@ -787,10 +787,10 @@ try: return orig(repo, *args, **opts) except error.Abort as ex: - hint = _("use 'hg evolve' to get a stable history " - "or --force to ignore warnings") + hint = _(b"use 'hg evolve' to get a stable history " + b"or --force to ignore warnings") if (len(ex.args) >= 1 - and ex.args[0].startswith('push includes ') + and ex.args[0].startswith(b'push includes ') and ex.hint is None): ex.hint = hint raise @@ -799,11 +799,11 @@ evolvestate = state.cmdstate(repo) if evolvestate: # i18n: column positioning for "hg summary" - ui.status(_('evolve: (evolve --continue)\n')) + ui.status(_(b'evolve: (evolve --continue)\n')) @eh.extsetup def obssummarysetup(ui): - cmdutil.summaryhooks.add('evolve', summaryhook) + cmdutil.summaryhooks.add(b'evolve', summaryhook) ##################################################################### ### Old Evolve extension content ### @@ -814,19 +814,19 @@ @eh.uisetup def _installimportobsolete(ui): - entry = cmdutil.findcmd('import', commands.table)[1] - entry[1].append(('', 'obsolete', False, - _('mark the old node as obsoleted by ' - 'the created commit'))) + entry = cmdutil.findcmd(b'import', commands.table)[1] + entry[1].append((b'', b'obsolete', False, + _(b'mark the old node as obsoleted by ' + b'the created commit'))) def _getnodefrompatch(patch, dest): - patchnode = patch.get('nodeid') + patchnode = patch.get(b'nodeid') if patchnode is not None: - dest['node'] = node.bin(patchnode) + dest[b'node'] = node.bin(patchnode) @eh.wrapfunction(mercurial.cmdutil, 'tryimportone') def tryimportone(orig, ui, repo, hunk, parents, opts, *args, **kwargs): - expected = {'node': None} + expected = {b'node': None} if not util.safehasattr(hunk, 'get'): # hg < 4.6 oldextract = patch.extract @@ -843,12 +843,12 @@ _getnodefrompatch(hunk, expected) ret = orig(ui, repo, hunk, parents, opts, *args, **kwargs) created = ret[1] - if (opts['obsolete'] and None not in (created, expected['node']) - and created != expected['node']): - tr = repo.transaction('import-obs') + if (opts[b'obsolete'] and None not in (created, expected[b'node']) + and created != expected[b'node']): + tr = repo.transaction(b'import-obs') try: - metadata = {'user': ui.username()} - repo.obsstore.create(tr, 
expected['node'], (created,), + metadata = {b'user': ui.username()} + repo.obsstore.create(tr, expected[b'node'], (created,), metadata=metadata) tr.close() finally: @@ -876,62 +876,62 @@ if e is entry: break - synopsis = '(DEPRECATED)' + synopsis = b'(DEPRECATED)' if len(entry) > 2: fn, opts, _syn = entry else: fn, opts, = entry - deprecationwarning = _('%s have been deprecated in favor of %s\n') % ( + deprecationwarning = _(b'%s have been deprecated in favor of %s\n') % ( oldalias, newalias) def newfn(*args, **kwargs): ui = args[0] ui.warn(deprecationwarning) util.checksignature(fn)(*args, **kwargs) - newfn.__doc__ = pycompat.sysstr(deprecationwarning + ' (DEPRECATED)') + newfn.__doc__ = pycompat.sysstr(deprecationwarning + b' (DEPRECATED)') cmdwrapper = eh.command(oldalias, opts, synopsis) cmdwrapper(newfn) @eh.extsetup def deprecatealiases(ui): - _deprecatealias('gup', 'next') - _deprecatealias('gdown', 'previous') + _deprecatealias(b'gup', b'next') + _deprecatealias(b'gdown', b'previous') def _gettopic(ctx): """handle topic fetching with or without the extension""" - return getattr(ctx, 'topic', lambda: '')() + return getattr(ctx, 'topic', lambda: b'')() def _gettopicidx(ctx): """handle topic fetching with or without the extension""" return getattr(ctx, 'topicidx', lambda: None)() def _getcurrenttopic(repo): - return getattr(repo, 'currenttopic', '') + return getattr(repo, 'currenttopic', b'') def _prevupdate(repo, displayer, target, bookmark, dryrun, mergeopt): if dryrun: - repo.ui.write(_('hg update %s;\n') % target) + repo.ui.write(_(b'hg update %s;\n') % target) if bookmark is not None: - repo.ui.write(_('hg bookmark %s -r %s;\n') + repo.ui.write(_(b'hg bookmark %s -r %s;\n') % (bookmark, target)) else: updatecheck = None # --merge is passed, we don't need to care about commands.update.check # config option if mergeopt: - updatecheck = 'none' + updatecheck = b'none' try: ret = hg.updatetotally(repo.ui, repo, target.node(), None, updatecheck=updatecheck) except error.Abort as exc: # replace the hint to mention about --merge option - exc.hint = _('do you want --merge?') + exc.hint = _(b'do you want --merge?') raise if not ret: tr = lock = None try: lock = repo.lock() - tr = repo.transaction('previous') + tr = repo.transaction(b'previous') if bookmark is not None: bmchanges = [(bookmark, target.node())] repo._bookmarks.applychanges(repo, tr, bmchanges) @@ -959,23 +959,23 @@ # issue message for the various case if p1.node() == node.nullid: - repo.ui.warn(_('already at repository root\n')) + repo.ui.warn(_(b'already at repository root\n')) elif not parents and currenttopic: - repo.ui.warn(_('no parent in topic "%s"\n') % currenttopic) - repo.ui.warn(_('(do you want --no-topic)\n')) + repo.ui.warn(_(b'no parent in topic "%s"\n') % currenttopic) + repo.ui.warn(_(b'(do you want --no-topic)\n')) elif len(parents) == 1: target = parents[0] bookmark = None if movebookmark: bookmark = repo._activebookmark else: - header = _("multiple parents, choose one to update:") + header = _(b"multiple parents, choose one to update:") prevs = [p.rev() for p in parents] choosedrev = utility.revselectionprompt(repo.ui, repo, prevs, header) if choosedrev is None: for p in parents: displayer.show(p) - repo.ui.warn(_('multiple parents, explicitly update to one\n')) + repo.ui.warn(_(b'multiple parents, explicitly update to one\n')) else: target = repo[choosedrev] return target, bookmark @@ -1003,13 +1003,13 @@ wkctx = repo[None] wparents = wkctx.parents() if len(wparents) != 1: - raise error.Abort(_('merge in 
progress')) + raise error.Abort(_(b'merge in progress')) if not mergeopt: # we only skip the check if noconflict is set - if ui.config('commands', 'update.check') == 'noconflict': + if ui.config(b'commands', b'update.check') == b'noconflict': pass else: - cmdutil.bailifchanged(repo, hint=_('do you want --merge?')) + cmdutil.bailifchanged(repo, hint=_(b'do you want --merge?')) topic = not opts.get("no_topic", False) hastopic = bool(_getcurrenttopic(repo)) @@ -1018,16 +1018,16 @@ if topic and hastopic: template = utility.stacktemplate - displayer = compat.changesetdisplayer(ui, repo, {'template': template}) + displayer = compat.changesetdisplayer(ui, repo, {b'template': template}) target, bookmark = _findprevtarget(repo, displayer, opts.get('move_bookmark'), topic) if target is not None: - backup = repo.ui.backupconfig('_internal', 'keep-topic') + backup = repo.ui.backupconfig(b'_internal', b'keep-topic') try: if topic and _getcurrenttopic(repo) != _gettopic(target): - repo.ui.setconfig('_internal', 'keep-topic', 'yes', - source='topic-extension') + repo.ui.setconfig(b'_internal', b'keep-topic', b'yes', + source=b'topic-extension') _prevupdate(repo, displayer, target, bookmark, dryrunopt, mergeopt) finally: @@ -1065,7 +1065,7 @@ wkctx = repo[None] wparents = wkctx.parents() if len(wparents) != 1: - raise error.Abort(_('merge in progress')) + raise error.Abort(_(b'merge in progress')) children = [ctx for ctx in wparents[0].children() if not ctx.obsolete()] topic = _getcurrenttopic(repo) @@ -1076,11 +1076,11 @@ children = [ctx for ctx in children if ctx not in filtered] template = utility.stacktemplate opts['stacktemplate'] = True - displayer = compat.changesetdisplayer(ui, repo, {'template': template}) + displayer = compat.changesetdisplayer(ui, repo, {b'template': template}) # check if we need to evolve while updating to the next child revision needevolve = False - aspchildren = evolvecmd._aspiringchildren(repo, [repo['.'].rev()]) + aspchildren = evolvecmd._aspiringchildren(repo, [repo[b'.'].rev()]) if topic: filtered.update(repo[c] for c in aspchildren if repo[c].topic() != topic) @@ -1100,53 +1100,53 @@ # check if working directory is clean before we evolve the next cset if needevolve and opts['evolve']: - hint = _('use `hg amend`, `hg revert` or `hg shelve`') + hint = _(b'use `hg amend`, `hg revert` or `hg shelve`') cmdutil.bailifchanged(repo, hint=hint) if not (opts['merge'] or (needevolve and opts['evolve'])): # we only skip the check if noconflict is set - if ui.config('commands', 'update.check') == 'noconflict': + if ui.config(b'commands', b'update.check') == b'noconflict': pass else: - cmdutil.bailifchanged(repo, hint=_('do you want --merge?')) + cmdutil.bailifchanged(repo, hint=_(b'do you want --merge?')) if len(children) == 1: c = children[0] return _updatetonext(ui, repo, c, displayer, opts) elif children: - cheader = _("ambiguous next changeset, choose one to update:") + cheader = _(b"ambiguous next changeset, choose one to update:") crevs = [c.rev() for c in children] choosedrev = utility.revselectionprompt(ui, repo, crevs, cheader) if choosedrev is None: - ui.warn(_("ambiguous next changeset:\n")) + ui.warn(_(b"ambiguous next changeset:\n")) for c in children: displayer.show(c) - ui.warn(_("explicitly update to one of them\n")) + ui.warn(_(b"explicitly update to one of them\n")) return 1 else: return _updatetonext(ui, repo, repo[choosedrev], displayer, opts) else: if not opts['evolve'] or not aspchildren: if filtered: - ui.warn(_('no children on topic "%s"\n') % topic) - 
ui.warn(_('do you want --no-topic\n')) + ui.warn(_(b'no children on topic "%s"\n') % topic) + ui.warn(_(b'do you want --no-topic\n')) else: - ui.warn(_('no children\n')) + ui.warn(_(b'no children\n')) if aspchildren: - msg = _('(%i unstable changesets to be evolved here, ' - 'do you want --evolve?)\n') + msg = _(b'(%i unstable changesets to be evolved here, ' + b'do you want --evolve?)\n') ui.warn(msg % len(aspchildren)) return 1 elif len(aspchildren) > 1: - cheader = _("ambiguous next (unstable) changeset, choose one to" - " evolve and update:") + cheader = _(b"ambiguous next (unstable) changeset, choose one to" + b" evolve and update:") choosedrev = utility.revselectionprompt(ui, repo, aspchildren, cheader) if choosedrev is None: - ui.warn(_("ambiguous next (unstable) changeset:\n")) + ui.warn(_(b"ambiguous next (unstable) changeset:\n")) for c in aspchildren: displayer.show(repo[c]) - ui.warn(_("(run 'hg evolve --rev REV' on one of them)\n")) + ui.warn(_(b"(run 'hg evolve --rev REV' on one of them)\n")) return 1 else: return _nextevolve(ui, repo, repo[choosedrev], opts) @@ -1159,21 +1159,21 @@ """logic for hg next command to evolve and update to an aspiring children""" cmdutil.bailifchanged(repo) - evolvestate = state.cmdstate(repo, opts={'command': 'next', - 'bookmarkchanges': []}) + evolvestate = state.cmdstate(repo, opts={b'command': b'next', + b'bookmarkchanges': []}) with repo.wlock(), repo.lock(): - tr = repo.transaction("evolve") + tr = repo.transaction(b"evolve") with util.acceptintervention(tr): result = evolvecmd._solveone(ui, repo, repo[aspchildren], evolvestate, opts.get('dry_run'), False, - lambda: None, category='orphan', + lambda: None, category=b'orphan', stacktmplt=opts.get('stacktemplate', False)) # making sure a next commit is formed if result[0] and result[1]: - ui.status(_('working directory is now at %s\n') - % ui.label(bytes(repo['.']), 'evolve.node')) + ui.status(_(b'working directory is now at %s\n') + % ui.label(bytes(repo[b'.']), b'evolve.node')) return 0 def _updatetonext(ui, repo, child, displayer, opts): @@ -1182,28 +1182,28 @@ bm = repo._activebookmark shouldmove = opts.get('move_bookmark') and bm is not None if opts.get('dry_run'): - ui.write(_('hg update %s;\n') % child) + ui.write(_(b'hg update %s;\n') % child) if shouldmove: - ui.write(_('hg bookmark %s -r %s;\n') % (bm, child)) + ui.write(_(b'hg bookmark %s -r %s;\n') % (bm, child)) else: updatecheck = None # --merge is passed, we don't need to care about commands.update.check # config option if opts['merge']: - updatecheck = 'none' + updatecheck = b'none' try: ret = hg.updatetotally(ui, repo, child.node(), None, updatecheck=updatecheck) except error.Abort as exc: # replace the hint to mention about --merge option - exc.hint = _('do you want --merge?') + exc.hint = _(b'do you want --merge?') raise if not ret: lock = tr = None try: lock = repo.lock() - tr = repo.transaction('next') + tr = repo.transaction(b'next') if shouldmove: bmchanges = [(bm, child.node())] repo._bookmarks.applychanges(repo, tr, bmchanges) @@ -1216,7 +1216,7 @@ displayer.show(child) return 0 -@eh.wrapcommand('commit') +@eh.wrapcommand(b'commit') def commitwrapper(orig, ui, repo, *arg, **kwargs): tr = None if kwargs.get('amend', False): @@ -1227,17 +1227,17 @@ try: obsoleted = kwargs.get('obsolete', []) if obsoleted: - obsoleted = repo.set('%lr', obsoleted) + obsoleted = repo.set(b'%lr', obsoleted) result = orig(ui, repo, *arg, **kwargs) if not result: # commit succeeded - new = repo['tip'] + new = repo[b'tip'] oldbookmarks = [] 
markers = [] for old in obsoleted: oldbookmarks.extend(repo.nodebookmarks(old.node())) markers.append((old, (new,))) if markers: - obsolete.createmarkers(repo, markers, operation="amend") + obsolete.createmarkers(repo, markers, operation=b"amend") bmchanges = [] for book in oldbookmarks: bmchanges.append((book, new.node())) @@ -1246,32 +1246,32 @@ wlock = repo.wlock() if not lock: lock = repo.lock() - tr = repo.transaction('commit') + tr = repo.transaction(b'commit') repo._bookmarks.applychanges(repo, tr, bmchanges) tr.close() return result finally: lockmod.release(tr, lock, wlock) -@eh.wrapcommand('strip', extension='strip', opts=[ - ('', 'bundle', None, _("delete the commit entirely and move it to a " - "backup bundle")), +@eh.wrapcommand(b'strip', extension=b'strip', opts=[ + (b'', b'bundle', None, _(b"delete the commit entirely and move it to a " + b"backup bundle")), ]) def stripwrapper(orig, ui, repo, *revs, **kwargs): - if (not ui.configbool('experimental', 'prunestrip') + if (not ui.configbool(b'experimental', b'prunestrip') or kwargs.get('bundle', False)): return orig(ui, repo, *revs, **kwargs) if kwargs.get('force'): - ui.warn(_("warning: --force has no effect during strip with evolve " - "enabled\n")) + ui.warn(_(b"warning: --force has no effect during strip with evolve " + b"enabled\n")) if kwargs.get('no_backup', False): - ui.warn(_("warning: --no-backup has no effect during strips with " - "evolve enabled\n")) + ui.warn(_(b"warning: --no-backup has no effect during strips with " + b"evolve enabled\n")) revs = list(revs) + kwargs.pop('rev', []) revs = set(scmutil.revrange(repo, revs)) - revs = repo.revs("(%ld)::", revs) + revs = repo.revs(b"(%ld)::", revs) kwargs['rev'] = [] kwargs['new'] = [] kwargs['successor'] = [] @@ -1280,9 +1280,9 @@ @eh.extsetup def oldevolveextsetup(ui): - entry = cmdutil.findcmd('commit', commands.table)[1] - entry[1].append(('o', 'obsolete', [], - _("make commit obsolete this revision (DEPRECATED)"))) + entry = cmdutil.findcmd(b'commit', commands.table)[1] + entry[1].append((b'o', b'obsolete', [], + _(b"make commit obsolete this revision (DEPRECATED)"))) @eh.wrapfunction(obsolete, '_checkinvalidmarkers') def _checkinvalidmarkers(orig, markers): @@ -1291,12 +1291,12 @@ Exist as a separated function to allow the evolve extension for a more subtle handling. 
""" - if 'debugobsconvert' in sys.argv: + if r'debugobsconvert' in sys.argv: return for mark in markers: if node.nullid in mark[1]: - msg = _('bad obsolescence marker detected: invalid successors nullid') - hint = _('You should run `hg debugobsconvert`') + msg = _(b'bad obsolescence marker detected: invalid successors nullid') + hint = _(b'You should run `hg debugobsconvert`') raise error.Abort(msg, hint=hint) @eh.command( @@ -1306,10 +1306,10 @@ def debugobsconvert(ui, repo, new_format): origmarkers = repo.obsstore._all # settle version if new_format == repo.obsstore._version: - msg = _('New format is the same as the old format, not upgrading!') + msg = _(b'New format is the same as the old format, not upgrading!') raise error.Abort(msg) with repo.lock(): - f = repo.svfs('obsstore', 'wb', atomictemp=True) + f = repo.svfs(b'obsstore', b'wb', atomictemp=True) known = set() markers = [] for m in origmarkers: @@ -1322,12 +1322,12 @@ continue known.add(m) markers.append(m) - ui.write(_('Old store is version %d, will rewrite in version %d\n') % ( + ui.write(_(b'Old store is version %d, will rewrite in version %d\n') % ( repo.obsstore._version, new_format)) for data in obsolete.encodemarkers(markers, True, new_format): f.write(data) f.close() - ui.write(_('Done!\n')) + ui.write(_(b'Done!\n')) def _helploader(ui): @@ -1336,55 +1336,57 @@ @eh.uisetup def _setuphelp(ui): for entry in help.helptable: - if entry[0] == "evolution": + if entry[0] == b"evolution": break else: - help.helptable.append((["evolution"], _("Safely Rewriting History"), - _helploader)) + help.helptable.append(([b"evolution"], _(b"Safely Rewriting History"), + _helploader)) help.helptable.sort() evolvestateversion = 0 def _evolvemessage(): - _msg = _('To continue: hg evolve --continue\n' - 'To abort: hg evolve --abort\n' - 'To stop: hg evolve --stop\n' - '(also see `hg help evolve.interrupted`)') + _msg = _(b'To continue: hg evolve --continue\n' + b'To abort: hg evolve --abort\n' + b'To stop: hg evolve --stop\n' + b'(also see `hg help evolve.interrupted`)') return cmdutil._commentlines(_msg) @eh.uisetup def setupevolveunfinished(ui): if not util.safehasattr(cmdutil, 'unfinishedstates'): from mercurial import state as statemod - _msg = _('To continue: hg evolve --continue\n' - 'To abort: hg evolve --abort\n' - 'To stop: hg evolve --stop\n' - '(also see `hg help evolve.interrupted`)') - statemod.addunfinished('evolve', fname='evolvestate', + _msg = _(b'To continue: hg evolve --continue\n' + b'To abort: hg evolve --abort\n' + b'To stop: hg evolve --stop\n' + b'(also see `hg help evolve.interrupted`)') + statemod.addunfinished(b'evolve', fname=b'evolvestate', continueflag=True, stopflag=True, - statushint=_msg) - statemod.addunfinished('pick', fname='pickstate', continueflag=True) + statushint=_msg, + abortfunc=evolvecmd.hgabortevolve) + statemod.addunfinished(b'pick', fname=b'pickstate', continueflag=True, + abortfunc=cmdrewrite.hgabortpick) else: # compat <= hg-5.0 (5f2f6912c9e6) - estate = ('evolvestate', False, False, _('evolve in progress'), - _("use 'hg evolve --continue' or 'hg evolve --abort' to abort")) + estate = (b'evolvestate', False, False, _(b'evolve in progress'), + _(b"use 'hg evolve --continue' or 'hg evolve --abort' to abort")) cmdutil.unfinishedstates.append(estate) - pstate = ('pickstate', False, False, _('pick in progress'), - _("use 'hg pick --continue' or 'hg pick --abort' to abort")) + pstate = (b'pickstate', False, False, _(b'pick in progress'), + _(b"use 'hg pick --continue' or 'hg pick --abort' to abort")) 
cmdutil.unfinishedstates.append(pstate) - afterresolved = ('evolvestate', _('hg evolve --continue')) - pickresolved = ('pickstate', _('hg pick --continue')) + afterresolved = (b'evolvestate', _(b'hg evolve --continue')) + pickresolved = (b'pickstate', _(b'hg pick --continue')) cmdutil.afterresolvedstates.append(afterresolved) cmdutil.afterresolvedstates.append(pickresolved) if util.safehasattr(cmdutil, 'STATES'): - statedata = ('evolve', cmdutil.fileexistspredicate('evolvestate'), + statedata = (b'evolve', cmdutil.fileexistspredicate(b'evolvestate'), _evolvemessage) cmdutil.STATES = (statedata, ) + cmdutil.STATES @eh.wrapfunction(hg, 'clean') def clean(orig, repo, *args, **kwargs): ret = orig(repo, *args, **kwargs) - util.unlinkpath(repo.vfs.join('evolvestate'), ignoremissing=True) + util.unlinkpath(repo.vfs.join(b'evolvestate'), ignoremissing=True) return ret
--- a/hgext3rd/evolve/cmdrewrite.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/evolve/cmdrewrite.py Tue Sep 24 12:42:27 2019 +0200 @@ -67,12 +67,12 @@ return if not compat.isobsnotesupported(): - ui.warn(_("current hg version does not support storing" - " note in obsmarker\n")) + ui.warn(_(b"current hg version does not support storing" + b" note in obsmarker\n")) if len(note) > 255: - raise error.Abort(_("cannot store a note of more than 255 bytes")) - if '\n' in note: - raise error.Abort(_("note cannot contain a newline")) + raise error.Abort(_(b"cannot store a note of more than 255 bytes")) + if b'\n' in note: + raise error.Abort(_(b"note cannot contain a newline")) def _resolveoptions(ui, opts): """modify commit options dict to handle related options @@ -82,7 +82,7 @@ """ # N.B. this is extremely similar to setupheaderopts() in mq.py if not opts.get('date') and opts.get('current_date'): - opts['date'] = '%d %d' % compat.makedate() + opts['date'] = b'%d %d' % compat.makedate() if not opts.get('user') and opts.get('current_user'): opts['user'] = ui.username() @@ -136,26 +136,26 @@ if opts.pop('all', False): # add an include for all include = list(opts.get('include')) - include.append('re:.*') + include.append(b're:.*') edit = opts.pop('edit', False) log = opts.get('logfile') opts['amend'] = True _resolveoptions(ui, opts) - _alias, commitcmd = cmdutil.findcmd('commit', commands.table) + _alias, commitcmd = cmdutil.findcmd(b'commit', commands.table) with repo.wlock(), repo.lock(): if not (edit or opts['message'] or log): - opts['message'] = repo['.'].description() - rewriteutil.precheck(repo, [repo['.'].rev()], action='amend') + opts['message'] = repo[b'.'].description() + rewriteutil.precheck(repo, [repo[b'.'].rev()], action=b'amend') return commitcmd[0](ui, repo, *pats, **opts) def amendpatch(ui, repo, *pats, **opts): """logic for --patch flag of `hg amend` command.""" - with repo.wlock(), repo.lock(), repo.transaction('amend') as tr: + with repo.wlock(), repo.lock(), repo.transaction(b'amend') as tr: cmdutil.bailifchanged(repo) # first get the patch - old = repo['.'] + old = repo[b'.'] p1 = old.p1() - rewriteutil.precheck(repo, [old.rev()], 'amend') + rewriteutil.precheck(repo, [old.rev()], b'amend') diffopts = patch.difffeatureopts(repo.ui, whitespace=True) diffopts.nodates = True diffopts.git = True @@ -168,12 +168,12 @@ fp.write(chunk) newnode = _editandapply(ui, repo, pats, old, p1, fp, diffopts) if newnode == old.node(): - raise error.Abort(_("nothing changed")) + raise error.Abort(_(b"nothing changed")) metadata = {} if opts.get('note'): - metadata['note'] = opts['note'] + metadata[b'note'] = opts['note'] replacements = {old.node(): [newnode]} - scmutil.cleanupnodes(repo, replacements, operation='amend', + scmutil.cleanupnodes(repo, replacements, operation=b'amend', metadata=metadata) phases.retractboundary(repo, tr, old.phase(), [newnode]) hg.updaterepo(repo, newnode, True) @@ -183,13 +183,13 @@ while newnode is None: fp.seek(0) previous_patch = fp.getvalue() - newpatch = ui.edit(fp.getvalue(), old.user(), action="diff") + newpatch = ui.edit(fp.getvalue(), old.user(), action=b"diff") afp = stringio() afp.write(newpatch) if pats: # write rest of the files in the patch - restmatcher = scmutil.match(old, [], opts={'exclude': pats}) + restmatcher = scmutil.match(old, [], opts={b'exclude': pats}) for chunk, label in patch.diffui(repo, p1.node(), old.node(), match=restmatcher, opts=diffopts): @@ -197,21 +197,21 @@ user_patch = afp.getvalue() if not user_patch: - raise 
error.Abort(_("empty patch file, amend aborted")) + raise error.Abort(_(b"empty patch file, amend aborted")) if user_patch == previous_patch: - raise error.Abort(_("patch unchanged")) + raise error.Abort(_(b"patch unchanged")) afp.seek(0) # write the patch to repo and get the newnode try: newnode = _writepatch(ui, repo, old, afp) except patch.PatchError as err: - ui.write_err(_("failed to apply edited patch: %s\n") % err) + ui.write_err(_(b"failed to apply edited patch: %s\n") % err) defaultchoice = 0 # yes if not ui.interactive: defaultchoice = 1 # no - retrychoice = _('try to fix the patch (yn)?$$ &Yes $$ &No') + retrychoice = _(b'try to fix the patch (yn)?$$ &Yes $$ &No') if ui.promptchoice(retrychoice, default=defaultchoice): - raise error.Abort(_("Could not apply amended path")) + raise error.Abort(_(b"Could not apply amended path")) else: # consider a third choice where we restore the original patch fp = stringio() @@ -233,20 +233,20 @@ with patchcontext as metadata: # store the metadata from the patch to variables - parents = (metadata.get('p1'), metadata.get('p2')) - date = metadata.get('date') or old.date() - branch = metadata.get('branch') or old.branch() - user = metadata.get('user') or old.user() + parents = (metadata.get(b'p1'), metadata.get(b'p2')) + date = metadata.get(b'date') or old.date() + branch = metadata.get(b'branch') or old.branch() + user = metadata.get(b'user') or old.user() # XXX: we must extract extras from the patchfile too extra = old.extra() - message = metadata.get('message') or old.description() + message = metadata.get(b'message') or old.description() store = patch.filestore() fp.seek(0) try: files = set() # beware: next line may raise a PatchError to be handled by the caller # of this function - patch.patchrepo(ui, repo, pold, store, fp, 1, '', + patch.patchrepo(ui, repo, pold, store, fp, 1, b'', files=files, eolmode=None) memctx = context.memctx(repo, parents, message, files=files, @@ -269,23 +269,23 @@ else: prev = node.nullid - fp.write("# HG changeset patch\n") - fp.write("# User %s\n" % ctx.user()) - fp.write("# Date %d %d\n" % ctx.date()) - fp.write("# %s\n" % datestr(ctx.date())) - if branch and branch != 'default': - fp.write("# Branch %s\n" % branch) - fp.write("# Node ID %s\n" % node.hex(nodeval)) - fp.write("# Parent %s\n" % node.hex(prev)) + fp.write(b"# HG changeset patch\n") + fp.write(b"# User %s\n" % ctx.user()) + fp.write(b"# Date %d %d\n" % ctx.date()) + fp.write(b"# %s\n" % datestr(ctx.date())) + if branch and branch != b'default': + fp.write(b"# Branch %s\n" % branch) + fp.write(b"# Node ID %s\n" % node.hex(nodeval)) + fp.write(b"# Parent %s\n" % node.hex(prev)) if len(parents) > 1: - fp.write("# Parent %s\n" % node.hex(parents[1])) + fp.write(b"# Parent %s\n" % node.hex(parents[1])) for headerid in cmdutil.extraexport: header = cmdutil.extraexportmap[headerid](1, ctx) if header is not None: - fp.write('# %s\n' % header) + fp.write(b'# %s\n' % header) fp.write(ctx.description().rstrip()) - fp.write("\n\n") + fp.write(b"\n\n") def _touchedbetween(repo, source, dest, match=None): touched = set() @@ -354,7 +354,7 @@ oldctx to a copy of oldctx not containing changed files matched by match. """ - ctx = repo['.'] + ctx = repo[b'.'] ds = repo.dirstate copies = dict(ds.copies()) if interactive: @@ -373,7 +373,7 @@ # Also any modifications to a removed file will result the status as # added, so we have only two cases. So in either of the cases, the # resulting status can be modified or clean. 
- if ds[f] == 'r': + if ds[f] == b'r': # But the file is removed in the working directory, leaving that # as removed continue @@ -387,7 +387,7 @@ # does not adds it back. If it's adds it back, we do a normallookup. # The file can't be removed in working directory, because it was # removed in oldctx - if ds[f] == 'a': + if ds[f] == b'a': ds.normallookup(f) continue ds.remove(f) @@ -399,7 +399,7 @@ # would have resulted in modified status, not removed. # So a file added in a commit, and uncommitting that addition must # result in file being stated as unknown. - if ds[f] == 'r': + if ds[f] == b'r': # The working directory say it's removed, so lets make the file # unknown ds.drop(f) @@ -408,23 +408,23 @@ else: m, a, r = repo.status(oldctx.p1(), oldctx, match=match)[:3] for f in m: - if ds[f] == 'r': + if ds[f] == b'r': # modified + removed -> removed continue ds.normallookup(f) for f in a: - if ds[f] == 'r': + if ds[f] == b'r': # added + removed -> unknown ds.drop(f) - elif ds[f] != 'a': + elif ds[f] != b'a': ds.add(f) for f in r: - if ds[f] == 'a': + if ds[f] == b'a': # removed + added -> normal ds.normallookup(f) - elif ds[f] != 'r': + elif ds[f] != b'r': ds.remove(f) # Merge old parent and old working dir copies @@ -442,7 +442,7 @@ for dst, src in oldcopies.items()) # Adjust the dirstate copies for dst, src in copies.items(): - if (src not in ctx or dst in ctx or ds[dst] != 'a'): + if (src not in ctx or dst in ctx or ds[dst] != b'a'): src = None ds.copy(src, dst) @@ -487,13 +487,13 @@ lock = repo.lock() wctx = repo[None] if len(wctx.parents()) <= 0: - raise error.Abort(_("cannot uncommit null changeset")) + raise error.Abort(_(b"cannot uncommit null changeset")) if len(wctx.parents()) > 1: - raise error.Abort(_("cannot uncommit while merging")) - old = repo['.'] - rewriteutil.precheck(repo, [repo['.'].rev()], action='uncommit') + raise error.Abort(_(b"cannot uncommit while merging")) + old = repo[b'.'] + rewriteutil.precheck(repo, [repo[b'.'].rev()], action=b'uncommit') if len(old.parents()) > 1: - raise error.Abort(_("cannot uncommit merge changeset")) + raise error.Abort(_(b"cannot uncommit merge changeset")) oldphase = old.phase() rev = None @@ -501,13 +501,13 @@ rev = scmutil.revsingle(repo, opts.get('rev')) ctx = repo[None] if ctx.p1() == rev or ctx.p2() == rev: - raise error.Abort(_("cannot uncommit to parent changeset")) + raise error.Abort(_(b"cannot uncommit to parent changeset")) onahead = old.rev() in repo.changelog.headrevs() disallowunstable = not obsolete.isenabled(repo, obsolete.allowunstableopt) if disallowunstable and not onahead: - raise error.Abort(_("cannot uncommit in the middle of a stack")) + raise error.Abort(_(b"cannot uncommit in the middle of a stack")) match = scmutil.match(old, pats, pycompat.byteskwargs(opts)) @@ -542,7 +542,7 @@ % uipathfn(f), hint=hint) # Recommit the filtered changeset - tr = repo.transaction('uncommit') + tr = repo.transaction(b'uncommit') if interactive: opts['all'] = True newid = _interactiveuncommit(ui, repo, old, match) @@ -557,16 +557,16 @@ message=message, user=opts.get('user'), date=opts.get('date')) if newid is None: - raise error.Abort(_('nothing to uncommit'), - hint=_("use --all to uncommit all files")) + raise error.Abort(_(b'nothing to uncommit'), + hint=_(b"use --all to uncommit all files")) # metadata to be stored in obsmarker metadata = {} if opts.get('note'): - metadata['note'] = opts['note'] + metadata[b'note'] = opts['note'] replacements = {old.node(): [newid]} - scmutil.cleanupnodes(repo, replacements, 
operation="uncommit", + scmutil.cleanupnodes(repo, replacements, operation=b"uncommit", metadata=metadata) phases.retractboundary(repo, tr, oldphase, [newid]) if opts.get('revert'): @@ -576,8 +576,8 @@ repo.dirstate.setparents(newid, node.nullid) _uncommitdirstate(repo, old, match, interactive) if not repo[newid].files(): - ui.warn(_("new changeset is empty\n")) - ui.status(_("(use 'hg prune .' to remove it)\n")) + ui.warn(_(b"new changeset is empty\n")) + ui.status(_(b"(use 'hg prune .' to remove it)\n")) tr.close() finally: lockmod.release(tr, lock, wlock) @@ -604,7 +604,7 @@ fp.seek(0) newnode = _patchtocommit(ui, repo, old, fp) # creating obs marker temp -> () - obsolete.createmarkers(repo, [(repo[tempnode], ())], operation="uncommit") + obsolete.createmarkers(repo, [(repo[tempnode], ())], operation=b"uncommit") return newnode def _createtempcommit(ui, repo, old, match): @@ -626,20 +626,20 @@ # to add uncommit as an operation taking care of BC. try: chunks, opts = cmdutil.recordfilter(repo.ui, originalchunks, match, - operation='discard') + operation=b'discard') except TypeError: # hg <= 4.9 (db72f9f6580e) chunks, opts = cmdutil.recordfilter(repo.ui, originalchunks, - operation='discard') + operation=b'discard') if not chunks: - raise error.Abort(_("nothing selected to uncommit")) + raise error.Abort(_(b"nothing selected to uncommit")) fp = stringio() for c in chunks: c.write(fp) fp.seek(0) oldnode = node.hex(old.node())[:12] - message = 'temporary commit for uncommiting %s' % oldnode + message = b'temporary commit for uncommiting %s' % oldnode tempnode = _patchtocommit(ui, repo, old, fp, message, oldnode) return tempnode @@ -656,14 +656,14 @@ user = old.user() extra = old.extra() if extras: - extra['uncommit_source'] = extras + extra[b'uncommit_source'] = extras if not message: message = old.description() store = patch.filestore() try: files = set() try: - patch.patchrepo(ui, repo, pold, store, fp, 1, '', + patch.patchrepo(ui, repo, pold, store, fp, 1, b'', files=files, eolmode=None) except patch.PatchError as err: raise error.Abort(pycompat.bytestr(err)) @@ -733,33 +733,33 @@ revs = list(revs) revs.extend(opts['rev']) if not revs: - raise error.Abort(_('no revisions specified')) + raise error.Abort(_(b'no revisions specified')) revs = scmutil.revrange(repo, revs) if opts['from'] and opts['exact']: - raise error.Abort(_('cannot use both --from and --exact')) + raise error.Abort(_(b'cannot use both --from and --exact')) elif opts['from']: # Try to extend given revision starting from the working directory - extrevs = repo.revs('(%ld::.) or (.::%ld)', revs, revs) + extrevs = repo.revs(b'(%ld::.) 
or (.::%ld)', revs, revs) discardedrevs = [r for r in revs if r not in extrevs] if discardedrevs: - msg = _("cannot fold non-linear revisions") - hint = _("given revisions are unrelated to parent of working" - " directory") + msg = _(b"cannot fold non-linear revisions") + hint = _(b"given revisions are unrelated to parent of working" + b" directory") raise error.Abort(msg, hint=hint) revs = extrevs elif opts['exact']: # Nothing to do; "revs" is already set correctly pass else: - raise error.Abort(_('must specify either --from or --exact')) + raise error.Abort(_(b'must specify either --from or --exact')) if not revs: - raise error.Abort(_('specified revisions evaluate to an empty set'), - hint=_('use different revision arguments')) + raise error.Abort(_(b'specified revisions evaluate to an empty set'), + hint=_(b'use different revision arguments')) elif len(revs) == 1: - ui.write_err(_('single revision specified, nothing to fold\n')) + ui.write_err(_(b'single revision specified, nothing to fold\n')) return 1 # Sort so combined commit message of `hg fold --exact -r . -r .^` is @@ -773,7 +773,7 @@ root, head, p2 = rewriteutil.foldcheck(repo, revs) - tr = repo.transaction('fold') + tr = repo.transaction(b'fold') try: commitopts = opts.copy() allctx = [repo[r] for r in revs] @@ -782,15 +782,15 @@ if commitopts.get('message') or commitopts.get('logfile'): commitopts['edit'] = False else: - msgs = ["HG: This is a fold of %d changesets." % len(allctx)] - msgs += ["HG: Commit message of changeset %d.\n\n%s\n" % + msgs = [b"HG: This is a fold of %d changesets." % len(allctx)] + msgs += [b"HG: Commit message of changeset %d.\n\n%s\n" % (c.rev(), c.description()) for c in allctx] - commitopts['message'] = "\n".join(msgs) + commitopts['message'] = b"\n".join(msgs) commitopts['edit'] = True metadata = {} if opts.get('note'): - metadata['note'] = opts['note'] + metadata[b'note'] = opts['note'] updates = allctx[:] if p2 is not None and root.p2() != p2: @@ -803,13 +803,13 @@ commitopts=commitopts) phases.retractboundary(repo, tr, targetphase, [newid]) replacements = {ctx.node(): [newid] for ctx in allctx} - scmutil.cleanupnodes(repo, replacements, operation="fold", + scmutil.cleanupnodes(repo, replacements, operation=b"fold", metadata=metadata) tr.close() finally: tr.release() - ui.status('%i changesets folded\n' % len(revs)) - if repo['.'].rev() in revs: + ui.status(b'%i changesets folded\n' % len(revs)) + if repo[b'.'].rev() in revs: hg.update(repo, newid) finally: lockmod.release(lock, wlock) @@ -856,8 +856,8 @@ revs.extend(opts['rev']) if not revs: if opts['fold']: - raise error.Abort(_('revisions must be specified with --fold')) - revs = ['.'] + raise error.Abort(_(b'revisions must be specified with --fold')) + revs = [b'.'] with repo.wlock(), repo.lock(): revs = scmutil.revrange(repo, revs) @@ -870,21 +870,21 @@ # we need to rewrite a first, then directly rewrite b on top of the # new a, then rewrite c on top of the new b. So we need to handle # revisions in topological order. 
- raise error.Abort(_('editing multiple revisions without --fold is ' - 'not currently supported')) + raise error.Abort(_(b'editing multiple revisions without --fold is ' + b'not currently supported')) if opts['fold']: root, head, p2 = rewriteutil.foldcheck(repo, revs) else: - if repo.revs("%ld and public()", revs): - raise error.Abort(_('cannot edit commit information for public ' - 'revisions')) + if repo.revs(b"%ld and public()", revs): + raise error.Abort(_(b'cannot edit commit information for public ' + b'revisions')) newunstable = rewriteutil.disallowednewunstable(repo, revs) if newunstable: - msg = _('cannot edit commit information in the middle' - ' of a stack') - hint = _('%s will become unstable and new unstable changes' - ' are not allowed') + msg = _(b'cannot edit commit information in the middle' + b' of a stack') + hint = _(b'%s will become unstable and new unstable changes' + b' are not allowed') hint %= repo[newunstable.first()] raise error.Abort(msg, hint=hint) root = head = repo[revs.first()] @@ -892,7 +892,7 @@ wctx = repo[None] p1 = wctx.p1() - tr = repo.transaction('metaedit') + tr = repo.transaction(b'metaedit') newp1 = None try: commitopts = opts.copy() @@ -903,12 +903,12 @@ commitopts['edit'] = False else: if opts['fold']: - msgs = ["HG: This is a fold of %d changesets." % len(allctx)] - msgs += ["HG: Commit message of changeset %d.\n\n%s\n" % + msgs = [b"HG: This is a fold of %d changesets." % len(allctx)] + msgs += [b"HG: Commit message of changeset %d.\n\n%s\n" % (c.rev(), c.description()) for c in allctx] else: msgs = [head.description()] - commitopts['message'] = "\n".join(msgs) + commitopts['message'] = b"\n".join(msgs) commitopts['edit'] = True updates = allctx[:] @@ -928,20 +928,20 @@ # metadata to be stored on obsmarker metadata = {} if opts.get('note'): - metadata['note'] = opts['note'] + metadata[b'note'] = opts['note'] phases.retractboundary(repo, tr, targetphase, [newid]) obsolete.createmarkers(repo, [(ctx, (repo[newid],)) for ctx in allctx], - metadata=metadata, operation="metaedit") + metadata=metadata, operation=b"metaedit") else: - ui.status(_("nothing changed\n")) + ui.status(_(b"nothing changed\n")) tr.close() finally: tr.release() if opts['fold']: - ui.status('%i changesets folded\n' % len(revs)) + ui.status(b'%i changesets folded\n' % len(revs)) if newp1 is not None: hg.update(repo, newp1) @@ -957,9 +957,9 @@ date = opts.get('date') user = opts.get('user') if date: - metadata['date'] = '%i %i' % compat.parsedate(date) + metadata[b'date'] = b'%i %i' % compat.parsedate(date) if user: - metadata['user'] = user + metadata[b'user'] = user return metadata @eh.command( @@ -1021,10 +1021,10 @@ fold = opts.get('fold') split = opts.get('split') - options = [o for o in ('pair', 'fold', 'split') if opts.get(o)] + options = [o for o in (r'pair', r'fold', r'split') if opts.get(o)] if 1 < len(options): - _opts = pycompat.sysbytes(', '.join(options)) - raise error.Abort(_("can only specify one of %s") % _opts) + _opts = pycompat.sysbytes(r', '.join(options)) + raise error.Abort(_(b"can only specify one of %s") % _opts) if bookmarks: reachablefrombookmark = rewriteutil.reachablefrombookmark @@ -1034,14 +1034,14 @@ rewriteutil.deletebookmark(repo, repomarks, bookmarks) if not revs: - raise error.Abort(_('nothing to prune')) + raise error.Abort(_(b'no revisions specified to prune')) wlock = lock = tr = None try: wlock = repo.wlock() lock = repo.lock() - rewriteutil.precheck(repo, revs, 'prune') - tr = repo.transaction('prune') + rewriteutil.precheck(repo, revs, 
b'prune') + tr = repo.transaction(b'prune') # defines pruned changesets precs = [] revs.sort() @@ -1049,33 +1049,33 @@ cp = repo[p] precs.append(cp) if not precs: - raise error.Abort('nothing to prune') + raise error.Abort(b'nothing to prune') # defines successors changesets sucs = scmutil.revrange(repo, succs) sucs.sort() sucs = tuple(repo[n] for n in sucs) if not biject and len(sucs) > 1 and len(precs) > 1: - msg = "Can't use multiple successors for multiple precursors" - hint = _("use --pair to mark a series as a replacement" - " for another") + msg = b"Can't use multiple successors for multiple precursors" + hint = _(b"use --pair to mark a series as a replacement" + b" for another") raise error.Abort(msg, hint=hint) elif biject and len(sucs) != len(precs): - msg = "Can't use %d successors for %d precursors" \ + msg = b"Can't use %d successors for %d precursors"\ % (len(sucs), len(precs)) raise error.Abort(msg) elif (len(precs) == 1 and len(sucs) > 1) and not split: - msg = "please add --split if you want to do a split" + msg = b"please add --split if you want to do a split" raise error.Abort(msg) elif len(sucs) == 1 and len(precs) > 1 and not fold: - msg = "please add --fold if you want to do a fold" + msg = b"please add --fold if you want to do a fold" raise error.Abort(msg) elif biject: replacements = {p.node(): [s.node()] for p, s in zip(precs, sucs)} else: replacements = {p.node(): [s.node() for s in sucs] for p in precs} - wdp = repo['.'] + wdp = repo[b'.'] if wdp in precs: if len(sucs) == 1 and len(precs) == 1: @@ -1103,7 +1103,7 @@ # only reset the dirstate for files that would actually change # between the working context and uctx - descendantrevs = repo.revs("%d::." % newnode.rev()) + descendantrevs = repo.revs(b"%d::." % newnode.rev()) changedfiles = [] for rev in descendantrevs: # blindly reset the files, regardless of what actually @@ -1112,7 +1112,7 @@ # reset files that only changed in the dirstate too dirstate = repo.dirstate - dirchanges = [f for f in dirstate if dirstate[f] != 'n'] + dirchanges = [f for f in dirstate if dirstate[f] != b'n'] changedfiles.extend(dirchanges) repo.dirstate.rebuild(newnode.node(), newnode.manifest(), changedfiles) @@ -1128,8 +1128,8 @@ bmchanges = [(bookactive, newnode.node())] repo._bookmarks.applychanges(repo, tr, bmchanges) commands.update(ui, repo, newnode.hex()) - ui.status(_('working directory is now at %s\n') - % ui.label(bytes(newnode), 'evolve.node')) + ui.status(_(b'working directory is now at %s\n') + % ui.label(bytes(newnode), b'evolve.node')) if movebookmark: bookmarksmod.activate(repo, bookactive) @@ -1139,11 +1139,11 @@ # store note in metadata if opts.get('note'): - metadata['note'] = opts['note'] + metadata[b'note'] = opts['note'] precrevs = (precursor.rev() for precursor in precs) moves = {} - for ctx in repo.unfiltered().set('bookmark() and %ld', precrevs): + for ctx in repo.unfiltered().set(b'bookmark() and %ld', precrevs): # used to be: # # ldest = list(repo.set('max((::%d) - obsolete())', ctx)) @@ -1156,11 +1156,11 @@ if not dest.obsolete() and dest.node() not in replacements: moves[ctx.node()] = dest.node() break - scmutil.cleanupnodes(repo, replacements, operation="prune", moves=moves, + scmutil.cleanupnodes(repo, replacements, operation=b"prune", moves=moves, metadata=metadata) # informs that changeset have been pruned - ui.status(_('%i changesets pruned\n') % len(precs)) + ui.status(_(b'%i changesets pruned\n') % len(precs)) tr.close() finally: @@ -1194,13 +1194,13 @@ revs = opts.get('rev') if not revs: - revarg = 
'.' + revarg = b'.' elif len(revs) == 1: revarg = revs[0] else: # XXX --rev often accept multiple value, it seems safer to explicitly # complains here instead of just taking the last value. - raise error.Abort(_('more than one revset is given')) + raise error.Abort(_(b'more than one revset is given')) # Save the current branch to restore it in the end savedbranch = repo.dirstate.branch() @@ -1211,18 +1211,18 @@ ctx = scmutil.revsingle(repo, revarg) rev = ctx.rev() cmdutil.bailifchanged(repo) - rewriteutil.precheck(repo, [rev], action='split') - tr = repo.transaction('split') + rewriteutil.precheck(repo, [rev], action=b'split') + tr = repo.transaction(b'split') # make sure we respect the phase while splitting - overrides = {('phases', 'new-commit'): ctx.phase()} + overrides = {(b'phases', b'new-commit'): ctx.phase()} if len(ctx.parents()) > 1: - raise error.Abort(_("cannot split merge commits")) + raise error.Abort(_(b"cannot split merge commits")) prev = ctx.p1() bmupdate = rewriteutil.bookmarksupdater(repo, ctx.node(), tr) bookactive = repo._activebookmark if bookactive is not None: - repo.ui.status(_("(leaving bookmark %s)\n") % repo._activebookmark) + repo.ui.status(_(b"(leaving bookmark %s)\n") % repo._activebookmark) bookmarksmod.deactivate(repo) # Prepare the working directory @@ -1231,8 +1231,8 @@ def haschanges(matcher=None): modified, added, removed, deleted = repo.status(match=matcher)[:4] return modified or added or removed or deleted - msg = ("HG: This is the original pre-split commit message. " - "Edit it as appropriate.\n\n") + msg = (b"HG: This is the original pre-split commit message. " + b"Edit it as appropriate.\n\n") msg += ctx.description() opts['message'] = msg opts['edit'] = True @@ -1253,21 +1253,21 @@ if haschanges(matcher): if iselect: - with repo.ui.configoverride(overrides, 'split'): - cmdutil.dorecord(ui, repo, commands.commit, 'commit', + with repo.ui.configoverride(overrides, b'split'): + cmdutil.dorecord(ui, repo, commands.commit, b'commit', False, cmdutil.recordfilter, *pats, **opts) # TODO: Does no seem like the best way to do this # We should make dorecord return the newly created commit - newcommits.append(repo['.']) + newcommits.append(repo[b'.']) elif not pats: - msg = _("no files of directories specified") - hint = _("do you want --interactive") + msg = _(b"no files of directories specified") + hint = _(b"do you want --interactive") raise error.Abort(msg, hint=hint) else: - with repo.ui.configoverride(overrides, 'split'): + with repo.ui.configoverride(overrides, b'split'): commands.commit(ui, repo, *pats, **opts) - newcommits.append(repo['.']) + newcommits.append(repo[b'.']) if pats: # refresh the wctx used for the matcher matcher = scmutil.match(repo[None], pats) @@ -1277,20 +1277,20 @@ if haschanges(matcher): nextaction = None while nextaction is None: - nextaction = ui.prompt('continue splitting? [Ycdq?]', default='y') - if nextaction == 'c': - with repo.ui.configoverride(overrides, 'split'): + nextaction = ui.prompt(b'continue splitting? 
[Ycdq?]', default=b'y') + if nextaction == b'c': + with repo.ui.configoverride(overrides, b'split'): commands.commit(ui, repo, **opts) - newcommits.append(repo['.']) + newcommits.append(repo[b'.']) break - elif nextaction == 'q': - raise error.Abort(_('user quit')) - elif nextaction == 'd': + elif nextaction == b'q': + raise error.Abort(_(b'user quit')) + elif nextaction == b'd': # TODO: We should offer a way for the user to confirm # what is the remaining changes, either via a separate # diff action or by showing the remaining and # prompting for confirmation - ui.status(_('discarding remaining changes\n')) + ui.status(_(b'discarding remaining changes\n')) target = newcommits[0] if pats: status = repo.status(match=matcher)[:4] @@ -1303,24 +1303,24 @@ else: cmdutil.revert(ui, repo, repo[target], (target, node.nullid), all=True) - elif nextaction == '?': + elif nextaction == b'?': nextaction = None - ui.write(_("y - yes, continue selection\n")) - ui.write(_("c - commit, select all remaining changes\n")) - ui.write(_("d - discard, discard remaining changes\n")) - ui.write(_("q - quit, abort the split\n")) - ui.write(_("? - ?, display help\n")) + ui.write(_(b"y - yes, continue selection\n")) + ui.write(_(b"c - commit, select all remaining changes\n")) + ui.write(_(b"d - discard, discard remaining changes\n")) + ui.write(_(b"q - quit, abort the split\n")) + ui.write(_(b"? - ?, display help\n")) else: continue break # propagate the previous break else: - ui.status(_("no more change to split\n")) + ui.status(_(b"no more change to split\n")) if haschanges(): # XXX: Should we show a message for informing the user # that we create another commit with remaining changes? - with repo.ui.configoverride(overrides, 'split'): + with repo.ui.configoverride(overrides, b'split'): commands.commit(ui, repo, **opts) - newcommits.append(repo['.']) + newcommits.append(repo[b'.']) if newcommits: tip = repo[newcommits[-1]] bmupdate(tip.node()) @@ -1328,9 +1328,9 @@ bookmarksmod.activate(repo, bookactive) metadata = {} if opts.get('note'): - metadata['note'] = opts['note'] + metadata[b'note'] = opts['note'] obsolete.createmarkers(repo, [(repo[rev], newcommits)], - metadata=metadata, operation="split") + metadata=metadata, operation=b"split") tr.close() finally: # Restore the old branch @@ -1348,7 +1348,7 @@ b'mark the new revision as successor of the old one potentially creating ' b'divergence')], # allow to choose the seed ? 
- _('[-r] revs')) + _(b'[-r] revs')) def touch(ui, repo, *revs, **opts): """create successors identical to their predecessors but the changeset ID @@ -1358,18 +1358,18 @@ revs = list(revs) revs.extend(opts['rev']) if not revs: - revs = ['.'] + revs = [b'.'] revs = scmutil.revrange(repo, revs) if not revs: - ui.write_err('no revision to touch\n') + ui.write_err(b'no revision to touch\n') return 1 duplicate = opts['duplicate'] if not duplicate: - rewriteutil.precheck(repo, revs, 'touch') + rewriteutil.precheck(repo, revs, b'touch') tmpl = utility.shorttemplate - displayer = compat.changesetdisplayer(ui, repo, {'template': tmpl}) - with repo.wlock(), repo.lock(), repo.transaction('touch'): + displayer = compat.changesetdisplayer(ui, repo, {b'template': tmpl}) + with repo.wlock(), repo.lock(), repo.transaction(b'touch'): touchnodes(ui, repo, revs, displayer, **opts) def touchnodes(ui, repo, revs, displayer, **opts): @@ -1380,7 +1380,7 @@ for r in revs: ctx = repo[r] extra = ctx.extra().copy() - extra['__touch-noise__'] = random.randint(0, 0xffffffff) + extra[b'__touch-noise__'] = random.randint(0, 0xffffffff) # search for touched parent p1 = ctx.p1().node() p2 = ctx.p2().node() @@ -1411,12 +1411,12 @@ else: displayer.show(ctx) index = ui.promptchoice( - _("reviving this changeset will create divergence" - " unless you make a duplicate.\n(a)llow divergence or" - " (d)uplicate the changeset? $$ &Allowdivergence $$ " - "&Duplicate"), 0) - choice = ['allowdivergence', 'duplicate'][index] - if choice == 'allowdivergence': + _(b"reviving this changeset will create divergence" + b" unless you make a duplicate.\n(a)llow divergence or" + b" (d)uplicate the changeset? $$ &Allowdivergence $$ " + b"&Duplicate"), 0) + choice = [b'allowdivergence', b'duplicate'][index] + if choice == b'allowdivergence': duplicate = False else: duplicate = True @@ -1424,7 +1424,7 @@ updates = [] if len(ctx.parents()) > 1: updates = ctx.parents() - extradict = {'extra': extra} + extradict = {b'extra': extra} new, unusedvariable = rewriteutil.rewrite(repo, ctx, updates, ctx, [p1, p2], commitopts=extradict) @@ -1434,9 +1434,9 @@ if not duplicate: metadata = {} if opts.get('note'): - metadata['note'] = opts['note'] + metadata[b'note'] = opts['note'] obsolete.createmarkers(repo, [(ctx, (repo[new],))], - metadata=metadata, operation="touch") + metadata=metadata, operation=b"touch") tr = repo.currenttransaction() phases.retractboundary(repo, tr, ctx.phase(), [new]) if ctx in repo[None].parents(): @@ -1448,7 +1448,7 @@ [(b'r', b'rev', b'', _(b'revision to pick'), _(b'REV')), (b'c', b'continue', False, b'continue interrupted pick'), (b'a', b'abort', False, b'abort interrupted pick'), - ] + mergetoolopts, + ] + mergetoolopts, _(b'[-r] rev')) def cmdpick(ui, repo, *revs, **opts): """move a commit on the top of working directory parent and updates to it.""" @@ -1457,69 +1457,61 @@ abort = opts.get('abort') if cont and abort: - raise error.Abort(_("cannot specify both --continue and --abort")) + raise error.Abort(_(b"cannot specify both --continue and --abort")) revs = list(revs) if opts.get('rev'): revs.append(opts['rev']) with repo.wlock(), repo.lock(): - pickstate = state.cmdstate(repo, path='pickstate') - pctx = repo['.'] + pickstate = state.cmdstate(repo, path=b'pickstate') + pctx = repo[b'.'] if not cont and not abort: cmdutil.bailifchanged(repo) revs = scmutil.revrange(repo, revs) if len(revs) > 1: - raise error.Abort(_("specify just one revision")) + raise error.Abort(_(b"specify just one revision")) elif not revs: - raise 
error.Abort(_("empty revision set")) + raise error.Abort(_(b"empty revision set")) origctx = repo[revs.first()] if origctx in pctx.ancestors() or origctx.node() == pctx.node(): - raise error.Abort(_("cannot pick an ancestor revision")) + raise error.Abort(_(b"cannot pick an ancestor revision")) - rewriteutil.precheck(repo, [origctx.rev()], 'pick') + rewriteutil.precheck(repo, [origctx.rev()], b'pick') - ui.status(_('picking %d:%s "%s"\n') % + ui.status(_(b'picking %d:%s "%s"\n') % (origctx.rev(), origctx, - origctx.description().split("\n", 1)[0])) - overrides = {('ui', 'forcemerge'): opts.get('tool', '')} - with ui.configoverride(overrides, 'pick'): + origctx.description().split(b"\n", 1)[0])) + overrides = {(b'ui', b'forcemerge'): opts.get('tool', b'')} + with ui.configoverride(overrides, b'pick'): stats = merge.graft(repo, origctx, origctx.p1(), - ['local', 'destination']) + [b'local', b'destination']) if compat.hasconflict(stats): - pickstate.addopts({'orignode': origctx.node(), - 'oldpctx': pctx.node()}) + pickstate.addopts({b'orignode': origctx.node(), + b'oldpctx': pctx.node()}) pickstate.save() - raise error.InterventionRequired(_("unresolved merge conflicts" - " (see hg help resolve)")) + raise error.InterventionRequired(_(b"unresolved merge conflicts" + b" (see hg help resolve)")) elif abort: - if not pickstate: - raise error.Abort(_("no interrupted pick state exists")) - pickstate.load() - pctxnode = pickstate['oldpctx'] - ui.status(_("aborting pick, updating to %s\n") % - node.hex(pctxnode)[:12]) - hg.updaterepo(repo, pctxnode, True) - pickstate.delete() - return 0 + return abortpick(ui, repo, pickstate) else: if revs: - raise error.Abort(_("cannot specify both --continue and " - "revision")) + raise error.Abort(_(b"cannot specify both --continue and " + b"revision")) if not pickstate: - raise error.Abort(_("no interrupted pick state exists")) + raise error.Abort(_(b"no interrupted pick state exists")) pickstate.load() - orignode = pickstate['orignode'] + orignode = pickstate[b'orignode'] origctx = repo[orignode] - overrides = {('phases', 'new-commit'): origctx.phase()} - with repo.ui.configoverride(overrides, 'pick'): + overrides = {(b'phases', b'new-commit'): origctx.phase()} + with repo.ui.configoverride(overrides, b'pick'): newnode = repo.commit(text=origctx.description(), user=origctx.user(), date=origctx.date(), extra=origctx.extra()) @@ -1529,11 +1521,29 @@ pickstate.delete() newctx = repo[newnode] if newnode else pctx replacements = {origctx.node(): [newctx.node()]} - scmutil.cleanupnodes(repo, replacements, operation="pick") + scmutil.cleanupnodes(repo, replacements, operation=b"pick") if newnode is None: - ui.warn(_("note: picking %d:%s created no changes to commit\n") % + ui.warn(_(b"note: picking %d:%s created no changes to commit\n") % (origctx.rev(), origctx)) return 0 return 0 + +def abortpick(ui, repo, pickstate, abortcmd=False): + """logic to abort pick""" + if not pickstate and not abortcmd: + raise error.Abort(_(b"no interrupted pick state exists")) + pickstate.load() + pctxnode = pickstate[b'oldpctx'] + ui.status(_(b"aborting pick, updating to %s\n") % + node.hex(pctxnode)[:12]) + hg.updaterepo(repo, pctxnode, True) + pickstate.delete() + return 0 + +def hgabortpick(ui, repo): + """logic to abort pick using 'hg abort'""" + with repo.wlock(), repo.lock(): + pickstate = state.cmdstate(repo, path=b'pickstate') + return abortpick(ui, repo, pickstate, abortcmd=True)
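The hunks above follow the pattern applied throughout this changeset: string literals handed to Mercurial APIs (config keys, revsets, ui messages, obsmarker metadata) become bytes, while keys looked up in ``**opts`` dictionaries and names probed with ``dir()`` stay native strings. A minimal sketch of that split, using a hypothetical command body::

    def example(ui, repo, **opts):
        metadata = {}
        if opts.get('note'):                        # kwargs keys remain native str
            metadata[b'note'] = opts['note']        # obsmarker metadata keys are bytes
        ui.status(b'%i changesets pruned\n' % 1)    # ui output is bytes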
--- a/hgext3rd/evolve/compat.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/evolve/compat.py Tue Sep 24 12:42:27 2019 +0200 @@ -62,24 +62,24 @@ # Evolution renaming compat TROUBLES = { - 'ORPHAN': 'orphan', - 'CONTENTDIVERGENT': 'content-divergent', - 'PHASEDIVERGENT': 'phase-divergent', + r'ORPHAN': b'orphan', + r'CONTENTDIVERGENT': b'content-divergent', + r'PHASEDIVERGENT': b'phase-divergent', } if util.safehasattr(uimod.ui, 'makeprogress'): - def progress(ui, topic, pos, item="", unit="", total=None): + def progress(ui, topic, pos, item=b"", unit=b"", total=None): progress = ui.makeprogress(topic, unit, total) if pos is not None: progress.update(pos, item=item) else: progress.complete() else: - def progress(ui, topic, pos, item="", unit="", total=None): + def progress(ui, topic, pos, item=b"", unit=b"", total=None): ui.progress(topic, pos, item, unit, total) # XXX: Better detection of property cache -if 'predecessors' not in dir(obsolete.obsstore): +if r'predecessors' not in dir(obsolete.obsstore): @property def predecessors(self): return self.precursors @@ -90,36 +90,36 @@ # XXX Would it be better at the module level? varnames = context.memfilectx.__init__.__code__.co_varnames - if "copysource" in varnames: + if r"copysource" in varnames: mctx = context.memfilectx(repo, ctx, fctx.path(), fctx.data(), - islink='l' in flags, - isexec='x' in flags, + islink=b'l' in flags, + isexec=b'x' in flags, copysource=copied.get(path)) # compat with hg <- 4.9 - elif varnames[2] == "changectx": + elif varnames[2] == r"changectx": mctx = context.memfilectx(repo, ctx, fctx.path(), fctx.data(), - islink='l' in flags, - isexec='x' in flags, + islink=b'l' in flags, + isexec=b'x' in flags, copied=copied.get(path)) else: mctx = context.memfilectx(repo, fctx.path(), fctx.data(), - islink='l' in flags, - isexec='x' in flags, + islink=b'l' in flags, + isexec=b'x' in flags, copied=copied.get(path)) return mctx def strdiff(a, b, fn1, fn2): """ A version of mdiff.unidiff for comparing two strings """ - args = [a, '', b, '', fn1, fn2] + args = [a, b'', b, b'', fn1, fn2] # hg < 4.6 compat 8b6dd3922f70 if util.safehasattr(inspect, 'signature'): signature = inspect.signature(mdiff.unidiff) - needsbinary = 'binary' in signature.parameters + needsbinary = r'binary' in signature.parameters else: argspec = inspect.getargspec(mdiff.unidiff) - needsbinary = 'binary' in argspec.args + needsbinary = r'binary' in argspec.args if needsbinary: args.append(False) @@ -218,7 +218,7 @@ if limit is None: # no common ancestor, no copies return {}, {}, {}, {}, {} - repo.ui.debug(" searching for copies back to rev %d\n" % limit) + repo.ui.debug(b" searching for copies back to rev %d\n" % limit) m1 = c1.manifest() m2 = c2.manifest() @@ -232,18 +232,18 @@ # - incompletediverge = record divergent partial copies here diverge = {} # divergence data is shared incompletediverge = {} - data1 = {'copy': {}, - 'fullcopy': {}, - 'incomplete': {}, - 'diverge': diverge, - 'incompletediverge': incompletediverge, - } - data2 = {'copy': {}, - 'fullcopy': {}, - 'incomplete': {}, - 'diverge': diverge, - 'incompletediverge': incompletediverge, - } + data1 = {b'copy': {}, + b'fullcopy': {}, + b'incomplete': {}, + b'diverge': diverge, + b'incompletediverge': incompletediverge, + } + data2 = {b'copy': {}, + b'fullcopy': {}, + b'incomplete': {}, + b'diverge': diverge, + b'incompletediverge': incompletediverge, + } # find interesting file sets from manifests if hg48: @@ -260,20 +260,20 @@ else: # unmatched file from base (DAG rotation in the graft case) u1r, 
u2r = copies._computenonoverlap(repo, c1, c2, addedinm1, addedinm2, - baselabel='base') + baselabel=b'base') # unmatched file from topological common ancestors (no DAG rotation) # need to recompute this for directory move handling when grafting mta = tca.manifest() if hg48: m1f = m1.filesnotin(mta, repo.narrowmatch()) m2f = m2.filesnotin(mta, repo.narrowmatch()) - baselabel = 'topological common ancestor' + baselabel = b'topological common ancestor' u1u, u2u = copies._computenonoverlap(repo, c1, c2, m1f, m2f, baselabel=baselabel) else: u1u, u2u = copies._computenonoverlap(repo, c1, c2, m1.filesnotin(mta), m2.filesnotin(mta), - baselabel='topological common ancestor') + baselabel=b'topological common ancestor') for f in u1u: copies._checkcopies(c1, c2, f, base, tca, dirtyc1, limit, data1) @@ -281,16 +281,16 @@ for f in u2u: copies._checkcopies(c2, c1, f, base, tca, dirtyc2, limit, data2) - copy = dict(data1['copy']) - copy.update(data2['copy']) - fullcopy = dict(data1['fullcopy']) - fullcopy.update(data2['fullcopy']) + copy = dict(data1[b'copy']) + copy.update(data2[b'copy']) + fullcopy = dict(data1[b'fullcopy']) + fullcopy.update(data2[b'fullcopy']) if dirtyc1: - copies._combinecopies(data2['incomplete'], data1['incomplete'], copy, diverge, + copies._combinecopies(data2[b'incomplete'], data1[b'incomplete'], copy, diverge, incompletediverge) else: - copies._combinecopies(data1['incomplete'], data2['incomplete'], copy, diverge, + copies._combinecopies(data1[b'incomplete'], data2[b'incomplete'], copy, diverge, incompletediverge) renamedelete = {} @@ -308,23 +308,23 @@ divergeset.update(fl) # reverse map for below if bothnew: - repo.ui.debug(" unmatched files new in both:\n %s\n" - % "\n ".join(bothnew)) + repo.ui.debug(b" unmatched files new in both:\n %s\n" + % b"\n ".join(bothnew)) bothdiverge = {} bothincompletediverge = {} remainder = {} - both1 = {'copy': {}, - 'fullcopy': {}, - 'incomplete': {}, - 'diverge': bothdiverge, - 'incompletediverge': bothincompletediverge - } - both2 = {'copy': {}, - 'fullcopy': {}, - 'incomplete': {}, - 'diverge': bothdiverge, - 'incompletediverge': bothincompletediverge - } + both1 = {b'copy': {}, + b'fullcopy': {}, + b'incomplete': {}, + b'diverge': bothdiverge, + b'incompletediverge': bothincompletediverge + } + both2 = {b'copy': {}, + b'fullcopy': {}, + b'incomplete': {}, + b'diverge': bothdiverge, + b'incompletediverge': bothincompletediverge + } for f in bothnew: copies._checkcopies(c1, c2, f, base, tca, dirtyc1, limit, both1) copies._checkcopies(c2, c1, f, base, tca, dirtyc2, limit, both2) @@ -333,17 +333,17 @@ pass elif dirtyc1: # incomplete copies may only be found on the "dirty" side for bothnew - assert not both2['incomplete'] - remainder = copies._combinecopies({}, both1['incomplete'], copy, bothdiverge, + assert not both2[b'incomplete'] + remainder = copies._combinecopies({}, both1[b'incomplete'], copy, bothdiverge, bothincompletediverge) elif dirtyc2: - assert not both1['incomplete'] - remainder = copies._combinecopies({}, both2['incomplete'], copy, bothdiverge, + assert not both1[b'incomplete'] + remainder = copies._combinecopies({}, both2[b'incomplete'], copy, bothdiverge, bothincompletediverge) else: # incomplete copies and divergences can't happen outside grafts - assert not both1['incomplete'] - assert not both2['incomplete'] + assert not both1[b'incomplete'] + assert not both2[b'incomplete'] assert not bothincompletediverge for f in remainder: assert f not in bothdiverge @@ -356,30 +356,30 @@ copy[fl[0]] = of # not actually divergent, just 
matching renames if fullcopy and repo.ui.debugflag: - repo.ui.debug(" all copies found (* = to merge, ! = divergent, " - "% = renamed and deleted):\n") + repo.ui.debug(b" all copies found (* = to merge, ! = divergent, " + b"% = renamed and deleted):\n") for f in sorted(fullcopy): - note = "" + note = b"" if f in copy: - note += "*" + note += b"*" if f in divergeset: - note += "!" + note += b"!" if f in renamedeleteset: - note += "%" - repo.ui.debug(" src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f, - note)) + note += b"%" + repo.ui.debug(b" src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f, + note)) del divergeset if not fullcopy: return copy, {}, diverge, renamedelete, {} - repo.ui.debug(" checking for directory renames\n") + repo.ui.debug(b" checking for directory renames\n") # generate a directory move map d1, d2 = c1.dirs(), c2.dirs() # Hack for adding '', which is not otherwise added, to d1 and d2 - d1.addpath('/') - d2.addpath('/') + d1.addpath(b'/') + d2.addpath(b'/') invalid = set() dirmove = {} @@ -392,16 +392,16 @@ continue elif dsrc in d1 and ddst in d1: # directory wasn't entirely moved locally - invalid.add(dsrc + "/") + invalid.add(dsrc + b"/") elif dsrc in d2 and ddst in d2: # directory wasn't entirely moved remotely - invalid.add(dsrc + "/") - elif dsrc + "/" in dirmove and dirmove[dsrc + "/"] != ddst + "/": + invalid.add(dsrc + b"/") + elif dsrc + b"/" in dirmove and dirmove[dsrc + b"/"] != ddst + b"/": # files from the same directory moved to two different places - invalid.add(dsrc + "/") + invalid.add(dsrc + b"/") else: # looks good so far - dirmove[dsrc + "/"] = ddst + "/" + dirmove[dsrc + b"/"] = ddst + b"/" for i in invalid: if i in dirmove: @@ -412,7 +412,7 @@ return copy, {}, diverge, renamedelete, {} for d in dirmove: - repo.ui.debug(" discovered dir src: '%s' -> dst: '%s'\n" % + repo.ui.debug(b" discovered dir src: '%s' -> dst: '%s'\n" % (d, dirmove[d])) movewithdir = {} @@ -425,8 +425,8 @@ df = dirmove[d] + f[len(d):] if df not in copy: movewithdir[f] = df - repo.ui.debug((" pending file src: '%s' -> " - "dst: '%s'\n") % (f, df)) + repo.ui.debug((b" pending file src: '%s' -> " + b"dst: '%s'\n") % (f, df)) break return copy, movewithdir, diverge, renamedelete, dirmove @@ -494,8 +494,8 @@ return obsutil.markersusers(markers) markersmeta = [dict(m[3]) for m in markers] - users = set(encoding.tolocal(meta['user']) for meta in markersmeta - if meta.get('user')) + users = set(encoding.tolocal(meta[b'user']) for meta in markersmeta + if meta.get(b'user')) return sorted(users) @@ -511,7 +511,7 @@ return obsutil.markersoperations(markers) markersmeta = [dict(m[3]) for m in markers] - operations = set(meta.get('operation') for meta in markersmeta - if meta.get('operation')) + operations = set(meta.get(b'operation') for meta in markersmeta + if meta.get(b'operation')) return sorted(operations)
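compat.py keeps its introspection probes (``co_varnames``, ``inspect.signature`` parameters) as native strings, since those APIs return ``str`` on both Python 2 and 3; only values that flow into Mercurial itself are byteified. A hedged sketch of the signature-probing idiom used for the ``mdiff.unidiff`` compatibility check, with an assumed helper name::

    import inspect

    def needs_binary_arg(func):
        # parameter names are native str on both Python 2 and 3
        if hasattr(inspect, 'signature'):           # inspect.signature exists on Python 3
            return 'binary' in inspect.signature(func).parameters
        return 'binary' in inspect.getargspec(func).args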
--- a/hgext3rd/evolve/dagutil.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/evolve/dagutil.py Tue Sep 24 12:42:27 2019 +0200 @@ -140,7 +140,7 @@ def _internalize(self, id): ix = self._revlog.rev(id) if ix == nullrev: - raise LookupError(id, self._revlog.indexfile, _('nullid')) + raise LookupError(id, self._revlog.indexfile, _(b'nullid')) return ix def _internalizeall(self, ids, filterunknown):
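The only change to dagutil.py is the message passed to ``LookupError``: Mercurial's ``_()`` translates byte strings, so the literal becomes ``b'nullid'``. For instance::

    from mercurial.i18n import _

    # _() operates on bytes; a str literal would mix types on Python 3
    msg = _(b'nullid')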
--- a/hgext3rd/evolve/debugcmd.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/evolve/debugcmd.py Tue Sep 24 12:42:27 2019 +0200 @@ -45,7 +45,7 @@ unfi = repo.unfiltered() nm = unfi.changelog.nodemap nbmarkers = len(store._all) - ui.write(_('markers total: %9i\n') % nbmarkers) + ui.write(_(b'markers total: %9i\n') % nbmarkers) sucscount = [0, 0, 0, 0] known = 0 parentsdata = 0 @@ -67,7 +67,7 @@ metakeys.setdefault(key, 0) metakeys[key] += 1 meta = dict(meta) - parents = [meta.get('p1'), meta.get('p2')] + parents = [meta.get(b'p1'), meta.get(b'p2')] parents = [node.bin(p) for p in parents if p is not None] if parents: parentsdata += 1 @@ -91,71 +91,71 @@ fc = (frozenset(c[0]), frozenset(c[1])) for n in fc[0]: pclustersmap[n] = fc - numobs = len(unfi.revs('obsolete()')) + numobs = len(unfi.revs(b'obsolete()')) numtotal = len(unfi) - ui.write((' for known precursors: %9i' % known)) - ui.write((' (%i/%i obsolete changesets)\n' % (numobs, numtotal))) - ui.write((' with parents data: %9i\n' % parentsdata)) + ui.write((b' for known precursors: %9i' % known)) + ui.write((b' (%i/%i obsolete changesets)\n' % (numobs, numtotal))) + ui.write((b' with parents data: %9i\n' % parentsdata)) # successors data - ui.write(('markers with no successors: %9i\n' % sucscount[0])) - ui.write((' 1 successors: %9i\n' % sucscount[1])) - ui.write((' 2 successors: %9i\n' % sucscount[2])) - ui.write((' more than 2 successors: %9i\n' % sucscount[3])) + ui.write((b'markers with no successors: %9i\n' % sucscount[0])) + ui.write((b' 1 successors: %9i\n' % sucscount[1])) + ui.write((b' 2 successors: %9i\n' % sucscount[2])) + ui.write((b' more than 2 successors: %9i\n' % sucscount[3])) # meta data info - ui.write((' available keys:\n')) + ui.write((b' available keys:\n')) for key in sorted(metakeys): - ui.write((' %15s: %9i\n' % (key, metakeys[key]))) + ui.write((b' %15s: %9i\n' % (key, metakeys[key]))) size_v0.sort() size_v1.sort() if size_v0: - ui.write('marker size:\n') + ui.write(b'marker size:\n') # format v1 - ui.write(' format v1:\n') - ui.write((' smallest length: %9i\n' % size_v1[0])) - ui.write((' longer length: %9i\n' % size_v1[-1])) + ui.write(b' format v1:\n') + ui.write((b' smallest length: %9i\n' % size_v1[0])) + ui.write((b' longer length: %9i\n' % size_v1[-1])) median = size_v1[nbmarkers // 2] - ui.write((' median length: %9i\n' % median)) + ui.write((b' median length: %9i\n' % median)) mean = sum(size_v1) // nbmarkers - ui.write((' mean length: %9i\n' % mean)) + ui.write((b' mean length: %9i\n' % mean)) # format v0 - ui.write(' format v0:\n') - ui.write((' smallest length: %9i\n' % size_v0[0])) - ui.write((' longer length: %9i\n' % size_v0[-1])) + ui.write(b' format v0:\n') + ui.write((b' smallest length: %9i\n' % size_v0[0])) + ui.write((b' longer length: %9i\n' % size_v0[-1])) median = size_v0[nbmarkers // 2] - ui.write((' median length: %9i\n' % median)) + ui.write((b' median length: %9i\n' % median)) mean = sum(size_v0) // nbmarkers - ui.write((' mean length: %9i\n' % mean)) + ui.write((b' mean length: %9i\n' % mean)) allclusters = list(set(clustersmap.values())) allclusters.sort(key=lambda x: len(x[1])) - ui.write(('disconnected clusters: %9i\n' % len(allclusters))) + ui.write((b'disconnected clusters: %9i\n' % len(allclusters))) - ui.write(' any known node: %9i\n' + ui.write(b' any known node: %9i\n' % len([c for c in allclusters if [n for n in c[0] if nm.get(n) is not None]])) if allclusters: nbcluster = len(allclusters) - ui.write((' smallest length: %9i\n' % len(allclusters[0][1]))) - ui.write((' 
longer length: %9i\n' - % len(allclusters[-1][1]))) + ui.write((b' smallest length: %9i\n' % len(allclusters[0][1]))) + ui.write((b' longer length: %9i\n' + % len(allclusters[-1][1]))) median = len(allclusters[nbcluster // 2][1]) - ui.write((' median length: %9i\n' % median)) + ui.write((b' median length: %9i\n' % median)) mean = sum(len(x[1]) for x in allclusters) // nbcluster - ui.write((' mean length: %9i\n' % mean)) + ui.write((b' mean length: %9i\n' % mean)) allpclusters = list(set(pclustersmap.values())) allpclusters.sort(key=lambda x: len(x[1])) - ui.write((' using parents data: %9i\n' % len(allpclusters))) - ui.write(' any known node: %9i\n' + ui.write((b' using parents data: %9i\n' % len(allpclusters))) + ui.write(b' any known node: %9i\n' % len([c for c in allclusters if [n for n in c[0] if nm.get(n) is not None]])) if allpclusters: nbcluster = len(allpclusters) - ui.write((' smallest length: %9i\n' - % len(allpclusters[0][1]))) - ui.write((' longer length: %9i\n' - % len(allpclusters[-1][1]))) + ui.write((b' smallest length: %9i\n' + % len(allpclusters[0][1]))) + ui.write((b' longer length: %9i\n' + % len(allpclusters[-1][1]))) median = len(allpclusters[nbcluster // 2][1]) - ui.write((' median length: %9i\n' % median)) + ui.write((b' median length: %9i\n' % median)) mean = sum(len(x[1]) for x in allpclusters) // nbcluster - ui.write((' mean length: %9i\n' % mean)) + ui.write((b' mean length: %9i\n' % mean))
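The statistics lines above rely on %-formatting of bytes format strings, available since Python 3.5 (PEP 461); formatting byte values into a ``str`` template instead would leak a literal ``b'...'`` into the output. A small illustration::

    >>> b'markers total: %9i\n' % 42
    b'markers total:        42\n'
    >>> 'node: %s' % b'abc123'      # common porting mistake
    "node: b'abc123'"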
--- a/hgext3rd/evolve/depthcache.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/evolve/depthcache.py Tue Sep 24 12:42:27 2019 +0200 @@ -32,7 +32,7 @@ def simpledepth(repo, rev): """simple but obviously right implementation of depth""" - return len(repo.revs('::%d', rev)) + return len(repo.revs(b'::%d', rev)) @eh.command( b'debugdepth', @@ -46,25 +46,25 @@ """ revs = scmutil.revrange(repo, opts['rev']) method = opts['method'] - if method in ('cached', 'compare'): + if method in (b'cached', b'compare'): cache = repo.depthcache cache.save(repo) for r in revs: ctx = repo[r] - if method == 'simple': + if method == b'simple': depth = simpledepth(repo, r) - elif method == 'cached': + elif method == b'cached': depth = cache.get(r) - elif method == 'compare': + elif method == b'compare': simple = simpledepth(repo, r) cached = cache.get(r) if simple != cached: - raise error.Abort('depth differ for revision %s: %d != %d' + raise error.Abort(b'depth differ for revision %s: %d != %d' % (ctx, simple, cached)) depth = simple else: - raise error.Abort('unknown method "%s"' % method) - ui.write('%s %d\n' % (ctx, depth)) + raise error.Abort(b'unknown method "%s"' % method) + ui.write(b'%s %d\n' % (ctx, depth)) @eh.reposetup def setupcache(ui, repo): @@ -79,7 +79,7 @@ @localrepo.unfilteredmethod def destroyed(self): - if 'depthcache' in vars(self): + if r'depthcache' in vars(self): self.depthcache.clear() super(depthcacherepo, self).destroyed() @@ -94,16 +94,16 @@ class depthcache(genericcaches.changelogsourcebase): - _filepath = 'evoext-depthcache-00' - _cachename = 'evo-ext-depthcache' + _filepath = b'evoext-depthcache-00' + _cachename = b'evo-ext-depthcache' def __init__(self): super(depthcache, self).__init__() - self._data = array.array('l') + self._data = array.array(r'l') def get(self, rev): if len(self._data) <= rev: - raise error.ProgrammingError('depthcache must be warmed before use') + raise error.ProgrammingError(b'depthcache must be warmed before use') return self._data[rev] def _updatefrom(self, repo, data): @@ -113,9 +113,9 @@ total = len(data) def progress(pos, rev=None): - revstr = '' if rev is None else ('rev %d' % rev) - compat.progress(repo.ui, 'updating depth cache', - pos, revstr, unit='revision', total=total) + revstr = b'' if rev is None else (b'rev %d' % rev) + compat.progress(repo.ui, b'updating depth cache', + pos, revstr, unit=b'revision', total=total) progress(0) for idx, rev in enumerate(data, 1): assert rev == len(self._data), (rev, len(self._data)) @@ -171,7 +171,7 @@ Subclasses MUST overide this method to actually affect the cache data. 
""" super(depthcache, self).clear() - self._data = array.array('l') + self._data = array.array(r'l') # crude version of a cache, to show the kind of information we have to store @@ -180,7 +180,7 @@ assert repo.filtername is None data = repo.cachevfs.tryread(self._filepath) - self._data = array.array('l') + self._data = array.array(r'l') if not data: self._cachekey = self.emptykey else: @@ -199,12 +199,12 @@ return try: - cachefile = repo.cachevfs(self._filepath, 'w', atomictemp=True) + cachefile = repo.cachevfs(self._filepath, b'w', atomictemp=True) headerdata = self._serializecachekey() cachefile.write(headerdata) cachefile.write(compat.arraytobytes(self._data)) cachefile.close() self._ondiskkey = self._cachekey except (IOError, OSError) as exc: - repo.ui.log('depthcache', 'could not write update %s\n' % exc) - repo.ui.debug('depthcache: could not write update %s\n' % exc) + repo.ui.log(b'depthcache', b'could not write update %s\n' % exc) + repo.ui.debug(b'depthcache: could not write update %s\n' % exc)
--- a/hgext3rd/evolve/evolvecmd.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/evolve/evolvecmd.py Tue Sep 24 12:42:27 2019 +0200 @@ -48,12 +48,12 @@ shorttemplate = utility.shorttemplate stacktemplate = utility.stacktemplate _bookmarksupdater = rewriteutil.bookmarksupdater -sha1re = re.compile(r'\b[0-9a-f]{6,40}\b') +sha1re = re.compile(br'\b[0-9a-f]{6,40}\b') eh = exthelper.exthelper() mergetoolopts = commands.mergetoolopts -abortmessage = _("see `hg help evolve.interrupted`\n") +abortmessage = _(b"see `hg help evolve.interrupted`\n") def _solveone(ui, repo, ctx, evolvestate, dryrun, confirm, progresscb, category, lastsolved=None, stacktmplt=False): @@ -70,23 +70,23 @@ displayer = None if stacktmplt: displayer = compat.changesetdisplayer(ui, repo, - {'template': stacktemplate}) + {b'template': stacktemplate}) else: displayer = compat.changesetdisplayer(ui, repo, - {'template': shorttemplate}) - if 'orphan' == category: + {b'template': shorttemplate}) + if b'orphan' == category: result = _solveunstable(ui, repo, ctx, evolvestate, displayer, dryrun, confirm, progresscb, lastsolved=lastsolved) - elif 'phasedivergent' == category: + elif b'phasedivergent' == category: result = _solvephasedivergence(ui, repo, ctx, evolvestate, displayer, dryrun, confirm, progresscb) - elif 'contentdivergent' == category: + elif b'contentdivergent' == category: result = _solvedivergent(ui, repo, ctx, evolvestate, displayer, dryrun, confirm, progresscb) else: - assert False, "unknown trouble category: %s" % (category) + assert False, b"unknown trouble category: %s" % (category) return result def _solveunstable(ui, repo, orig, evolvestate, displayer, dryrun=False, @@ -112,7 +112,7 @@ else: # store that we are resolving an orphan merge with both parents # obsolete and proceed with first parent - evolvestate['orphanmerge'] = True + evolvestate[b'orphanmerge'] = True # we should process the second parent first, so that in case of # no-conflicts the first parent is processed later and preserved as # first parent @@ -120,40 +120,40 @@ keepbranch = orig.p2().branch() != orig.branch() if not pctx.obsolete(): - ui.warn(_("cannot solve instability of %s, skipping\n") % orig) - return (False, ".") + ui.warn(_(b"cannot solve instability of %s, skipping\n") % orig) + return (False, b".") obs = pctx newer = obsutil.successorssets(repo, obs.node()) # search of a parent which is not killed, but also isn't the orig while not newer or newer == [()] or newer[0][0] == orig.node(): - ui.debug("stabilize target %s is plain dead," - " trying to stabilize on its parent\n" % + ui.debug(b"stabilize target %s is plain dead," + b" trying to stabilize on its parent\n" % obs) obs = obs.parents()[0] newer = obsutil.successorssets(repo, obs.node()) if len(newer) > 1: - msg = _("skipping %s: divergent rewriting. can't choose " - "destination\n") % obs + msg = _(b"skipping %s: divergent rewriting. can't choose " + b"destination\n") % obs ui.write_err(msg) - return (False, ".") + return (False, b".") targets = newer[0] assert targets if len(targets) > 1: # split target, figure out which one to pick, are they all in line? 
targetrevs = [repo[r].rev() for r in targets] - roots = repo.revs('roots(%ld)', targetrevs) - heads = repo.revs('heads(%ld)', targetrevs) + roots = repo.revs(b'roots(%ld)', targetrevs) + heads = repo.revs(b'heads(%ld)', targetrevs) if len(roots) > 1 or len(heads) > 1: - cheader = _("ancestor '%s' split over multiple topological" - " branches.\nchoose an evolve destination:") % orig + cheader = _(b"ancestor '%s' split over multiple topological" + b" branches.\nchoose an evolve destination:") % orig selectedrev = utility.revselectionprompt(ui, repo, list(heads), cheader) if selectedrev is None: - msg = _("could not solve instability, " - "ambiguous destination: " - "parent split across two branches\n") + msg = _(b"could not solve instability, " + b"ambiguous destination: " + b"parent split across two branches\n") ui.write_err(msg) - return (False, ".") + return (False, b".") target = repo[selectedrev] else: target = repo[heads.first()] @@ -161,29 +161,29 @@ target = targets[0] target = repo[target] if not ui.quiet or confirm: - repo.ui.write(_('move:'), label='evolve.operation') + repo.ui.write(_(b'move:'), label=b'evolve.operation') displayer.show(orig) if lastsolved is None or target != repo[lastsolved]: - repo.ui.write(_('atop:')) + repo.ui.write(_(b'atop:')) displayer.show(target) - if confirm and ui.prompt('perform evolve? [Ny]', 'n') != 'y': - raise error.Abort(_('evolve aborted by user')) - todo = 'hg rebase -r %s -d %s\n' % (orig, target) + if confirm and ui.prompt(b'perform evolve? [Ny]', b'n') != b'y': + raise error.Abort(_(b'evolve aborted by user')) + todo = b'hg rebase -r %s -d %s\n' % (orig, target) if dryrun: if progresscb: progresscb() repo.ui.write(todo) - return (False, ".") + return (False, b".") else: repo.ui.note(todo) if progresscb: progresscb() try: newid = relocate(repo, orig, target, evolvestate, pctx, - keepbranch, 'orphan') + keepbranch, b'orphan') return (True, newid) except error.InterventionRequired: - ops = {'current': orig.node()} + ops = {b'current': orig.node()} evolvestate.addopts(ops) evolvestate.save() raise @@ -202,32 +202,32 @@ bumped = repo[bumped.rev()] # For now we deny bumped merge if len(bumped.parents()) > 1: - msg = _('skipping %s : we do not handle merge yet\n') % bumped + msg = _(b'skipping %s : we do not handle merge yet\n') % bumped ui.write_err(msg) - return (False, ".") - prec = next(repo.set('last(allpredecessors(%d) and public())', bumped.rev())) + return (False, b".") + prec = next(repo.set(b'last(allpredecessors(%d) and public())', bumped.rev())) # For now we deny target merge if len(prec.parents()) > 1: - msg = _('skipping: %s: public version is a merge, ' - 'this is not handled yet\n') % prec + msg = _(b'skipping: %s: public version is a merge, ' + b'this is not handled yet\n') % prec ui.write_err(msg) - return (False, ".") + return (False, b".") if not ui.quiet or confirm: - repo.ui.write(_('recreate:'), label='evolve.operation') + repo.ui.write(_(b'recreate:'), label=b'evolve.operation') displayer.show(bumped) - repo.ui.write(_('atop:')) + repo.ui.write(_(b'atop:')) displayer.show(prec) - if confirm and ui.prompt(_('perform evolve? [Ny]'), 'n') != 'y': - raise error.Abort(_('evolve aborted by user')) + if confirm and ui.prompt(_(b'perform evolve? 
[Ny]'), b'n') != b'y': + raise error.Abort(_(b'evolve aborted by user')) if dryrun: - todo = 'hg rebase --rev %s --dest %s;\n' % (bumped, prec.p1()) + todo = b'hg rebase --rev %s --dest %s;\n' % (bumped, prec.p1()) repo.ui.write(todo) - repo.ui.write(('hg update %s;\n' % prec)) - repo.ui.write(('hg revert --all --rev %s;\n' % bumped)) - repo.ui.write(('hg commit --msg "%s update to %s"\n' % + repo.ui.write((b'hg update %s;\n' % prec)) + repo.ui.write((b'hg revert --all --rev %s;\n' % bumped)) + repo.ui.write((b'hg commit --msg "%s update to %s"\n' % (TROUBLES['PHASEDIVERGENT'], bumped))) - return (False, ".") + return (False, b".") if progresscb: progresscb() @@ -237,22 +237,22 @@ # evolved or any other operation which can change parent. In such cases, # when parents are not same, we first rebase the divergent changeset onto # parent or precursor and then perform later steps - if not list(repo.set('parents(%d) and parents(%d)', bumped.rev(), prec.rev())): + if not list(repo.set(b'parents(%d) and parents(%d)', bumped.rev(), prec.rev())): # Need to rebase the changeset at the right place repo.ui.status( - _('rebasing to destination parent: %s\n') % prec.p1()) + _(b'rebasing to destination parent: %s\n') % prec.p1()) try: newnode = relocate(repo, bumped, prec.p1(), evolvestate, - category='phasedivergent') + category=b'phasedivergent') if newnode is not None: new = repo[newnode] obsolete.createmarkers(repo, [(bumped, (new,))], - operation='evolve') + operation=b'evolve') bumped = new - evolvestate['temprevs'].append(newnode) + evolvestate[b'temprevs'].append(newnode) except error.InterventionRequired: - evolvestate['current'] = bumped.hex() - evolvestate['precursor'] = prec.hex() + evolvestate[b'current'] = bumped.hex() + evolvestate[b'precursor'] = prec.hex() evolvestate.save() raise @@ -284,7 +284,7 @@ merge.update(repo, bumped.node(), ancestor=prec, mergeancestor=True, branchmerge=True, force=False, wc=wctx) if not wctx.isempty(): - text = '%s update to %s:\n\n' % (TROUBLES['PHASEDIVERGENT'], prec) + text = b'%s update to %s:\n\n' % (TROUBLES['PHASEDIVERGENT'], prec) text += bumped.description() memctx = wctx.tomemctx(text, parents=(prec.node(), nodemod.nullid), @@ -294,14 +294,14 @@ newid = repo.commitctx(memctx) replacementnode = newid if newid is None: - repo.ui.status(_('no changes to commit\n')) - obsolete.createmarkers(repo, [(bumped, ())], operation='evolve') + repo.ui.status(_(b'no changes to commit\n')) + obsolete.createmarkers(repo, [(bumped, ())], operation=b'evolve') newid = prec.node() else: - repo.ui.status(_('committed as %s\n') % nodemod.short(newid)) + repo.ui.status(_(b'committed as %s\n') % nodemod.short(newid)) phases.retractboundary(repo, tr, bumped.phase(), [newid]) obsolete.createmarkers(repo, [(bumped, (repo[newid],))], - flag=obsolete.bumpedfix, operation='evolve') + flag=obsolete.bumpedfix, operation=b'evolve') bmupdate(newid) # reroute the working copy parent to the new changeset with repo.dirstate.parentchange(): @@ -320,45 +320,45 @@ """ repo = repo.unfiltered() divergent = repo[divergent.rev()] - evolvestate['divergent'] = divergent.node() - evolvestate['orig-divergent'] = divergent.node() + evolvestate[b'divergent'] = divergent.node() + evolvestate[b'orig-divergent'] = divergent.node() # sometimes we will relocate a node in case of different parents and we can # encounter conflicts after relocation is done while solving # content-divergence and if the user calls `hg evolve --stop`, we need to # strip that relocated commit. 
However if `--all` is passed, we need to # reset this value for each content-divergence resolution which we are doing # below. - evolvestate['relocated'] = None - evolvestate['relocating'] = False + evolvestate[b'relocated'] = None + evolvestate[b'relocating'] = False # in case or relocation we get a new other node, we need to store the old # other for purposes like `--abort` or `--stop` - evolvestate['old-other'] = None + evolvestate[b'old-other'] = None base, others = divergentdata(divergent) # we don't handle split in content-divergence yet if len(others) > 1: - othersstr = "[%s]" % (','.join([bytes(i) for i in others])) - msg = _("skipping %s: %s with a changeset that got split" - " into multiple ones:\n" - "|[%s]\n" - "| This is not handled by automatic evolution yet\n" - "| You have to fallback to manual handling with commands " - "such as:\n" - "| - hg touch -D\n" - "| - hg prune\n" - "| \n" - "| You should contact your local evolution Guru for help.\n" + othersstr = b"[%s]" % (b','.join([bytes(i) for i in others])) + msg = _(b"skipping %s: %s with a changeset that got split" + b" into multiple ones:\n" + b"|[%s]\n" + b"| This is not handled by automatic evolution yet\n" + b"| You have to fallback to manual handling with commands " + b"such as:\n" + b"| - hg touch -D\n" + b"| - hg prune\n" + b"| \n" + b"| You should contact your local evolution Guru for help.\n" ) % (divergent, TROUBLES['CONTENTDIVERGENT'], othersstr) ui.write_err(msg) - return (False, ".") + return (False, b".") other = others[0] - evolvestate['other-divergent'] = other.node() - evolvestate['base'] = base.node() + evolvestate[b'other-divergent'] = other.node() + evolvestate[b'base'] = base.node() def swapnodes(div, other): div, other = other, div - evolvestate['divergent'] = div.node() - evolvestate['other-divergent'] = other.node() + evolvestate[b'divergent'] = div.node() + evolvestate[b'other-divergent'] = other.node() return div, other # haspubdiv: to keep track if we are solving public content-divergence haspubdiv = False @@ -371,17 +371,17 @@ divergent, other = swapnodes(divergent, other) else: publicdiv = divergent - evolvestate['public-divergent'] = publicdiv.node() + evolvestate[b'public-divergent'] = publicdiv.node() # we don't handle merge content-divergent changesets yet if len(other.parents()) > 1: - msg = _("skipping %s: %s changeset can't be " - "a merge (yet)\n") % (divergent, TROUBLES['CONTENTDIVERGENT']) + msg = _(b"skipping %s: %s changeset can't be " + b"a merge (yet)\n") % (divergent, TROUBLES['CONTENTDIVERGENT']) ui.write_err(msg) - hint = _("You have to fallback to solving this by hand...\n" - "| This probably means redoing the merge and using \n" - "| `hg prune` to kill older version.\n") + hint = _(b"You have to fallback to solving this by hand...\n" + b"| This probably means redoing the merge and using \n" + b"| `hg prune` to kill older version.\n") ui.write_err(hint) - return (False, ".") + return (False, b".") otherp1 = other.p1().rev() divp1 = divergent.p1().rev() @@ -400,15 +400,15 @@ # the changeset on which resolution changeset will be based on resolutionparent = repo[divp1].node() - gca = repo.revs("ancestor(%d, %d)" % (otherp1, divp1)) + gca = repo.revs(b"ancestor(%d, %d)" % (otherp1, divp1)) # divonly: non-obsolete csets which are topological ancestor of "divergent" # but not "other" - divonly = repo.revs("only(%d, %d) - obsolete()" % (divergent.rev(), - other.rev())) + divonly = repo.revs(b"only(%d, %d) - obsolete()" % (divergent.rev(), + other.rev())) # otheronly: non-obsolete 
csets which are topological ancestor of "other" # but not "div" - otheronly = repo.revs("only(%d, %d) - obsolete()" % (other.rev(), - divergent.rev())) + otheronly = repo.revs(b"only(%d, %d) - obsolete()" % (other.rev(), + divergent.rev())) # make it exclusive set divonly = set(divonly) - {divergent.rev()} otheronly = set(otheronly) - {other.rev()} @@ -468,62 +468,62 @@ divergent, other = swapnodes(divergent, other) resolutionparent = divergent.p1().node() else: - msg = _("skipping %s: have a different parent than %s " - "(not handled yet)\n") % (divergent, other) - hint = _("| %(d)s, %(o)s are not based on the same changeset.\n" - "| With the current state of its implementation, \n" - "| evolve does not work in that case.\n" - "| rebase one of them next to the other and run \n" - "| this command again.\n" - "| - either: hg rebase --dest 'p1(%(d)s)' -r %(o)s\n" - "| - or: hg rebase --dest 'p1(%(o)s)' -r %(d)s\n" - ) % {'d': divergent, 'o': other} + msg = _(b"skipping %s: have a different parent than %s " + b"(not handled yet)\n") % (divergent, other) + hint = _(b"| %(d)s, %(o)s are not based on the same changeset.\n" + b"| With the current state of its implementation, \n" + b"| evolve does not work in that case.\n" + b"| rebase one of them next to the other and run \n" + b"| this command again.\n" + b"| - either: hg rebase --dest 'p1(%(d)s)' -r %(o)s\n" + b"| - or: hg rebase --dest 'p1(%(o)s)' -r %(d)s\n" + ) % {b'd': divergent, b'o': other} ui.write_err(msg) ui.write_err(hint) - return (False, ".") + return (False, b".") if not ui.quiet or confirm: - ui.write(_('merge:'), label='evolve.operation') + ui.write(_(b'merge:'), label=b'evolve.operation') displayer.show(divergent) - ui.write(_('with: ')) + ui.write(_(b'with: ')) displayer.show(other) - ui.write(_('base: ')) + ui.write(_(b'base: ')) displayer.show(base) - if confirm and ui.prompt(_('perform evolve? [Ny]'), 'n') != 'y': - raise error.Abort(_('evolve aborted by user')) + if confirm and ui.prompt(_(b'perform evolve? 
[Ny]'), b'n') != b'y': + raise error.Abort(_(b'evolve aborted by user')) if dryrun: - ui.write(('hg update -c %s &&\n' % divergent)) - ui.write(('hg merge %s &&\n' % other)) - ui.write(('hg commit -m "auto merge resolving conflict between ' - '%s and %s"&&\n' % (divergent, other))) - ui.write(('hg up -C %s &&\n' % base)) - ui.write(('hg revert --all --rev tip &&\n')) - ui.write(('hg commit -m "`hg log -r %s --template={desc}`";\n' - % divergent)) - return (False, ".") + ui.write((b'hg update -c %s &&\n' % divergent)) + ui.write((b'hg merge %s &&\n' % other)) + ui.write((b'hg commit -m "auto merge resolving conflict between ' + b'%s and %s"&&\n' % (divergent, other))) + ui.write((b'hg up -C %s &&\n' % base)) + ui.write((b'hg revert --all --rev tip &&\n')) + ui.write((b'hg commit -m "`hg log -r %s --template={desc}`";\n' + % divergent)) + return (False, b".") - evolvestate['resolutionparent'] = resolutionparent + evolvestate[b'resolutionparent'] = resolutionparent # relocate the other divergent if required if relocatereq: # relocating will help us understand during the time of conflicts that # whether conflicts occur at reloacting or they occured at merging # content divergent changesets - evolvestate['relocating'] = True - ui.status(_('rebasing "other" content-divergent changeset %s on' - ' %s\n' % (other, divergent.p1()))) + evolvestate[b'relocating'] = True + ui.status(_(b'rebasing "other" content-divergent changeset %s on' + b' %s\n' % (other, divergent.p1()))) try: newother = relocate(repo, other, divergent.p1(), evolvestate, keepbranch=True) except error.InterventionRequired: - evolvestate['current'] = other.node() + evolvestate[b'current'] = other.node() evolvestate.save() raise - evolvestate['old-other'] = other.node() + evolvestate[b'old-other'] = other.node() other = repo[newother] - evolvestate['relocating'] = False - evolvestate['relocated'] = other.node() - evolvestate['temprevs'].append(other.node()) - evolvestate['other-divergent'] = other.node() + evolvestate[b'relocating'] = False + evolvestate[b'relocated'] = other.node() + evolvestate[b'temprevs'].append(other.node()) + evolvestate[b'other-divergent'] = other.node() _mergecontentdivergents(repo, progresscb, divergent, other, base, evolvestate) @@ -545,9 +545,9 @@ # case 2) pubstr = bytes(publicdiv) othstr = bytes(other) - msg = _('content divergence resolution between %s ' - '(public) and %s has same content as %s, ' - 'discarding %s\n') + msg = _(b'content divergence resolution between %s ' + b'(public) and %s has same content as %s, ' + b'discarding %s\n') msg %= (pubstr, othstr, pubstr, othstr) repo.ui.status(msg) return (res, newnode) @@ -559,11 +559,11 @@ def _mergecontentdivergents(repo, progresscb, divergent, other, base, evolvestate): if divergent not in repo[None].parents(): - repo.ui.note(_("updating to \"local\" side of the conflict: %s\n") % + repo.ui.note(_(b"updating to \"local\" side of the conflict: %s\n") % divergent.hex()[:12]) hg.updaterepo(repo, divergent.node(), False) # merging the two content-divergent changesets - repo.ui.note(_("merging \"other\" %s changeset '%s'\n") % + repo.ui.note(_(b"merging \"other\" %s changeset '%s'\n") % (TROUBLES['CONTENTDIVERGENT'], other.hex()[:12])) if progresscb: progresscb() @@ -579,8 +579,8 @@ # conflicts while merging content-divergent changesets if compat.hasconflict(stats): evolvestate.save() - hint = _("see 'hg help evolve.interrupted'") - raise error.InterventionRequired(_("unresolved merge conflicts"), + hint = _(b"see 'hg help evolve.interrupted'") + raise 
error.InterventionRequired(_(b"unresolved merge conflicts"), hint=hint) def _completecontentdivergent(ui, repo, progresscb, divergent, other, @@ -590,20 +590,20 @@ # resume resolution if progresscb: progresscb() - emtpycommitallowed = repo.ui.backupconfig('ui', 'allowemptycommit') + emtpycommitallowed = repo.ui.backupconfig(b'ui', b'allowemptycommit') tr = repo.currenttransaction() assert tr is not None # whether to store the obsmarker in the evolvestate storemarker = False - resparent = evolvestate['resolutionparent'] + resparent = evolvestate[b'resolutionparent'] # whether we are solving public divergence haspubdiv = False - if evolvestate.get('public-divergent'): + if evolvestate.get(b'public-divergent'): haspubdiv = True - publicnode = evolvestate['public-divergent'] + publicnode = evolvestate[b'public-divergent'] publicdiv = repo[publicnode] - othernode = evolvestate['other-divergent'] + othernode = evolvestate[b'other-divergent'] otherdiv = repo[othernode] with repo.dirstate.parentchange(): @@ -616,7 +616,7 @@ warnmetadataloss(repo, publicdiv, otherdiv) # no changes, create markers to resolve divergence obsolete.createmarkers(repo, [(otherdiv, (publicdiv,))], - operation='evolve') + operation=b'evolve') return (True, publicnode) try: with repo.dirstate.parentchange(): @@ -642,29 +642,29 @@ # no changes new = divergent storemarker = True - repo.ui.status(_("nothing changed\n")) + repo.ui.status(_(b"nothing changed\n")) hg.updaterepo(repo, divergent.rev(), False) else: new = repo[newnode] newnode = new.node() hg.updaterepo(repo, new.rev(), False) if haspubdiv and publicdiv == divergent: - bypassphase(repo, (divergent, new), operation='evolve') + bypassphase(repo, (divergent, new), operation=b'evolve') else: obsolete.createmarkers(repo, [(divergent, (new,))], - operation='evolve') + operation=b'evolve') # creating markers and moving phases post-resolution if haspubdiv and publicdiv == other: - bypassphase(repo, (other, new), operation='evolve') + bypassphase(repo, (other, new), operation=b'evolve') else: - obsolete.createmarkers(repo, [(other, (new,))], operation='evolve') + obsolete.createmarkers(repo, [(other, (new,))], operation=b'evolve') if storemarker: # storing the marker in the evolvestate # we just store the precursors and successor pair for now, we might # want to store more data and serialize obsmarker in a better way in # future - evolvestate['obsmarkers'].append((other.node(), new.node())) + evolvestate[b'obsmarkers'].append((other.node(), new.node())) phases.retractboundary(repo, tr, other.phase(), [new.node()]) return (True, newnode) @@ -676,7 +676,7 @@ public content-divergence""" # needtowarn: aspects where we need to warn user - needtowarn = ['branch', 'topic', 'close'] + needtowarn = [b'branch', b'topic', b'close'] aspects = set() localextra = local.extra() otherextra = other.extra() @@ -688,48 +688,48 @@ aspects.add(asp) if other.description() != local.description(): - aspects.add('description') + aspects.add(b'description') if aspects: # warn user locstr = bytes(local) othstr = bytes(other) - if 'close' in aspects: - filteredasp = aspects - {'close'} + if b'close' in aspects: + filteredasp = aspects - {b'close'} if filteredasp: - msg = _('other divergent changeset %s is a closed branch head ' - 'and differs from local %s by "%s" only,' % - (othstr, locstr, ', '.join(sorted(filteredasp)))) + msg = _(b'other divergent changeset %s is a closed branch head ' + b'and differs from local %s by "%s" only,' % + (othstr, locstr, b', '.join(sorted(filteredasp)))) else: - msg = 
_('other divergent changeset %s is a closed branch head ' - 'and has same content as local %s,' % (othstr, locstr)) + msg = _(b'other divergent changeset %s is a closed branch head ' + b'and has same content as local %s,' % (othstr, locstr)) else: - msg = _('other divergent changeset %s has same content as local %s' - ' and differs by "%s" only,' % - (othstr, locstr, ', '.join(sorted(aspects)))) - msg += _(' discarding %s\n' % othstr) + msg = _(b'other divergent changeset %s has same content as local %s' + b' and differs by "%s" only,' % + (othstr, locstr, b', '.join(sorted(aspects)))) + msg += _(b' discarding %s\n' % othstr) repo.ui.warn(msg) -def bypassphase(repo, relation, flag=0, metadata=None, operation='evolve'): +def bypassphase(repo, relation, flag=0, metadata=None, operation=b'evolve'): """function to create a single obsmarker relation even for public csets where relation should be a single pair (prec, succ)""" # prepare metadata if metadata is None: metadata = {} - if 'user' not in metadata: - luser = repo.ui.config('devel', 'user.obsmarker') or repo.ui.username() - metadata['user'] = encoding.fromlocal(luser) + if b'user' not in metadata: + luser = repo.ui.config(b'devel', b'user.obsmarker') or repo.ui.username() + metadata[b'user'] = encoding.fromlocal(luser) # Operation metadata handling - useoperation = repo.ui.configbool('experimental', - 'evolution.track-operation') + useoperation = repo.ui.configbool(b'experimental', + b'evolution.track-operation') if useoperation and operation: - metadata['operation'] = operation + metadata[b'operation'] = operation # Effect flag metadata handling - saveeffectflag = repo.ui.configbool('experimental', - 'evolution.effect-flags') - with repo.transaction('add-obsolescence-marker') as tr: + saveeffectflag = repo.ui.configbool(b'experimental', + b'evolution.effect-flags') + with repo.transaction(b'add-obsolescence-marker') as tr: prec, succ = relation nprec = prec.node() npare = None @@ -737,7 +737,7 @@ if not nsucs: npare = tuple(p.node() for p in prec.parents()) if nprec in nsucs: - raise error.Abort(_("changeset %s cannot obsolete itself") % prec) + raise error.Abort(_(b"changeset %s cannot obsolete itself") % prec) if saveeffectflag: # The effect flag is saved in a versioned field name for @@ -747,7 +747,7 @@ except TypeError: # hg <= 4.7 effectflag = obsutil.geteffectflag((prec, (succ,))) - metadata[obsutil.EFFECTFLAGFIELD] = "%d" % effectflag + metadata[obsutil.EFFECTFLAGFIELD] = b"%d" % effectflag # create markers repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare, @@ -849,11 +849,11 @@ repo.dirstate.setbranch(othbranch) else: # all the three branches are different - index = repo.ui.promptchoice(_("content divergent changesets on " - "different branches.\nchoose branch" - " for the resolution changeset. (a) " - "%s or (b) %s or (c) %s? $$ &a $$ &b" - " $$ &c") % + index = repo.ui.promptchoice(_(b"content divergent changesets on " + b"different branches.\nchoose branch" + b" for the resolution changeset. (a) " + b"%s or (b) %s or (c) %s? 
$$ &a $$ &b" + b" $$ &c") % (basebranch, divbranch, othbranch), 0) if index == 0: @@ -870,20 +870,20 @@ merger = simplemerge.Merge3Text(basedesc, divdesc, othdesc) mdesc = [] kwargs = {} - kwargs['name_base'] = 'base' - kwargs['base_marker'] = '|||||||' - for line in merger.merge_lines(name_a='divergent', name_b='other', + kwargs['name_base'] = b'base' + kwargs['base_marker'] = b'|||||||' + for line in merger.merge_lines(name_a=b'divergent', name_b=b'other', **kwargs): mdesc.append(line) - desc = ''.join(mdesc) + desc = b''.join(mdesc) if merger.conflicts: - prefixes = ("HG: Conflicts while merging changeset description of" - " content-divergent changesets.\nHG: Resolve conflicts" - " in commit messages to continue.\n\n") + prefixes = (b"HG: Conflicts while merging changeset description of" + b" content-divergent changesets.\nHG: Resolve conflicts" + b" in commit messages to continue.\n\n") - resolveddesc = ui.edit(prefixes + desc, ui.username(), action='desc') + resolveddesc = ui.edit(prefixes + desc, ui.username(), action=b'desc') # make sure we remove the prefixes part from final commit message if prefixes in resolveddesc: # hack, we should find something better @@ -934,17 +934,17 @@ returns the node of new commit which is formed """ if orig.rev() == dest.rev(): - msg = _('tried to relocate a node on top of itself') - hint = _("This shouldn't happen. If you still need to move changesets, " - "please do so manually with nothing to rebase - working " - "directory parent is also destination") + msg = _(b'tried to relocate a node on top of itself') + hint = _(b"This shouldn't happen. If you still need to move changesets, " + b"please do so manually with nothing to rebase - working " + b"directory parent is also destination") raise error.ProgrammingError(msg, hint=hint) if pctx is None: if len(orig.parents()) == 2: - msg = _("tried to relocate a merge commit without specifying which " - "parent should be moved") - hint = _("Specify the parent by passing in pctx") + msg = _(b"tried to relocate a merge commit without specifying which " + b"parent should be moved") + hint = _(b"Specify the parent by passing in pctx") raise error.ProgrammingError(msg, hint) pctx = orig.p1() @@ -972,8 +972,8 @@ newsha1 = nodemod.hex(successors[0][0]) commitmsg = commitmsg.replace(sha1, newsha1[:len(sha1)]) else: - repo.ui.note(_('The stale commit message reference to %s could ' - 'not be updated\n') % sha1) + repo.ui.note(_(b'The stale commit message reference to %s could ' + b'not be updated\n') % sha1) tr = repo.currenttransaction() assert tr is not None @@ -986,8 +986,8 @@ copies.duplicatecopies(repo, repo[None], dest.rev(), orig.p1().rev()) dirstatedance(repo, dest, orig.node(), None) - hint = _("see 'hg help evolve.interrupted'") - raise error.InterventionRequired(_("unresolved merge conflicts"), + hint = _(b"see 'hg help evolve.interrupted'") + raise error.InterventionRequired(_(b"unresolved merge conflicts"), hint=hint) nodenew = _relocatecommit(repo, orig, commitmsg) _finalizerelocate(repo, orig, dest, nodenew, tr, category, evolvestate) @@ -997,14 +997,14 @@ if commitmsg is None: commitmsg = orig.description() extra = dict(orig.extra()) - if 'branch' in extra: - del extra['branch'] - extra['rebase_source'] = orig.hex() + if b'branch' in extra: + del extra[b'branch'] + extra[b'rebase_source'] = orig.hex() - backup = repo.ui.backupconfig('phases', 'new-commit') + backup = repo.ui.backupconfig(b'phases', b'new-commit') try: targetphase = max(orig.phase(), phases.draft) - repo.ui.setconfig('phases', 
'new-commit', targetphase, 'evolve') + repo.ui.setconfig(b'phases', b'new-commit', targetphase, b'evolve') # Commit might fail if unresolved files exist nodenew = repo.commit(text=commitmsg, user=orig.user(), date=orig.date(), extra=extra) @@ -1020,18 +1020,18 @@ if nodenew is not None: obsolete.createmarkers(repo, [(repo[nodesrc], (repo[nodenew],))], - operation='evolve') + operation=b'evolve') for book in oldbookmarks: bmchanges.append((book, nodenew)) - evolvestate['bookmarkchanges'].append((book, nodesrc)) + evolvestate[b'bookmarkchanges'].append((book, nodesrc)) else: - if category == 'orphan': - repo.ui.status(_("evolution of %d:%s created no changes " - "to commit\n") % (orig.rev(), orig)) - obsolete.createmarkers(repo, [(repo[nodesrc], ())], operation='evolve') + if category == b'orphan': + repo.ui.status(_(b"evolution of %d:%s created no changes " + b"to commit\n") % (orig.rev(), orig)) + obsolete.createmarkers(repo, [(repo[nodesrc], ())], operation=b'evolve') # Behave like rebase, move bookmarks to dest for book in oldbookmarks: - evolvestate['bookmarkchanges'].append((book, nodesrc)) + evolvestate[b'bookmarkchanges'].append((book, nodesrc)) bmchanges.append((book, dest.node())) for book in destbookmarks: # restore bookmark that rebase move bmchanges.append((book, dest.node())) @@ -1041,61 +1041,61 @@ def _evolvemerge(repo, orig, dest, pctx, keepbranch): """Used by the evolve function to merge dest on top of pctx. return the same tuple as merge.graft""" - if repo['.'].rev() != dest.rev(): + if repo[b'.'].rev() != dest.rev(): merge.update(repo, dest, branchmerge=False, force=True) if repo._activebookmark: - repo.ui.status(_("(leaving bookmark %s)\n") % repo._activebookmark) + repo.ui.status(_(b"(leaving bookmark %s)\n") % repo._activebookmark) bookmarksmod.deactivate(repo) if keepbranch: repo.dirstate.setbranch(orig.branch()) if util.safehasattr(repo, 'currenttopic'): # uurrgs # there no other topic setter yet - if not orig.topic() and repo.vfs.exists('topic'): - repo.vfs.unlink('topic') + if not orig.topic() and repo.vfs.exists(b'topic'): + repo.vfs.unlink(b'topic') else: - with repo.vfs.open('topic', 'w') as f: + with repo.vfs.open(b'topic', b'w') as f: f.write(orig.topic()) - return merge.graft(repo, orig, pctx, ['destination', 'evolving'], True) + return merge.graft(repo, orig, pctx, [b'destination', b'evolving'], True) instabilities_map = { - 'contentdivergent': "content-divergent", - 'phasedivergent': "phase-divergent" + b'contentdivergent': b"content-divergent", + b'phasedivergent': b"phase-divergent" } def _selectrevs(repo, allopt, revopt, anyopt, targetcat): """select troubles in repo matching according to given options""" revs = set() if allopt or revopt: - revs = repo.revs("%s()" % targetcat) + revs = repo.revs(b"%s()" % targetcat) if revopt: revs = scmutil.revrange(repo, revopt) & revs elif not anyopt: - topic = getattr(repo, 'currenttopic', '') + topic = getattr(repo, 'currenttopic', b'') if topic: - revs = repo.revs('topic(%s)', topic) & revs - elif targetcat == 'orphan': + revs = repo.revs(b'topic(%s)', topic) & revs + elif targetcat == b'orphan': revs = _aspiringdescendant(repo, - repo.revs('(.::) - obsolete()::')) + repo.revs(b'(.::) - obsolete()::')) revs = set(revs) - if targetcat == 'contentdivergent': + if targetcat == b'contentdivergent': # Pick one divergent per group of divergents revs = _dedupedivergents(repo, revs) elif anyopt: - revs = repo.revs('first(%s())' % (targetcat)) - elif targetcat == 'orphan': - revs = set(_aspiringchildren(repo, 
repo.revs('(.::) - obsolete()::'))) + revs = repo.revs(b'first(%s())' % (targetcat)) + elif targetcat == b'orphan': + revs = set(_aspiringchildren(repo, repo.revs(b'(.::) - obsolete()::'))) if 1 < len(revs): - msg = "multiple evolve candidates" - hint = (_("select one of %s with --rev") - % ', '.join([bytes(repo[r]) for r in sorted(revs)])) + msg = b"multiple evolve candidates" + hint = (_(b"select one of %s with --rev") + % b', '.join([bytes(repo[r]) for r in sorted(revs)])) raise error.Abort(msg, hint=hint) - elif instabilities_map.get(targetcat, targetcat) in repo['.'].instabilities(): - revs = set([repo['.'].rev()]) + elif instabilities_map.get(targetcat, targetcat) in repo[b'.'].instabilities(): + revs = set([repo[b'.'].rev()]) return revs def _dedupedivergents(repo, revs): @@ -1124,14 +1124,14 @@ XXX this woobly function won't survive XXX """ repo = ctx._repo.unfiltered() - for base in repo.set('reverse(allpredecessors(%d))', ctx.rev()): + for base in repo.set(b'reverse(allpredecessors(%d))', ctx.rev()): newer = obsutil.successorssets(ctx._repo, base.node()) # drop filter and solution including the original ctx newer = [n for n in newer if n and ctx.node() not in n] if newer: return base, tuple(ctx._repo[o] for o in newer[0]) - raise error.Abort(_("base of divergent changeset %s not found") % ctx, - hint=_('this case is not yet handled')) + raise error.Abort(_(b"base of divergent changeset %s not found") % ctx, + hint=_(b'this case is not yet handled')) def _aspiringdescendant(repo, revs): """Return a list of changectx which can be stabilized on top of pctx or @@ -1139,7 +1139,7 @@ target = set(revs) result = set(target) paths = collections.defaultdict(set) - for r in repo.revs('orphan() - %ld', revs): + for r in repo.revs(b'orphan() - %ld', revs): for d in _possibledestination(repo, r): paths[d].add(r) @@ -1158,7 +1158,7 @@ one of its descendants. 
Empty list if none can be found.""" target = set(revs) result = [] - for r in repo.revs('orphan() - %ld', revs): + for r in repo.revs(b'orphan() - %ld', revs): dest = _possibledestination(repo, r) if target & dest: result.append(r) @@ -1191,104 +1191,104 @@ def _handlenotrouble(ui, repo, allopt, revopt, anyopt, targetcat): """Used by the evolve function to display an error message when no troubles can be resolved""" - troublecategories = ['phasedivergent', 'contentdivergent', 'orphan'] + troublecategories = [b'phasedivergent', b'contentdivergent', b'orphan'] unselectedcategories = [c for c in troublecategories if c != targetcat] msg = None hint = None retoverride = None troubled = { - "orphan": repo.revs("orphan()"), - "contentdivergent": repo.revs("contentdivergent()"), - "phasedivergent": repo.revs("phasedivergent()"), - "all": repo.revs("unstable()"), + b"orphan": repo.revs(b"orphan()"), + b"contentdivergent": repo.revs(b"contentdivergent()"), + b"phasedivergent": repo.revs(b"phasedivergent()"), + b"all": repo.revs(b"unstable()"), } hintmap = { - 'phasedivergent': _("do you want to use --phase-divergent"), - 'phasedivergent+contentdivergent': _("do you want to use " - "--phase-divergent or" - " --content-divergent"), - 'phasedivergent+orphan': _("do you want to use --phase-divergent" - " or --orphan"), - 'contentdivergent': _("do you want to use --content-divergent"), - 'contentdivergent+orphan': _("do you want to use --content-divergent" - " or --orphan"), - 'orphan': _("do you want to use --orphan"), - 'any+phasedivergent': _("do you want to use --any (or --rev) and" - " --phase-divergent"), - 'any+phasedivergent+contentdivergent': _("do you want to use --any" - " (or --rev) and" - " --phase-divergent or" - " --content-divergent"), - 'any+phasedivergent+orphan': _("do you want to use --any (or --rev)" - " and --phase-divergent or --orphan"), - 'any+contentdivergent': _("do you want to use --any (or --rev) and" - " --content-divergent"), - 'any+contentdivergent+orphan': _("do you want to use --any (or --rev)" - " and --content-divergent or " - "--orphan"), - 'any+orphan': _("do you want to use --any (or --rev)" - "and --orphan"), + b'phasedivergent': _(b"do you want to use --phase-divergent"), + b'phasedivergent+contentdivergent': _(b"do you want to use " + b"--phase-divergent or" + b" --content-divergent"), + b'phasedivergent+orphan': _(b"do you want to use --phase-divergent" + b" or --orphan"), + b'contentdivergent': _(b"do you want to use --content-divergent"), + b'contentdivergent+orphan': _(b"do you want to use --content-divergent" + b" or --orphan"), + b'orphan': _(b"do you want to use --orphan"), + b'any+phasedivergent': _(b"do you want to use --any (or --rev) and" + b" --phase-divergent"), + b'any+phasedivergent+contentdivergent': _(b"do you want to use --any" + b" (or --rev) and" + b" --phase-divergent or" + b" --content-divergent"), + b'any+phasedivergent+orphan': _(b"do you want to use --any (or --rev)" + b" and --phase-divergent or --orphan"), + b'any+contentdivergent': _(b"do you want to use --any (or --rev) and" + b" --content-divergent"), + b'any+contentdivergent+orphan': _(b"do you want to use --any (or --rev)" + b" and --content-divergent or " + b"--orphan"), + b'any+orphan': _(b"do you want to use --any (or --rev)" + b"and --orphan"), } if revopt: revs = scmutil.revrange(repo, revopt) if not revs: - msg = _("set of specified revisions is empty") + msg = _(b"set of specified revisions is empty") else: - msg = _("no %s changesets in specified revisions") % targetcat + 
msg = _(b"no %s changesets in specified revisions") % targetcat othertroubles = [] for cat in unselectedcategories: if revs & troubled[cat]: othertroubles.append(cat) if othertroubles: - hint = hintmap['+'.join(othertroubles)] + hint = hintmap[b'+'.join(othertroubles)] elif anyopt: - msg = _("no %s changesets to evolve") % targetcat + msg = _(b"no %s changesets to evolve") % targetcat othertroubles = [] for cat in unselectedcategories: if troubled[cat]: othertroubles.append(cat) if othertroubles: - hint = hintmap['+'.join(othertroubles)] + hint = hintmap[b'+'.join(othertroubles)] else: # evolve without any option = relative to the current wdir - if targetcat == 'orphan': - msg = _("nothing to evolve on current working copy parent") + if targetcat == b'orphan': + msg = _(b"nothing to evolve on current working copy parent") else: - msg = _("current working copy parent is not %s") % targetcat + msg = _(b"current working copy parent is not %s") % targetcat - p1 = repo['.'].rev() + p1 = repo[b'.'].rev() othertroubles = [] for cat in unselectedcategories: if p1 in troubled[cat]: othertroubles.append(cat) if othertroubles: - hint = hintmap['+'.join(othertroubles)] + hint = hintmap[b'+'.join(othertroubles)] else: length = len(troubled[targetcat]) if length: - hint = _("%d other %s in the repository, do you want --any " - "or --rev") % (length, targetcat) + hint = _(b"%d other %s in the repository, do you want --any " + b"or --rev") % (length, targetcat) else: othertroubles = [] for cat in unselectedcategories: if troubled[cat]: othertroubles.append(cat) if othertroubles: - hint = hintmap['any+' + ('+'.join(othertroubles))] + hint = hintmap[b'any+' + (b'+'.join(othertroubles))] else: - msg = _("no troubled changesets") + msg = _(b"no troubled changesets") # Exit with a 0 (success) status in this case. 
retoverride = 0 assert msg is not None - ui.write_err("%s\n" % msg) + ui.write_err(b"%s\n" % msg) if hint: - ui.write_err("(%s)\n" % hint) + ui.write_err(b"(%s)\n" % hint) ret = 2 else: ret = 1 @@ -1308,21 +1308,21 @@ def listtroubles(ui, repo, troublecategories, **opts): """Print all the troubles for the repo (or given revset)""" - troublecategories = troublecategories or ['contentdivergent', 'orphan', 'phasedivergent'] - showunstable = 'orphan' in troublecategories - showbumped = 'phasedivergent' in troublecategories - showdivergent = 'contentdivergent' in troublecategories + troublecategories = troublecategories or [b'contentdivergent', b'orphan', b'phasedivergent'] + showunstable = b'orphan' in troublecategories + showbumped = b'phasedivergent' in troublecategories + showdivergent = b'contentdivergent' in troublecategories - revs = repo.revs('+'.join("%s()" % t for t in troublecategories)) + revs = repo.revs(b'+'.join(b"%s()" % t for t in troublecategories)) if opts.get('rev'): revs = scmutil.revrange(repo, opts.get('rev')) - fm = ui.formatter('evolvelist', pycompat.byteskwargs(opts)) + fm = ui.formatter(b'evolvelist', pycompat.byteskwargs(opts)) for rev in revs: ctx = repo[rev] unpars = _preparelistctxs(ctx.parents(), lambda p: p.orphan()) obspars = _preparelistctxs(ctx.parents(), lambda p: p.obsolete()) - imprecs = _preparelistctxs(repo.set("allpredecessors(%n)", ctx.node()), + imprecs = _preparelistctxs(repo.set(b"allpredecessors(%n)", ctx.node()), lambda p: not p.mutable()) dsets = divergentsets(repo, ctx) @@ -1332,55 +1332,55 @@ desc = ctx.description() if desc: desc = desc.splitlines()[0] - desc = (desc[:desclen] + '...') if len(desc) > desclen else desc - fm.plain('%s: ' % ctx.hex()[:hashlen]) - fm.plain('%s\n' % desc) + desc = (desc[:desclen] + b'...') if len(desc) > desclen else desc + fm.plain(b'%s: ' % ctx.hex()[:hashlen]) + fm.plain(b'%s\n' % desc) fm.data(node=ctx.hex(), rev=ctx.rev(), desc=desc, phase=ctx.phasestr()) for unpar in unpars if showunstable else []: - fm.plain(' %s: %s (%s parent)\n' % (TROUBLES['ORPHAN'], - unpar[:hashlen], - TROUBLES['ORPHAN'])) + fm.plain(b' %s: %s (%s parent)\n' % (TROUBLES['ORPHAN'], + unpar[:hashlen], + TROUBLES['ORPHAN'])) for obspar in obspars if showunstable else []: - fm.plain(' %s: %s (obsolete parent)\n' % (TROUBLES['ORPHAN'], - obspar[:hashlen])) + fm.plain(b' %s: %s (obsolete parent)\n' % (TROUBLES['ORPHAN'], + obspar[:hashlen])) for imprec in imprecs if showbumped else []: - fm.plain(' %s: %s (immutable precursor)\n' % + fm.plain(b' %s: %s (immutable precursor)\n' % (TROUBLES['PHASEDIVERGENT'], imprec[:hashlen])) if dsets and showdivergent: for dset in dsets: - fm.plain(' %s: ' % TROUBLES['CONTENTDIVERGENT']) + fm.plain(b' %s: ' % TROUBLES['CONTENTDIVERGENT']) first = True - for n in dset['divergentnodes']: - t = "%s (%s)" if first else " %s (%s)" + for n in dset[b'divergentnodes']: + t = b"%s (%s)" if first else b" %s (%s)" first = False fm.plain(t % (nodemod.hex(n)[:hashlen], repo[n].phasestr())) - comprec = nodemod.hex(dset['commonprecursor'])[:hashlen] - fm.plain(" (precursor %s)\n" % comprec) - fm.plain("\n") + comprec = nodemod.hex(dset[b'commonprecursor'])[:hashlen] + fm.plain(b" (precursor %s)\n" % comprec) + fm.plain(b"\n") # templater-friendly section _formatctx(fm, ctx) troubles = [] for unpar in unpars: - troubles.append({'troubletype': TROUBLES['ORPHAN'], - 'sourcenode': unpar, 'sourcetype': 'orphanparent'}) + troubles.append({b'troubletype': TROUBLES['ORPHAN'], + b'sourcenode': unpar, b'sourcetype': 
b'orphanparent'}) for obspar in obspars: - troubles.append({'troubletype': TROUBLES['ORPHAN'], - 'sourcenode': obspar, - 'sourcetype': 'obsoleteparent'}) + troubles.append({b'troubletype': TROUBLES['ORPHAN'], + b'sourcenode': obspar, + b'sourcetype': b'obsoleteparent'}) for imprec in imprecs: - troubles.append({'troubletype': TROUBLES['PHASEDIVERGENT'], - 'sourcenode': imprec, - 'sourcetype': 'immutableprecursor'}) + troubles.append({b'troubletype': TROUBLES['PHASEDIVERGENT'], + b'sourcenode': imprec, + b'sourcetype': b'immutableprecursor'}) for dset in dsets: - divnodes = [{'node': nodemod.hex(n), - 'phase': repo[n].phasestr(), - } for n in dset['divergentnodes']] - troubles.append({'troubletype': TROUBLES['CONTENTDIVERGENT'], - 'commonprecursor': nodemod.hex(dset['commonprecursor']), - 'divergentnodes': divnodes}) + divnodes = [{b'node': nodemod.hex(n), + b'phase': repo[n].phasestr(), + } for n in dset[b'divergentnodes']] + troubles.append({b'troubletype': TROUBLES['CONTENTDIVERGENT'], + b'commonprecursor': nodemod.hex(dset[b'commonprecursor']), + b'divergentnodes': divnodes}) fm.data(troubles=troubles) fm.end() @@ -1391,61 +1391,61 @@ if opts['continue']: if opts['any']: - raise error.Abort(_('cannot specify both "--any" and "--continue"')) + raise error.Abort(_(b'cannot specify both "--any" and "--continue"')) if opts['all']: - raise error.Abort(_('cannot specify both "--all" and "--continue"')) + raise error.Abort(_(b'cannot specify both "--all" and "--continue"')) if opts['rev']: - raise error.Abort(_('cannot specify both "--rev" and "--continue"')) + raise error.Abort(_(b'cannot specify both "--rev" and "--continue"')) if opts['stop']: - raise error.Abort(_('cannot specify both "--stop" and' - ' "--continue"')) + raise error.Abort(_(b'cannot specify both "--stop" and' + b' "--continue"')) if opts['abort']: - raise error.Abort(_('cannot specify both "--abort" and' - ' "--continue"')) + raise error.Abort(_(b'cannot specify both "--abort" and' + b' "--continue"')) if opts['stop']: if opts['any']: - raise error.Abort(_('cannot specify both "--any" and "--stop"')) + raise error.Abort(_(b'cannot specify both "--any" and "--stop"')) if opts['all']: - raise error.Abort(_('cannot specify both "--all" and "--stop"')) + raise error.Abort(_(b'cannot specify both "--all" and "--stop"')) if opts['rev']: - raise error.Abort(_('cannot specify both "--rev" and "--stop"')) + raise error.Abort(_(b'cannot specify both "--rev" and "--stop"')) if opts['abort']: - raise error.Abort(_('cannot specify both "--abort" and "--stop"')) + raise error.Abort(_(b'cannot specify both "--abort" and "--stop"')) if opts['abort']: if opts['any']: - raise error.Abort(_('cannot specify both "--any" and "--abort"')) + raise error.Abort(_(b'cannot specify both "--any" and "--abort"')) if opts['all']: - raise error.Abort(_('cannot specify both "--all" and "--abort"')) + raise error.Abort(_(b'cannot specify both "--all" and "--abort"')) if opts['rev']: - raise error.Abort(_('cannot specify both "--rev" and "--abort"')) + raise error.Abort(_(b'cannot specify both "--rev" and "--abort"')) if opts['rev']: if opts['any']: - raise error.Abort(_('cannot specify both "--rev" and "--any"')) + raise error.Abort(_(b'cannot specify both "--rev" and "--any"')) if opts['all']: - raise error.Abort(_('cannot specify both "--rev" and "--all"')) + raise error.Abort(_(b'cannot specify both "--rev" and "--all"')) # Backward compatibility if opts['unstable']: - msg = ("'evolve --unstable' is deprecated, " - "use 'evolve --orphan'") - 
repo.ui.deprecwarn(msg, '4.4') + msg = (b"'evolve --unstable' is deprecated, " + b"use 'evolve --orphan'") + repo.ui.deprecwarn(msg, b'4.4') opts['orphan'] = opts['divergent'] if opts['divergent']: - msg = ("'evolve --divergent' is deprecated, " - "use 'evolve --content-divergent'") - repo.ui.deprecwarn(msg, '4.4') + msg = (b"'evolve --divergent' is deprecated, " + b"use 'evolve --content-divergent'") + repo.ui.deprecwarn(msg, b'4.4') opts['content_divergent'] = opts['divergent'] if opts['bumped']: - msg = ("'evolve --bumped' is deprecated, " - "use 'evolve --phase-divergent'") - repo.ui.deprecwarn(msg, '4.4') + msg = (b"'evolve --bumped' is deprecated, " + b"use 'evolve --phase-divergent'") + repo.ui.deprecwarn(msg, b'4.4') opts['phase_divergent'] = opts['bumped'] @@ -1458,8 +1458,8 @@ unfi = repo.unfiltered() succ = utility._singlesuccessor(repo, unfi[startnode]) hg.updaterepo(repo, repo[succ].node(), False) - if repo['.'].node() != startnode: - ui.status(_('working directory is now at %s\n') % repo['.']) + if repo[b'.'].node() != startnode: + ui.status(_(b'working directory is now at %s\n') % repo[b'.']) def divergentsets(repo, ctx): """Compute sets of commits divergent with a given one""" @@ -1481,38 +1481,38 @@ divergence = [] for divset, b in base.items(): divergence.append({ - 'divergentnodes': divset, - 'commonprecursor': b + b'divergentnodes': divset, + b'commonprecursor': b }) return divergence @eh.command( - 'evolve|stabilize|solve', - [('n', 'dry-run', False, - _('do not perform actions, just print what would be done')), - ('', 'confirm', False, - _('ask for confirmation before performing the action')), - ('A', 'any', False, - _('also consider troubled changesets unrelated to current working ' - 'directory')), - ('r', 'rev', [], _('solves troubles of these revisions'), _('REV')), - ('', 'bumped', False, _('solves only bumped changesets (DEPRECATED)')), - ('', 'phase-divergent', False, _('solves only phase-divergent changesets')), - ('', 'divergent', False, _('solves only divergent changesets (DEPRECATED)')), - ('', 'content-divergent', False, _('solves only content-divergent changesets')), - ('', 'unstable', False, _('solves only unstable changesets (DEPRECATED)')), - ('', 'orphan', False, _('solves only orphan changesets (default)')), - ('a', 'all', None, _('evolve all troubled changesets related to the current' - ' working directory and its descendants (default)')), - ('', 'update', False, _('update to the head of evolved changesets')), - ('c', 'continue', False, _('continue an interrupted evolution')), - ('', 'stop', False, _('stop the interrupted evolution')), - ('', 'abort', False, _('abort the interrupted evolution')), - ('l', 'list', False, _('provide details on troubled changesets' - ' in the repo')), - ] + mergetoolopts, - _('[OPTIONS]...'), + b'evolve|stabilize|solve', + [(b'n', b'dry-run', False, + _(b'do not perform actions, just print what would be done')), + (b'', b'confirm', False, + _(b'ask for confirmation before performing the action')), + (b'A', b'any', False, + _(b'also consider troubled changesets unrelated to current working ' + b'directory')), + (b'r', b'rev', [], _(b'solves troubles of these revisions'), _(b'REV')), + (b'', b'bumped', False, _(b'solves only bumped changesets (DEPRECATED)')), + (b'', b'phase-divergent', False, _(b'solves only phase-divergent changesets')), + (b'', b'divergent', False, _(b'solves only divergent changesets (DEPRECATED)')), + (b'', b'content-divergent', False, _(b'solves only content-divergent changesets')), + (b'', 
b'unstable', False, _(b'solves only unstable changesets (DEPRECATED)')), + (b'', b'orphan', False, _(b'solves only orphan changesets (default)')), + (b'a', b'all', None, _(b'evolve all troubled changesets related to the current' + b' working directory and its descendants (default)')), + (b'', b'update', False, _(b'update to the head of evolved changesets')), + (b'c', b'continue', False, _(b'continue an interrupted evolution')), + (b'', b'stop', False, _(b'stop the interrupted evolution')), + (b'', b'abort', False, _(b'abort the interrupted evolution')), + (b'l', b'list', False, _(b'provide details on troubled changesets' + b' in the repo')), + ] + mergetoolopts, + _(b'[OPTIONS]...'), helpbasic=True ) def evolve(ui, repo, **opts): @@ -1607,7 +1607,10 @@ aborts the interrupted evolve and undoes all the resolution which have happened """ + with repo.wlock(), repo.lock(): + return _performevolve(ui, repo, **opts) +def _performevolve(ui, repo, **opts): opts = _checkevolveopts(repo, opts) # Options contopt = opts['continue'] @@ -1615,7 +1618,7 @@ allopt = opts['all'] if allopt is None: allopt = True - startnode = repo['.'].node() + startnode = repo[b'.'].node() dryrunopt = opts['dry_run'] confirmopt = opts['confirm'] revopt = opts['rev'] @@ -1624,56 +1627,56 @@ shouldupdate = opts['update'] troublecategories = { - 'phasedivergent': 'phase_divergent', - 'contentdivergent': 'content_divergent', - 'orphan': 'orphan', + b'phasedivergent': r'phase_divergent', + b'contentdivergent': r'content_divergent', + b'orphan': r'orphan', } specifiedcategories = [k for k, v in troublecategories.items() if opts[v]] if opts['list']: - ui.pager('evolve') + ui.pager(b'evolve') listtroubles(ui, repo, specifiedcategories, **opts) return - targetcat = 'orphan' + targetcat = b'orphan' if 1 < len(specifiedcategories): - msg = _('cannot specify more than one trouble category to solve (yet)') + msg = _(b'cannot specify more than one trouble category to solve (yet)') raise error.Abort(msg) elif len(specifiedcategories) == 1: targetcat = specifiedcategories[0] - ui.setconfig('ui', 'forcemerge', opts.get('tool', ''), 'evolve') + ui.setconfig(b'ui', b'forcemerge', opts.get('tool', r''), b'evolve') evolvestate = state.cmdstate(repo) # Continuation handling if contopt: if not evolvestate: - raise error.Abort(_('no interrupted evolve to continue')) + raise error.Abort(_(b'no interrupted evolve to continue')) evolvestate.load() continueevolve(ui, repo, evolvestate) - if evolvestate['command'] != 'evolve': + if evolvestate[b'command'] != b'evolve': evolvestate.delete() return - startnode = repo.unfiltered()[evolvestate['startnode']] - if 'update' in evolvestate: - shouldupdate = evolvestate['update'] + startnode = repo.unfiltered()[evolvestate[b'startnode']] + if b'update' in evolvestate: + shouldupdate = evolvestate[b'update'] evolvestate.delete() elif stopopt: if not evolvestate: - raise error.Abort(_('no interrupted evolve to stop')) + raise error.Abort(_(b'no interrupted evolve to stop')) evolvestate.load() stopevolve(ui, repo, evolvestate) evolvestate.delete() return elif abortopt: if not evolvestate: - raise error.Abort(_('no interrupted evolve to abort')) + raise error.Abort(_(b'no interrupted evolve to abort')) evolvestate.load() # `hg next --evolve` in play - if evolvestate['command'] != 'evolve': - pctx = repo['.'] + if evolvestate[b'command'] != b'evolve': + pctx = repo[b'.'] hg.updaterepo(repo, pctx.node(), True) - ui.status(_('evolve aborted\n')) - ui.status(_('working directory is now at %s\n') + ui.status(_(b'evolve 
aborted\n')) + ui.status(_(b'working directory is now at %s\n') % pctx.hex()[:12]) evolvestate.delete() return 0 @@ -1681,7 +1684,7 @@ else: cmdutil.bailifchanged(repo) - obswdir = repo['.'].obsolete() + obswdir = repo[b'.'].obsolete() revs = _selectrevs(repo, allopt, revopt, anyopt, targetcat) if not (revs or obswdir): @@ -1703,37 +1706,36 @@ def progresscb(): if showprogress: - compat.progress(ui, _('evolve'), seen, unit=_('changesets'), + compat.progress(ui, _(b'evolve'), seen, unit=_(b'changesets'), total=count) # Order the revisions revs = _orderrevs(repo, revs) # cbor does not know how to serialize sets, using list for skippedrevs - stateopts = {'category': targetcat, 'replacements': {}, - 'revs': list(revs), 'confirm': confirmopt, - 'startnode': startnode, 'skippedrevs': [], - 'command': 'evolve', 'orphanmerge': False, - 'bookmarkchanges': [], 'temprevs': [], 'obsmarkers': [], - 'update': shouldupdate} + stateopts = {b'category': targetcat, b'replacements': {}, + b'revs': list(revs), b'confirm': confirmopt, + b'startnode': startnode, b'skippedrevs': [], + b'command': b'evolve', b'orphanmerge': False, + b'bookmarkchanges': [], b'temprevs': [], b'obsmarkers': [], + b'update': shouldupdate} evolvestate.addopts(stateopts) # lastsolved: keep track of successor of last troubled cset we evolved # to confirm that if atop msg should be suppressed to remove redundancy lastsolved = None - activetopic = getattr(repo, 'currenttopic', '') - with repo.wlock(), repo.lock(): - tr = repo.transaction("evolve") - with util.acceptintervention(tr): - for rev in revs: - lastsolved = _solveonerev(ui, repo, rev, evolvestate, - activetopic, dryrunopt, - confirmopt, progresscb, - targetcat, lastsolved) - seen += 1 + activetopic = getattr(repo, 'currenttopic', b'') + tr = repo.transaction(b"evolve") + with util.acceptintervention(tr): + for rev in revs: + lastsolved = _solveonerev(ui, repo, rev, evolvestate, + activetopic, dryrunopt, + confirmopt, progresscb, + targetcat, lastsolved) + seen += 1 if showprogress: - compat.progress(ui, _('evolve'), None) + compat.progress(ui, _(b'evolve'), None) _cleanup(ui, repo, startnode, shouldupdate) @@ -1745,7 +1747,7 @@ stabilizes for both parents of orphan merges. 
""" curctx = repo[rev] - revtopic = getattr(curctx, 'topic', lambda: '')() + revtopic = getattr(curctx, 'topic', lambda: b'')() topicidx = getattr(curctx, 'topicidx', lambda: None)() stacktmplt = False # check if revision being evolved is in active topic to make sure @@ -1757,44 +1759,44 @@ confirmopt, progresscb, targetcat, lastsolved=lastsolved, stacktmplt=stacktmplt) if ret[0]: - evolvestate['replacements'][curctx.node()] = ret[1] + evolvestate[b'replacements'][curctx.node()] = ret[1] lastsolved = ret[1] else: - evolvestate['skippedrevs'].append(curctx.node()) + evolvestate[b'skippedrevs'].append(curctx.node()) - if evolvestate['orphanmerge']: + if evolvestate[b'orphanmerge']: # we were processing an orphan merge with both parents obsolete, # stabilized for second parent, re-stabilize for the first parent ret = _solveone(ui, repo, repo[ret[1]], evolvestate, dryrunopt, confirmopt, progresscb, targetcat, stacktmplt=stacktmplt) if ret[0]: - evolvestate['replacements'][curctx.node()] = ret[1] + evolvestate[b'replacements'][curctx.node()] = ret[1] lastsolved = ret[1] else: - evolvestate['skippedrevs'].append(curctx.node()) + evolvestate[b'skippedrevs'].append(curctx.node()) - evolvestate['orphanmerge'] = False + evolvestate[b'orphanmerge'] = False return lastsolved def solveobswdp(ui, repo, opts): """this function updates to the successor of obsolete wdir parent""" - oldid = repo['.'].node() - startctx = repo['.'] + oldid = repo[b'.'].node() + startctx = repo[b'.'] dryrunopt = opts.get('dry_run', False) displayer = compat.changesetdisplayer(ui, repo, - {'template': shorttemplate}) + {b'template': shorttemplate}) try: - ctx = repo[utility._singlesuccessor(repo, repo['.'])] + ctx = repo[utility._singlesuccessor(repo, repo[b'.'])] except utility.MultipleSuccessorsError as exc: - repo.ui.write_err(_('parent is obsolete with multiple' - ' successors:\n')) + repo.ui.write_err(_(b'parent is obsolete with multiple' + b' successors:\n')) for ln in exc.successorssets: for n in ln: displayer.show(repo[n]) return 2 - ui.status(_('update:')) + ui.status(_(b'update:')) if not ui.quiet: displayer.show(ctx) @@ -1804,33 +1806,33 @@ newid = ctx.node() if ctx != startctx: - with repo.wlock(), repo.lock(), repo.transaction('evolve') as tr: + with repo.wlock(), repo.lock(), repo.transaction(b'evolve') as tr: bmupdater = rewriteutil.bookmarksupdater(repo, oldid, tr) bmupdater(newid) - ui.status(_('working directory is now at %s\n') % ctx) + ui.status(_(b'working directory is now at %s\n') % ctx) return res def stopevolve(ui, repo, evolvestate): """logic for handling of `hg evolve --stop`""" updated = False pctx = None - if (evolvestate['command'] == 'evolve' - and evolvestate['category'] == 'contentdivergent' - and evolvestate['relocated']): - oldother = evolvestate['old-other'] + if (evolvestate[b'command'] == b'evolve' + and evolvestate[b'category'] == b'contentdivergent' + and evolvestate[b'relocated']): + oldother = evolvestate[b'old-other'] if oldother: with repo.wlock(), repo.lock(): repo = repo.unfiltered() hg.updaterepo(repo, oldother, True) - strips = [evolvestate['relocated']] + strips = [evolvestate[b'relocated']] repair.strip(ui, repo, strips, False) updated = True pctx = repo[oldother] if not updated: - pctx = repo['.'] + pctx = repo[b'.'] hg.updaterepo(repo, pctx.node(), True) - ui.status(_('stopped the interrupted evolve\n')) - ui.status(_('working directory is now at %s\n') % pctx) + ui.status(_(b'stopped the interrupted evolve\n')) + ui.status(_(b'working directory is now at %s\n') % pctx) def 
abortevolve(ui, repo, evolvestate): """ logic for handling of `hg evolve --abort`""" @@ -1840,11 +1842,11 @@ evolvedctx = [] # boolean value to say whether we should strip or not cleanup = True - startnode = evolvestate['startnode'] - for old, new in evolvestate['replacements'].items(): + startnode = evolvestate[b'startnode'] + for old, new in evolvestate[b'replacements'].items(): if new: evolvedctx.append(repo[new]) - for temp in evolvestate['temprevs']: + for temp in evolvestate[b'temprevs']: if temp: evolvedctx.append(repo[temp]) evolvedrevs = [c.rev() for c in evolvedctx] @@ -1852,145 +1854,159 @@ # checking if phase changed of any of the evolved rev immutable = [c for c in evolvedctx if not c.mutable()] if immutable: - repo.ui.warn(_("cannot clean up public changesets: %s\n") - % ', '.join(bytes(c) for c in immutable), - hint=_("see 'hg help phases' for details")) - cleanup = False - - # checking no new changesets are created on evolved revs - descendants = set() - if evolvedrevs: - descendants = set(repo.changelog.descendants(evolvedrevs)) - if descendants - set(evolvedrevs): - repo.ui.warn(_("warning: new changesets detected on destination " - "branch\n")) + repo.ui.warn(_(b"cannot clean up public changesets: %s\n") + % b', '.join(bytes(c) for c in immutable), + hint=_(b"see 'hg help phases' for details")) cleanup = False - # finding the indices of the obsmarkers to be stripped and stripping - # them - if evolvestate['obsmarkers']: - stripmarkers = set() - for m in evolvestate['obsmarkers']: - m = (m[0], m[1]) - stripmarkers.add(m) - indices = [] - allmarkers = obsutil.getmarkers(repo) - for i, m in enumerate(allmarkers): - marker = (m.prednode(), m.succnodes()[0]) - if marker in stripmarkers: - indices.append(i) + # checking no new changesets are created on evolved revs + descendants = set() + if evolvedrevs: + descendants = set(repo.changelog.descendants(evolvedrevs)) + if descendants - set(evolvedrevs): + repo.ui.warn(_(b"warning: new changesets detected on destination " + b"branch\n")) + cleanup = False - repair.deleteobsmarkers(repo.obsstore, indices) - repo.ui.debug('deleted %d obsmarkers\n' % len(indices)) + # finding the indices of the obsmarkers to be stripped and stripping + # them + if evolvestate[b'obsmarkers']: + stripmarkers = set() + for m in evolvestate[b'obsmarkers']: + m = (m[0], m[1]) + stripmarkers.add(m) + indices = [] + allmarkers = obsutil.getmarkers(repo) + for i, m in enumerate(allmarkers): + marker = (m.prednode(), m.succnodes()[0]) + if marker in stripmarkers: + indices.append(i) - if cleanup: - if evolvedrevs: - strippoints = [c.node() - for c in repo.set('roots(%ld)', evolvedrevs)] + repair.deleteobsmarkers(repo.obsstore, indices) + repo.ui.debug(b'deleted %d obsmarkers\n' % len(indices)) + + if cleanup: + if evolvedrevs: + strippoints = [c.node() + for c in repo.set(b'roots(%ld)', evolvedrevs)] + + # updating the working directory + hg.updaterepo(repo, startnode, True) - # updating the working directory - hg.updaterepo(repo, startnode, True) + # Strip from the first evolved revision + if evolvedrevs: + # no backup of evolved cset versions needed + repair.strip(repo.ui, repo, strippoints, False) - # Strip from the first evolved revision - if evolvedrevs: - # no backup of evolved cset versions needed - repair.strip(repo.ui, repo, strippoints, False) + with repo.transaction(b'evolve') as tr: + # restoring bookmarks at there original place + bmchanges = evolvestate[b'bookmarkchanges'] + if bmchanges: + repo._bookmarks.applychanges(repo, tr, bmchanges) - 
with repo.transaction('evolve') as tr: - # restoring bookmarks at there original place - bmchanges = evolvestate['bookmarkchanges'] - if bmchanges: - repo._bookmarks.applychanges(repo, tr, bmchanges) + evolvestate.delete() + ui.status(_(b'evolve aborted\n')) + ui.status(_(b'working directory is now at %s\n') + % nodemod.hex(startnode)[:12]) + else: + raise error.Abort(_(b"unable to abort interrupted evolve, use 'hg " + b"evolve --stop' to stop evolve")) +def hgabortevolve(ui, repo): + """logic for aborting evolve using 'hg abort'""" + with repo.wlock(), repo.lock(): + evolvestate = state.cmdstate(repo) + evolvestate.load() + if evolvestate[b'command'] != b'evolve': + pctx = repo[b'.'] + hg.updaterepo(repo, pctx.node(), True) + ui.status(_(b'evolve aborted\n')) + ui.status(_(b'working directory is now at %s\n') + % pctx.hex()[:12]) evolvestate.delete() - ui.status(_('evolve aborted\n')) - ui.status(_('working directory is now at %s\n') - % nodemod.hex(startnode)[:12]) - else: - raise error.Abort(_("unable to abort interrupted evolve, use 'hg " - "evolve --stop' to stop evolve")) + return 0 + return abortevolve(ui, repo, evolvestate) def continueevolve(ui, repo, evolvestate): """logic for handling of `hg evolve --continue`""" - with repo.wlock(), repo.lock(): - ms = merge.mergestate.read(repo) - mergeutil.checkunresolved(ms) - if (evolvestate['command'] == 'next' - or evolvestate['category'] == 'orphan'): - _completeorphan(ui, repo, evolvestate) - elif evolvestate['category'] == 'phasedivergent': - _completephasedivergent(ui, repo, evolvestate) - elif evolvestate['category'] == 'contentdivergent': - _continuecontentdivergent(ui, repo, evolvestate, None) - else: - repo.ui.status(_("continuing interrupted '%s' resolution is not yet" - " supported\n") % evolvestate['category']) - return + ms = merge.mergestate.read(repo) + mergeutil.checkunresolved(ms) + if (evolvestate[b'command'] == b'next' + or evolvestate[b'category'] == b'orphan'): + _completeorphan(ui, repo, evolvestate) + elif evolvestate[b'category'] == b'phasedivergent': + _completephasedivergent(ui, repo, evolvestate) + elif evolvestate[b'category'] == b'contentdivergent': + _continuecontentdivergent(ui, repo, evolvestate, None) + else: + repo.ui.status(_(b"continuing interrupted '%s' resolution is not yet" + b" supported\n") % evolvestate[b'category']) + return - # make sure we are continuing evolve and not `hg next --evolve` - if evolvestate['command'] != 'evolve': - return + # make sure we are continuing evolve and not `hg next --evolve` + if evolvestate[b'command'] != b'evolve': + return - # Progress handling - seen = 1 - count = len(evolvestate['revs']) + # Progress handling + seen = 1 + count = len(evolvestate[b'revs']) - def progresscb(): - compat.progress(ui, _('evolve'), seen, unit=_('changesets'), - total=count) + def progresscb(): + compat.progress(ui, _(b'evolve'), seen, unit=_(b'changesets'), + total=count) - category = evolvestate['category'] - confirm = evolvestate['confirm'] - unfi = repo.unfiltered() - # lastsolved: keep track of successor of last troubled cset we - # evolved to confirm that if atop msg should be suppressed to remove - # redundancy - lastsolved = None - activetopic = getattr(repo, 'currenttopic', '') - tr = repo.transaction("evolve") - with util.acceptintervention(tr): - for rev in evolvestate['revs']: - # XXX: prevent this lookup by storing nodes instead of revnums - curctx = unfi[rev] + category = evolvestate[b'category'] + confirm = evolvestate[b'confirm'] + unfi = repo.unfiltered() + # lastsolved: 
keep track of successor of last troubled cset we + # evolved to confirm that if atop msg should be suppressed to remove + # redundancy + lastsolved = None + activetopic = getattr(repo, 'currenttopic', b'') + tr = repo.transaction(b"evolve") + with util.acceptintervention(tr): + for rev in evolvestate[b'revs']: + # XXX: prevent this lookup by storing nodes instead of revnums + curctx = unfi[rev] - # check if we can use stack template - revtopic = getattr(curctx, 'topic', lambda: '')() - topicidx = getattr(curctx, 'topicidx', lambda: None)() - stacktmplt = False - if (activetopic and (activetopic == revtopic) - and topicidx is not None): - stacktmplt = True + # check if we can use stack template + revtopic = getattr(curctx, 'topic', lambda: b'')() + topicidx = getattr(curctx, 'topicidx', lambda: None)() + stacktmplt = False + if (activetopic and (activetopic == revtopic) + and topicidx is not None): + stacktmplt = True - if (curctx.node() not in evolvestate['replacements'] - and curctx.node() not in evolvestate['skippedrevs']): - newnode = _solveone(ui, repo, curctx, evolvestate, False, - confirm, progresscb, category, - lastsolved=lastsolved, - stacktmplt=stacktmplt) - if newnode[0]: - evolvestate['replacements'][curctx.node()] = newnode[1] - lastsolved = newnode[1] - else: - evolvestate['skippedrevs'].append(curctx.node()) - seen += 1 + if (curctx.node() not in evolvestate[b'replacements'] + and curctx.node() not in evolvestate[b'skippedrevs']): + newnode = _solveone(ui, repo, curctx, evolvestate, False, + confirm, progresscb, category, + lastsolved=lastsolved, + stacktmplt=stacktmplt) + if newnode[0]: + evolvestate[b'replacements'][curctx.node()] = newnode[1] + lastsolved = newnode[1] + else: + evolvestate[b'skippedrevs'].append(curctx.node()) + seen += 1 def _continuecontentdivergent(ui, repo, evolvestate, progresscb): """function to continue the interrupted content-divergence resolution.""" - tr = repo.transaction('evolve') + tr = repo.transaction(b'evolve') with util.acceptintervention(tr): - divergent = evolvestate['divergent'] - base = evolvestate['base'] + divergent = evolvestate[b'divergent'] + base = evolvestate[b'base'] repo = repo.unfiltered() - if evolvestate['relocating']: + if evolvestate[b'relocating']: newother = _completerelocation(ui, repo, evolvestate) - current = repo[evolvestate['current']] + current = repo[evolvestate[b'current']] obsolete.createmarkers(repo, [(current, (repo[newother],))], - operation='evolve') - evolvestate['relocating'] = False - evolvestate['relocated'] = newother - evolvestate['temprevs'].append(newother) - evolvestate['other-divergent'] = newother + operation=b'evolve') + evolvestate[b'relocating'] = False + evolvestate[b'relocated'] = newother + evolvestate[b'temprevs'].append(newother) + evolvestate[b'other-divergent'] = newother # continue the resolution by merging the content-divergence _mergecontentdivergents(repo, progresscb, repo[divergent], @@ -1998,16 +2014,16 @@ repo[base], evolvestate) - other = evolvestate['other-divergent'] + other = evolvestate[b'other-divergent'] ret = _completecontentdivergent(ui, repo, progresscb, repo[divergent], repo[other], repo[base], evolvestate) - origdivergent = evolvestate['orig-divergent'] - evolvestate['replacements'][origdivergent] = ret[1] + origdivergent = evolvestate[b'orig-divergent'] + evolvestate[b'replacements'][origdivergent] = ret[1] # logic to continue the public content-divergent - publicnode = evolvestate.get('public-divergent') + publicnode = evolvestate.get(b'public-divergent') if 
publicnode: res, newnode = ret if not res: @@ -2030,19 +2046,19 @@ phase-divergence""" # need to start transaction for bookmark changes - with repo.transaction('evolve'): + with repo.transaction(b'evolve'): node = _completerelocation(ui, repo, evolvestate) - evolvestate['temprevs'].append(node) + evolvestate[b'temprevs'].append(node) # resolving conflicts can lead to empty wdir and node can be None in # those cases - ctx = repo[evolvestate['current']] - newctx = repo[node] if node is not None else repo['.'] - obsolete.createmarkers(repo, [(ctx, (newctx,))], operation='evolve') + ctx = repo[evolvestate[b'current']] + newctx = repo[node] if node is not None else repo[b'.'] + obsolete.createmarkers(repo, [(ctx, (newctx,))], operation=b'evolve') # now continuing the phase-divergence resolution part - prec = repo[evolvestate['precursor']] + prec = repo[evolvestate[b'precursor']] retvalue = _resolvephasedivergent(ui, repo, prec, newctx) - evolvestate['replacements'][ctx.node()] = retvalue[1] + evolvestate[b'replacements'][ctx.node()] = retvalue[1] def _completeorphan(ui, repo, evolvestate): """function to complete the interrupted orphan resolution""" @@ -2050,47 +2066,47 @@ node = _completerelocation(ui, repo, evolvestate) # resolving conflicts can lead to empty wdir and node can be None in # those cases - ctx = repo[evolvestate['current']] + ctx = repo[evolvestate[b'current']] if node is None: - repo.ui.status(_("evolution of %d:%s created no changes" - " to commit\n") % (ctx.rev(), ctx)) + repo.ui.status(_(b"evolution of %d:%s created no changes" + b" to commit\n") % (ctx.rev(), ctx)) replacement = () else: replacement = (repo[node],) - obsolete.createmarkers(repo, [(ctx, replacement)], operation='evolve') + obsolete.createmarkers(repo, [(ctx, replacement)], operation=b'evolve') # make sure we are continuing evolve and not `hg next --evolve` - if evolvestate['command'] == 'evolve': - evolvestate['replacements'][ctx.node()] = node - if evolvestate['orphanmerge']: + if evolvestate[b'command'] == b'evolve': + evolvestate[b'replacements'][ctx.node()] = node + if evolvestate[b'orphanmerge']: # processing a merge changeset with both parents obsoleted, # stabilized on second parent, insert in front of list to # re-process to stabilize on first parent - evolvestate['revs'].insert(0, repo[node].rev()) - evolvestate['orphanmerge'] = False + evolvestate[b'revs'].insert(0, repo[node].rev()) + evolvestate[b'orphanmerge'] = False def _completerelocation(ui, repo, evolvestate): """function to complete the interrupted relocation of a commit return the new node formed """ - orig = repo[evolvestate['current']] + orig = repo[evolvestate[b'current']] ctx = orig - source = ctx.extra().get('source') + source = ctx.extra().get(b'source') extra = {} if source: - extra['source'] = source - extra['intermediate-source'] = ctx.hex() + extra[b'source'] = source + extra[b'intermediate-source'] = ctx.hex() else: - extra['source'] = ctx.hex() + extra[b'source'] = ctx.hex() user = ctx.user() date = ctx.date() message = ctx.description() - ui.status(_('evolving %d:%s "%s"\n') % (ctx.rev(), ctx, - message.split('\n', 1)[0])) + ui.status(_(b'evolving %d:%s "%s"\n') % (ctx.rev(), ctx, + message.split(b'\n', 1)[0])) targetphase = max(ctx.phase(), phases.draft) - overrides = {('phases', 'new-commit'): targetphase} + overrides = {(b'phases', b'new-commit'): targetphase} ctxparents = orig.parents() if len(ctxparents) == 2: @@ -2117,7 +2133,7 @@ else: # both the parents were obsoleted, if orphanmerge is set, we # are processing the 
second parent first (to keep parent order) - if evolvestate.get('orphanmerge'): + if evolvestate.get(b'orphanmerge'): with repo.dirstate.parentchange(): repo.dirstate.setparents(ctxparents[0].node(), currentp1) @@ -2126,7 +2142,7 @@ with repo.dirstate.parentchange(): repo.dirstate.setparents(repo.dirstate.parents()[0], nodemod.nullid) - with repo.ui.configoverride(overrides, 'evolve-continue'): + with repo.ui.configoverride(overrides, b'evolve-continue'): node = repo.commit(text=message, user=user, date=date, extra=extra) return node
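The structural change in this file, apart from the string byteification, is visible in the evolve() hunk above: the command now takes repo.wlock() and repo.lock() once at the top and delegates its old body to _performevolve(), while continueevolve() and the per-revision transaction loop stop taking their own locks. The shape of that refactor, reduced to a runnable toy (toyrepo and its plain threading.Lock are stand-ins invented for this sketch, not Mercurial's lock objects)::

    import threading

    class toyrepo(object):
        # toy stand-in for a repository: one lock guarding mutable state
        def __init__(self):
            self._lock = threading.Lock()
            self.revs = [1, 2, 3]

        def lock(self):
            return self._lock

    def _performevolve(repo):
        # everything that reads or rewrites state happens here, inside
        # the critical section opened by the caller
        selected = list(repo.revs)         # selection now happens under the lock
        return [r * 10 for r in selected]  # stand-in for resolving each rev

    def evolve(repo):
        # the entry point takes the lock up front and delegates, so
        # selection and resolution share a single critical section
        with repo.lock():
            return _performevolve(repo)

    print(evolve(toyrepo()))

Taking the locks before _selectrevs() runs means the set of troubled revisions cannot change between being computed and being evolved.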
--- a/hgext3rd/evolve/exthelper.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/evolve/exthelper.py Tue Sep 24 12:42:27 2019 +0200 @@ -83,12 +83,12 @@ self._duckpunchers = [] self.cmdtable = {} self.command = registrar.command(self.cmdtable) - if '^init' in commands.table: + if b'^init' in commands.table: olddoregister = self.command._doregister def _newdoregister(self, name, *args, **kwargs): if kwargs.pop('helpbasic', False): - name = '^' + name + name = r'^' + name return olddoregister(self, name, *args, **kwargs) self.command._doregister = _newdoregister @@ -277,9 +277,9 @@ else: for opt in opts: if not isinstance(opt, tuple): - raise error.ProgrammingError('opts must be list of tuples') + raise error.ProgrammingError(b'opts must be list of tuples') if len(opt) not in (4, 5): - msg = 'each opt tuple must contain 4 or 5 values' + msg = b'each opt tuple must contain 4 or 5 values' raise error.ProgrammingError(msg) def dec(wrapper):
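Most replacements in this changeset follow the rule this hunk makes explicit: literals consumed by Mercurial itself (command tables such as b'^init', config names, ui text) become bytes, while **kwargs keys such as 'helpbasic' stay native strings, because keyword-argument names are always str on Python 3. A small self-contained illustration of the split (example() and config_key are names made up for the sketch)::

    def example(**opts):
        # keyword-argument keys are native str on Python 3, so option
        # lookups like opts.get('dry_run') keep unprefixed literals
        return opts.get('dry_run', False)

    # values handed to Mercurial APIs (config keys, revsets, ui output)
    # are bytes on both Python 2 and 3, hence the b'' prefixes above
    config_key = (b'experimental', b'evolution')

    print(example(dry_run=True), config_key)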
--- a/hgext3rd/evolve/firstmergecache.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/evolve/firstmergecache.py Tue Sep 24 12:42:27 2019 +0200 @@ -41,7 +41,7 @@ @localrepo.unfilteredmethod def destroyed(self): - if 'firstmergecach' in vars(self): + if r'firstmergecach' in vars(self): self.firstmergecache.clear() super(firstmergecacherepo, self).destroyed() @@ -56,16 +56,16 @@ class firstmergecache(genericcaches.changelogsourcebase): - _filepath = 'evoext-firstmerge-00' - _cachename = 'evo-ext-firstmerge' + _filepath = b'evoext-firstmerge-00' + _cachename = b'evo-ext-firstmerge' def __init__(self): super(firstmergecache, self).__init__() - self._data = array.array('l') + self._data = array.array(r'l') def get(self, rev): if len(self._data) <= rev: - raise error.ProgrammingError('firstmergecache must be warmed before use') + raise error.ProgrammingError(b'firstmergecache must be warmed before use') return self._data[rev] def _updatefrom(self, repo, data): @@ -75,9 +75,9 @@ total = len(data) def progress(pos, rev=None): - revstr = '' if rev is None else ('rev %d' % rev) - compat.progress(repo.ui, 'updating firstmerge cache', - pos, revstr, unit='revision', total=total) + revstr = b'' if rev is None else (b'rev %d' % rev) + compat.progress(repo.ui, b'updating firstmerge cache', + pos, revstr, unit=b'revision', total=total) progress(0) for idx, rev in enumerate(data, 1): assert rev == len(self._data), (rev, len(self._data)) @@ -108,7 +108,7 @@ Subclasses MUST overide this method to actually affect the cache data. """ super(firstmergecache, self).clear() - self._data = array.array('l') + self._data = array.array(r'l') # crude version of a cache, to show the kind of information we have to store @@ -117,7 +117,7 @@ assert repo.filtername is None data = repo.cachevfs.tryread(self._filepath) - self._data = array.array('l') + self._data = array.array(r'l') if not data: self._cachekey = self.emptykey else: @@ -136,12 +136,12 @@ return try: - cachefile = repo.cachevfs(self._filepath, 'w', atomictemp=True) + cachefile = repo.cachevfs(self._filepath, b'w', atomictemp=True) headerdata = self._serializecachekey() cachefile.write(headerdata) cachefile.write(compat.arraytobytes(self._data)) cachefile.close() self._ondiskkey = self._cachekey except (IOError, OSError) as exc: - repo.ui.log('firstmergecache', 'could not write update %s\n' % exc) - repo.ui.debug('firstmergecache: could not write update %s\n' % exc) + repo.ui.log(b'firstmergecache', b'could not write update %s\n' % exc) + repo.ui.debug(b'firstmergecache: could not write update %s\n' % exc)
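Two spots in this file need native strings rather than bytes, which is why the replacements use r'...' here: array typecodes and the attribute names reported by vars() are str on Python 3. A tiny runnable sketch of both cases (the cache class and its _data attribute are invented for the example)::

    import array

    class cache(object):
        def warm(self):
            # array typecodes must be native str, hence r'l' in the diff
            self._data = array.array('l', [0, 1, 2])

        def destroyed(self):
            # attribute names in vars() are native str as well
            if '_data' in vars(self):
                del self._data

    c = cache()
    c.warm()
    print(len(c._data))        # 3
    c.destroyed()
    print('_data' in vars(c))  # False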
--- a/hgext3rd/evolve/genericcaches.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/evolve/genericcaches.py Tue Sep 24 12:42:27 2019 +0200 @@ -31,7 +31,7 @@ # default key used for an empty cache emptykey = () - _cachekeyspec = '' # used for serialization + _cachekeyspec = b'' # used for serialization _cachename = None # used for debug message @abc.abstractmethod @@ -42,7 +42,7 @@ @util.propertycache def _cachekeystruct(self): # dynamic property to help subclass to change it - return struct.Struct('>' + self._cachekeyspec) + return struct.Struct(b'>' + self._cachekeyspec) @util.propertycache def _cachekeysize(self): @@ -112,7 +112,7 @@ if newkey == self._cachekey: return if reset or self._cachekey is None: - repo.ui.log('cache', 'strip detected, %s cache reset\n' + repo.ui.log(b'cache', b'strip detected, %s cache reset\n' % self._cachename) self.clear(reset=True) @@ -120,7 +120,7 @@ self._updatefrom(repo, data) duration = util.timer() - starttime summary = self._updatesummary(data) - repo.ui.log('cache', 'updated %s in %.4f seconds (%s)\n', + repo.ui.log(b'cache', b'updated %s in %.4f seconds (%s)\n', self._cachename, duration, summary) self._cachekey = newkey @@ -144,7 +144,7 @@ # default key used for an empty cache emptykey = (0, node.nullid) - _cachekeyspec = 'i20s' + _cachekeyspec = b'i20s' _cachename = None # used for debug message # Useful "public" function (no need to override them) @@ -172,4 +172,4 @@ return self._fetchchangelogdata(self._cachekey, repo.changelog) def _updatesummary(self, data): - return '%ir' % len(data) + return b'%ir' % len(data)
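The cache key handling keeps its layout and only switches the format strings to bytes: _cachekeyspec becomes b'i20s' and the Struct is built from b'>' plus that spec, which works because the struct module accepts bytes format strings. A standalone sketch of packing and unpacking such a key (revision 42 and the all-zero node are placeholder values)::

    import struct

    # big-endian signed int followed by a 20-byte node, as in b'>i20s'
    key = struct.Struct(b'>i20s')
    packed = key.pack(42, b'\x00' * 20)
    tiprev, tipnode = key.unpack(packed)
    print(tiprev, len(tipnode))   # 42 20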
--- a/hgext3rd/evolve/hack/drophack.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/evolve/hack/drophack.py Tue Sep 24 12:42:27 2019 +0200 @@ -34,7 +34,7 @@ user = ostop[0] - ostart[0] sys = ostop[1] - ostart[1] comb = user + sys - ui.write("%s: wall %f comb %f user %f sys %f\n" + ui.write(b"%s: wall %f comb %f user %f sys %f\n" % (caption, wall, comb, user, sys)) def obsmarkerchainfrom(obsstore, nodes): @@ -66,13 +66,13 @@ repo = repo.unfiltered() repo.destroying() oldmarkers = list(repo.obsstore._all) - util.rename(repo.svfs.join('obsstore'), - repo.vfs.join('obsstore.prestrip')) + util.rename(repo.svfs.join(b'obsstore'), + repo.vfs.join(b'obsstore.prestrip')) del repo.obsstore # drop the cache newstore = repo.obsstore assert not newstore # should be empty after rename newmarkers = [m for m in oldmarkers if m not in markers] - tr = repo.transaction('drophack') + tr = repo.transaction(b'drophack') try: newstore.add(tr, newmarkers) tr.close() @@ -81,7 +81,7 @@ repo.destroyed() -@command('drop', [('r', 'rev', [], 'revision to update')], _('[-r] revs')) +@command(b'drop', [(b'r', b'rev', [], b'revision to update')], _(b'[-r] revs')) def cmddrop(ui, repo, *revs, **opts): """I'm hacky do not use me! @@ -97,11 +97,11 @@ revs = list(revs) revs.extend(opts['rev']) if not revs: - revs = ['.'] + revs = [b'.'] # get the changeset revs = scmutil.revrange(repo, revs) if not revs: - ui.write_err('no revision to drop\n') + ui.write_err(b'no revision to drop\n') return 1 # lock from the beginning to prevent race wlock = lock = None @@ -109,49 +109,49 @@ wlock = repo.wlock() lock = repo.lock() # check they have no children - if repo.revs('%ld and public()', revs): - ui.write_err('cannot drop public revision') + if repo.revs(b'%ld and public()', revs): + ui.write_err(b'cannot drop public revision') return 1 - if repo.revs('children(%ld) - %ld', revs, revs): - ui.write_err('cannot drop revision with children') + if repo.revs(b'children(%ld) - %ld', revs, revs): + ui.write_err(b'cannot drop revision with children') return 1 - if repo.revs('. and %ld', revs): - newrevs = repo.revs('max(::. - %ld)', revs) + if repo.revs(b'. and %ld', revs): + newrevs = repo.revs(b'max(::. 
- %ld)', revs) if newrevs: assert len(newrevs) == 1 newrev = newrevs.first() else: newrev = -1 commands.update(ui, repo, newrev) - ui.status(_('working directory now at %s\n') % repo[newrev]) + ui.status(_(b'working directory now at %s\n') % repo[newrev]) # get all markers and successors up to root nodes = [repo[r].node() for r in revs] - with timed(ui, 'search obsmarker'): + with timed(ui, b'search obsmarker'): markers = set(obsmarkerchainfrom(repo.obsstore, nodes)) - ui.write('%i obsmarkers found\n' % len(markers)) + ui.write(b'%i obsmarkers found\n' % len(markers)) cl = repo.unfiltered().changelog - with timed(ui, 'search nodes'): + with timed(ui, b'search nodes'): allnodes = set(nodes) allnodes.update(m[0] for m in markers if cl.hasnode(m[0])) - ui.write('%i nodes found\n' % len(allnodes)) + ui.write(b'%i nodes found\n' % len(allnodes)) cl = repo.changelog visiblenodes = set(n for n in allnodes if cl.hasnode(n)) # check constraint again - if repo.revs('%ln and public()', visiblenodes): - ui.write_err('cannot drop public revision') + if repo.revs(b'%ln and public()', visiblenodes): + ui.write_err(b'cannot drop public revision') return 1 - if repo.revs('children(%ln) - %ln', visiblenodes, visiblenodes): - ui.write_err('cannot drop revision with children') + if repo.revs(b'children(%ln) - %ln', visiblenodes, visiblenodes): + ui.write_err(b'cannot drop revision with children') return 1 if markers: # strip them - with timed(ui, 'strip obsmarker'): + with timed(ui, b'strip obsmarker'): stripmarker(ui, repo, markers) # strip the changeset - with timed(ui, 'strip nodes'): - repair.strip(ui, repo, list(allnodes), backup="all", - topic='drophack') + with timed(ui, b'strip nodes'): + repair.strip(ui, repo, list(allnodes), backup=b"all", + topic=b'drophack') finally: lockmod.release(lock, wlock)
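The timed() helper bracketing the expensive steps above only changes its output literal to bytes; the measurement itself (wall clock plus user and system CPU time from os.times()) is untouched. A rough standalone equivalent, with sys.stdout standing in for Mercurial's ui object (this is a re-implementation for illustration, not the extension's own code)::

    import contextlib
    import os
    import sys
    import time

    @contextlib.contextmanager
    def timed(out, caption):
        # report wall clock and CPU time for the enclosed block;
        # os.times()[0] and [1] are user and system CPU time
        wallstart = time.time()
        cpustart = os.times()
        yield
        wall = time.time() - wallstart
        cpustop = os.times()
        user = cpustop[0] - cpustart[0]
        syst = cpustop[1] - cpustart[1]
        out.write("%s: wall %f comb %f user %f sys %f\n"
                  % (caption, wall, user + syst, user, syst))

    with timed(sys.stdout, "search obsmarker"):
        sum(range(10 ** 6))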
--- a/hgext3rd/evolve/legacy.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/evolve/legacy.py Tue Sep 24 12:42:27 2019 +0200 @@ -45,20 +45,20 @@ """ if not repo.local(): return - evolveopts = ui.configlist('experimental', 'evolution') + evolveopts = ui.configlist(b'experimental', b'evolution') if not evolveopts: - evolveopts = 'all' - ui.setconfig('experimental', 'evolution', evolveopts) + evolveopts = b'all' + ui.setconfig(b'experimental', b'evolution', evolveopts) for arg in sys.argv: - if 'debugc' in arg: + if r'debugc' in arg: break else: - data = repo.vfs.tryread('obsolete-relations') + data = repo.vfs.tryread(b'obsolete-relations') if not data: - data = repo.svfs.tryread('obsoletemarkers') + data = repo.svfs.tryread(b'obsoletemarkers') if data: - raise error.Abort('old format of obsolete marker detected!\n' - 'run `hg debugconvertobsolete` once.') + raise error.Abort(b'old format of obsolete marker detected!\n' + b'run `hg debugconvertobsolete` once.') def _obsdeserialize(flike): """read a file like object serialized with _obsserialize @@ -77,7 +77,7 @@ cmdtable = {} command = commandfunc(cmdtable) -@command('debugconvertobsolete', [], '') +@command(b'debugconvertobsolete', [], b'') def cmddebugconvertobsolete(ui, repo): """import markers from an .hg/obsolete-relations file""" cnt = 0 @@ -86,13 +86,13 @@ some = False try: unlink = [] - tr = repo.transaction('convert-obsolete') + tr = repo.transaction(b'convert-obsolete') try: repo._importoldobsolete = True store = repo.obsstore ### very first format try: - f = repo.vfs('obsolete-relations') + f = repo.vfs(b'obsolete-relations') try: some = True for line in f: @@ -101,30 +101,30 @@ prec = bin(objhex) sucs = (suc == nullid) and [] or [suc] meta = { - 'date': '%i %i' % makedate(), - 'user': ui.username(), + b'date': b'%i %i' % makedate(), + b'user': ui.username(), } try: store.create(tr, prec, sucs, 0, metadata=meta) cnt += 1 except ValueError: - repo.ui.write_err("invalid old marker line: %s" + repo.ui.write_err(b"invalid old marker line: %s" % (line)) err += 1 finally: f.close() - unlink.append(repo.vfs.join('obsolete-relations')) + unlink.append(repo.vfs.join(b'obsolete-relations')) except IOError: pass ### second (json) format - data = repo.svfs.tryread('obsoletemarkers') + data = repo.svfs.tryread(b'obsoletemarkers') if data: some = True for oldmark in json.loads(data): - del oldmark['id'] # dropped for now - del oldmark['reason'] # unused until then - oldobject = str(oldmark.pop('object')) - oldsubjects = [str(s) for s in oldmark.pop('subjects', [])] + del oldmark[r'id'] # dropped for now + del oldmark[r'reason'] # unused until then + oldobject = str(oldmark.pop(r'object')) + oldsubjects = [str(s) for s in oldmark.pop(r'subjects', [])] lookup_errors = (error.RepoLookupError, error.LookupError) if len(oldobject) != 40: try: @@ -137,7 +137,7 @@ except lookup_errors: pass - oldmark['date'] = '%i %i' % tuple(oldmark['date']) + oldmark[r'date'] = r'%i %i' % tuple(oldmark[r'date']) meta = dict((k.encode('utf-8'), v.encode('utf-8')) for k, v in oldmark.items()) try: @@ -147,11 +147,11 @@ 0, metadata=meta) cnt += 1 except ValueError: - msg = "invalid marker %s -> %s\n" + msg = b"invalid marker %s -> %s\n" msg %= (oldobject, oldsubjects) repo.ui.write_err(msg) err += 1 - unlink.append(repo.svfs.join('obsoletemarkers')) + unlink.append(repo.svfs.join(b'obsoletemarkers')) tr.close() for path in unlink: util.unlink(path) @@ -161,12 +161,12 @@ del repo._importoldobsolete lock.release() if not some: - ui.warn(_('nothing to do\n')) - 
ui.status('%i obsolete marker converted\n' % cnt) + ui.warn(_(b'nothing to do\n')) + ui.status(b'%i obsolete marker converted\n' % cnt) if err: - ui.write_err('%i conversion failed. check you graph!\n' % err) + ui.write_err(b'%i conversion failed. check you graph!\n' % err) -@command('debugrecordpruneparents', [], '') +@command(b'debugrecordpruneparents', [], b'') def cmddebugrecordpruneparents(ui, repo): """add parent data to prune markers when possible @@ -174,14 +174,14 @@ If the pruned node is locally known, it creates a new marker with parent data. """ - pgop = 'reading markers' + pgop = b'reading markers' # lock from the beginning to prevent race wlock = lock = tr = None try: wlock = repo.wlock() lock = repo.lock() - tr = repo.transaction('recordpruneparents') + tr = repo.transaction(b'recordpruneparents') unfi = repo.unfiltered() nm = unfi.changelog.nodemap store = repo.obsstore @@ -196,7 +196,7 @@ store.create(tr, prec=mark[0], succs=mark[1], flag=mark[2], metadata=dict(mark[3]), parents=parents) if len(store._all) - before: - ui.write(_('created new markers for %i\n') % rev) + ui.write(_(b'created new markers for %i\n') % rev) ui.progress(pgop, idx, total=pgtotal) tr.close() ui.progress(pgop, None)
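
As the abort message above explains, a repository still carrying the pre-obsstore marker files (`obsolete-relations` or `obsoletemarkers`) is expected to run the conversion once. A typical session, with an illustrative count::

    $ hg debugconvertobsolete
    3 obsolete marker converted

If some entries cannot be converted, the command reports the number of failed conversions with the warning shown above.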
--- a/hgext3rd/evolve/metadata.py	Fri Jul 19 16:26:48 2019 +0200
+++ b/hgext3rd/evolve/metadata.py	Tue Sep 24 12:42:27 2019 +0200
@@ -5,7 +5,7 @@
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
-__version__ = b'9.1.1.dev'
+__version__ = b'9.2.0.dev'
 testedwith = b'4.5.2 4.6.2 4.7 4.8 4.9 5.0 5.1'
 minimumhgversion = b'4.5'
 buglink = b'https://bz.mercurial-scm.org/'
--- a/hgext3rd/evolve/obscache.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/evolve/obscache.py Tue Sep 24 12:42:27 2019 +0200 @@ -50,10 +50,10 @@ length, cachekey will be set to None.""" # default value obsstoresize = 0 - keydata = '' + keydata = b'' # try to get actual data from the obsstore try: - with self.svfs('obsstore') as obsfile: + with self.svfs(b'obsstore') as obsfile: obsfile.seek(0, 2) obsstoresize = obsfile.tell() if index is None: @@ -82,11 +82,11 @@ def markersfrom(obsstore, byteoffset, firstmarker): if not firstmarker: return list(obsstore) - elif '_all' in vars(obsstore): + elif r'_all' in vars(obsstore): # if the data are in memory, just use that return obsstore._all[firstmarker:] else: - obsdata = obsstore.svfs.tryread('obsstore') + obsdata = obsstore.svfs.tryread(b'obsstore') return obsolete._readmarkers(obsdata, byteoffset)[1] @@ -178,7 +178,7 @@ reset, revs, obsmarkers, obskeypair = upgrade if reset or self._cachekey is None: - repo.ui.log('evoext-cache', 'strip detected, %s cache reset\n' % self._cachename) + repo.ui.log(b'evoext-cache', b'strip detected, %s cache reset\n' % self._cachename) self.clear(reset=True) starttime = util.timer() @@ -186,7 +186,7 @@ obsmarkers = list(obsmarkers) self._updatefrom(repo, revs, obsmarkers) duration = util.timer() - starttime - repo.ui.log('evoext-cache', 'updated %s in %.4f seconds (%dr, %do)\n', + repo.ui.log(b'evoext-cache', b'updated %s in %.4f seconds (%dr, %do)\n', self._cachename, duration, len(revs), len(obsmarkers)) # update the key from the new data @@ -314,10 +314,10 @@ zero. That would be especially useful for the '.pending' overlay. """ - _filepath = 'evoext-obscache-00' - _headerformat = '>q20sQQ20s' + _filepath = b'evoext-obscache-00' + _headerformat = b'>q20sQQ20s' - _cachename = 'evo-ext-obscache' # used for error message + _cachename = b'evo-ext-obscache' # used for error message def __init__(self, repo): super(obscache, self).__init__() @@ -339,7 +339,7 @@ def _setdata(self, data): """set a new bytearray data, invalidating the 'get' shortcut if needed""" self._data = data - if 'get' in vars(self): + if r'get' in vars(self): del self.get def clear(self, reset=False): @@ -403,15 +403,15 @@ return try: - cachefile = repo.cachevfs(self._filepath, 'w', atomictemp=True) + cachefile = repo.cachevfs(self._filepath, b'w', atomictemp=True) headerdata = struct.pack(self._headerformat, *self._cachekey) cachefile.write(headerdata) cachefile.write(self._data) cachefile.close() self._ondiskkey = self._cachekey except (IOError, OSError) as exc: - repo.ui.log('obscache', 'could not write update %s\n' % exc) - repo.ui.debug('obscache: could not write update %s\n' % exc) + repo.ui.log(b'obscache', b'could not write update %s\n' % exc) + repo.ui.debug(b'obscache: could not write update %s\n' % exc) def load(self, repo): """load data from disk""" @@ -447,10 +447,10 @@ # will be about as fast... 
if not obscache.uptodate(repo): if repo.currenttransaction() is None: - repo.ui.log('evoext-cache', - 'obscache is out of date, ' - 'falling back to slower obsstore version\n') - repo.ui.debug('obscache is out of date\n') + repo.ui.log(b'evoext-cache', + b'obscache is out of date, ' + b'falling back to slower obsstore version\n') + repo.ui.debug(b'obscache is out of date\n') return orig(repo) else: # If a transaction is open, it is worthwhile to update and use @@ -465,9 +465,9 @@ @eh.uisetup def cachefuncs(ui): - orig = obsolete.cachefuncs['obsolete'] + orig = obsolete.cachefuncs[b'obsolete'] wrapped = lambda repo: _computeobsoleteset(orig, repo) - obsolete.cachefuncs['obsolete'] = wrapped + obsolete.cachefuncs[b'obsolete'] = wrapped @eh.reposetup def setupcache(ui, repo): @@ -476,7 +476,7 @@ @localrepo.unfilteredmethod def destroyed(self): - if 'obsstore' in vars(self): + if r'obsstore' in vars(self): self.obsstore.obscache.clear() super(obscacherepo, self).destroyed()
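
The obscache header written by `save()` uses the fixed `_headerformat` shown above (`'>q20sQQ20s'`, 64 bytes). A small sketch of how such a header round-trips through `struct`; the field names used here (tip revision, tip node, obsstore size, marker count, key hash) are an assumption for illustration, not a description of the real cache key layout::

    import struct

    HEADERFORMAT = '>q20sQQ20s'  # same layout as obscache._headerformat

    def packheader(tiprev, tipnode, obssize, markercount, keyhash):
        return struct.pack(HEADERFORMAT, tiprev, tipnode,
                           obssize, markercount, keyhash)

    def unpackheader(data):
        size = struct.calcsize(HEADERFORMAT)
        return struct.unpack(HEADERFORMAT, data[:size])

    header = packheader(42, b'\x00' * 20, 4096, 12, b'\xff' * 20)
    assert len(header) == 64
    assert unpackheader(header) == (42, b'\x00' * 20, 4096, 12, b'\xff' * 20)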
--- a/hgext3rd/evolve/obsdiscovery.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/evolve/obsdiscovery.py Tue Sep 24 12:42:27 2019 +0200 @@ -60,11 +60,11 @@ obsexcmsg = utility.obsexcmsg # Config -eh.configitem('experimental', 'evolution.obsdiscovery', True) -eh.configitem('experimental', 'obshashrange', True) -eh.configitem('experimental', 'obshashrange.warm-cache', 'auto') -eh.configitem('experimental', 'obshashrange.max-revs', None) -eh.configitem('experimental', 'obshashrange.lru-size', 2000) +eh.configitem(b'experimental', b'evolution.obsdiscovery', True) +eh.configitem(b'experimental', b'obshashrange', True) +eh.configitem(b'experimental', b'obshashrange.warm-cache', b'auto') +eh.configitem(b'experimental', b'obshashrange.max-revs', None) +eh.configitem(b'experimental', b'obshashrange.lru-size', 2000) ################################## ### Code performing discovery ### @@ -76,7 +76,7 @@ missing = set() starttime = util.timer() - heads = local.revs('heads(%ld)', probeset) + heads = local.revs(b'heads(%ld)', probeset) local.stablerange.warmup(local) rangelength = local.stablerange.rangelength @@ -104,8 +104,8 @@ local.obsstore.rangeobshashcache.update(local) querycount = 0 - compat.progress(ui, _("comparing obsmarker with other"), querycount, - unit=_("queries")) + compat.progress(ui, _(b"comparing obsmarker with other"), querycount, + unit=_(b"queries")) overflow = [] while sample or overflow: if overflow: @@ -117,7 +117,7 @@ overflow = sample[samplesize:] sample = sample[:samplesize] elif len(sample) < samplesize: - ui.debug("query %i; add more sample (target %i, current %i)\n" + ui.debug(b"query %i; add more sample (target %i, current %i)\n" % (querycount, samplesize, len(sample))) # we need more sample ! needed = samplesize - len(sample) @@ -143,7 +143,7 @@ nbsample = len(sample) maxsize = max([rangelength(local, r) for r in sample]) - ui.debug("query %i; sample size is %i, largest range %i\n" + ui.debug(b"query %i; sample size is %i, largest range %i\n" % (querycount, nbsample, maxsize)) nbreplies = 0 replies = list(_queryrange(ui, local, remote, sample)) @@ -160,15 +160,15 @@ addentry(new) assert nbsample == nbreplies querycount += 1 - compat.progress(ui, _("comparing obsmarker with other"), querycount, - unit=_("queries")) - compat.progress(ui, _("comparing obsmarker with other"), None) + compat.progress(ui, _(b"comparing obsmarker with other"), querycount, + unit=_(b"queries")) + compat.progress(ui, _(b"comparing obsmarker with other"), None) local.obsstore.rangeobshashcache.save(local) duration = util.timer() - starttime - logmsg = ('obsdiscovery, %d/%d mismatch' - ' - %d obshashrange queries in %.4f seconds\n') + logmsg = (b'obsdiscovery, %d/%d mismatch' + b' - %d obshashrange queries in %.4f seconds\n') logmsg %= (len(missing), len(probeset), querycount, duration) - ui.log('evoext-obsdiscovery', logmsg) + ui.log(b'evoext-obsdiscovery', logmsg) ui.debug(logmsg) return sorted(missing) @@ -208,9 +208,9 @@ ranges = stablerange.subrangesclosure(repo, repo.stablerange, revs) else: ranges = [(r, 0) for r in revs] - headers = ('rev', 'node', 'index', 'size', 'depth', 'obshash') - linetemplate = '%12d %12s %12d %12d %12d %12s\n' - headertemplate = linetemplate.replace('d', 's') + headers = (b'rev', b'node', b'index', b'size', b'depth', b'obshash') + linetemplate = b'%12d %12s %12d %12d %12d %12s\n' + headertemplate = linetemplate.replace(b'd', b's') ui.status(headertemplate % headers) repo.obsstore.rangeobshashcache.update(repo) for r in ranges: @@ -262,12 +262,12 @@ ### sqlite 
caching _sqliteschema = [ - """CREATE TABLE obshashrange(rev INTEGER NOT NULL, + r"""CREATE TABLE obshashrange(rev INTEGER NOT NULL, idx INTEGER NOT NULL, obshash BLOB NOT NULL, PRIMARY KEY(rev, idx));""", - "CREATE INDEX range_index ON obshashrange(rev, idx);", - """CREATE TABLE meta(schemaversion INTEGER NOT NULL, + r"CREATE INDEX range_index ON obshashrange(rev, idx);", + r"""CREATE TABLE meta(schemaversion INTEGER NOT NULL, tiprev INTEGER NOT NULL, tipnode BLOB NOT NULL, nbobsmarker INTEGER NOT NULL, @@ -275,17 +275,17 @@ obskey BLOB NOT NULL );""", ] -_queryexist = "SELECT name FROM sqlite_master WHERE type='table' AND name='meta';" -_clearmeta = """DELETE FROM meta;""" -_newmeta = """INSERT INTO meta (schemaversion, tiprev, tipnode, nbobsmarker, obssize, obskey) +_queryexist = r"SELECT name FROM sqlite_master WHERE type='table' AND name='meta';" +_clearmeta = r"""DELETE FROM meta;""" +_newmeta = r"""INSERT INTO meta (schemaversion, tiprev, tipnode, nbobsmarker, obssize, obskey) VALUES (?,?,?,?,?,?);""" -_updateobshash = "INSERT INTO obshashrange(rev, idx, obshash) VALUES (?,?,?);" -_querymeta = "SELECT schemaversion, tiprev, tipnode, nbobsmarker, obssize, obskey FROM meta;" -_queryobshash = "SELECT obshash FROM obshashrange WHERE (rev = ? AND idx = ?);" -_query_max_stored = "SELECT MAX(rev) FROM obshashrange" +_updateobshash = r"INSERT INTO obshashrange(rev, idx, obshash) VALUES (?,?,?);" +_querymeta = r"SELECT schemaversion, tiprev, tipnode, nbobsmarker, obssize, obskey FROM meta;" +_queryobshash = r"SELECT obshash FROM obshashrange WHERE (rev = ? AND idx = ?);" +_query_max_stored = r"SELECT MAX(rev) FROM obshashrange" -_reset = "DELETE FROM obshashrange;" -_delete = "DELETE FROM obshashrange WHERE (rev = ? AND idx = ?);" +_reset = r"DELETE FROM obshashrange;" +_delete = r"DELETE FROM obshashrange WHERE (rev = ? AND idx = ?);" def _affectedby(repo, markers): """return all nodes whose relevant set is affected by this changeset @@ -333,8 +333,8 @@ _schemaversion = 3 - _cachename = 'evo-ext-obshashrange' # used for error message - _filename = 'evoext_obshashrange_v2.sqlite' + _cachename = b'evo-ext-obshashrange' # used for error message + _filename = b'evoext_obshashrange_v2.sqlite' def __init__(self, repo): super(_obshashcache, self).__init__() @@ -353,7 +353,7 @@ self._new.clear() if reset: self._valid = False - if '_con' in vars(self): + if r'_con' in vars(self): del self._con def get(self, rangeid): @@ -362,7 +362,7 @@ # XXX there are issue with cache warming, we hack around it for now if not getattr(self, '_updating', False): if self._cachekey[0] < rangeid[0]: - msg = ('using unwarmed obshashrangecache (%s %s)' + msg = (b'using unwarmed obshashrangecache (%s %s)' % (rangeid[0], self._cachekey[0])) raise error.ProgrammingError(msg) @@ -377,7 +377,7 @@ except (sqlite3.DatabaseError, sqlite3.OperationalError): # something is wrong with the sqlite db # Since this is a cache, we ignore it. 
- if '_con' in vars(self): + if r'_con' in vars(self): del self._con self._new.clear() return value @@ -406,8 +406,8 @@ affected = [] if RESET_ABOVE < len(obsmarkers): # lots of new obsmarkers, probably smarter to reset the cache - repo.ui.log('evoext-cache', 'obshashcache reset - ' - 'many new markers (%d)\n' + repo.ui.log(b'evoext-cache', b'obshashcache reset - ' + b'many new markers (%d)\n' % len(obsmarkers)) reset = True elif obsmarkers: @@ -420,23 +420,23 @@ if r is not None and r <= max_stored] if RESET_ABOVE < len(affected): - repo.ui.log('evoext-cache', 'obshashcache reset - ' - 'new markers affect many changeset (%d)\n' + repo.ui.log(b'evoext-cache', b'obshashcache reset - ' + b'new markers affect many changeset (%d)\n' % len(affected)) reset = True if affected or reset: if not reset: - repo.ui.log('evoext-cache', 'obshashcache clean - ' - 'new markers affect %d changeset and cached ranges\n' + repo.ui.log(b'evoext-cache', b'obshashcache clean - ' + b'new markers affect %d changeset and cached ranges\n' % len(affected)) if con is not None: # always reset for now, the code detecting affect is buggy # so we need to reset more broadly than we would like. try: if repo.stablerange._con is None: - repo.ui.log('evoext-cache', 'obshashcache reset - ' - 'underlying stablerange cache unavailable\n') + repo.ui.log(b'evoext-cache', b'obshashcache reset - ' + b'underlying stablerange cache unavailable\n') reset = True if reset: con.execute(_reset) @@ -447,7 +447,7 @@ for r in ranges: self._data.pop(r, None) except (sqlite3.DatabaseError, sqlite3.OperationalError) as exc: - repo.ui.log('evoext-cache', 'error while updating obshashrange cache: %s' % exc) + repo.ui.log(b'evoext-cache', b'error while updating obshashrange cache: %s' % exc) del self._updating return @@ -457,7 +457,7 @@ # single revision is quite costly) newrevs = [] stop = self._cachekey[0] # tiprev - for h in repo.filtered('immutable').changelog.headrevs(): + for h in repo.filtered(b'immutable').changelog.headrevs(): if h <= stop and h in affected: newrevs.append(h) newrevs.extend(revs) @@ -467,9 +467,9 @@ total = len(revs) def progress(pos, rev=None): - revstr = '' if rev is None else ('rev %d' % rev) - compat.progress(repo.ui, 'updating obshashrange cache', - pos, revstr, unit='revision', total=total) + revstr = b'' if rev is None else (b'rev %d' % rev) + compat.progress(repo.ui, b'updating obshashrange cache', + pos, revstr, unit=b'revision', total=total) # warm the cache for the new revs progress(0) for idx, r in enumerate(revs): @@ -495,7 +495,7 @@ except OSError: return None con = sqlite3.connect(encoding.strfromlocal(self._path), timeout=30, - isolation_level="IMMEDIATE") + isolation_level=r"IMMEDIATE") con.text_factory = bytes return con @@ -528,14 +528,14 @@ repo = repo.unfiltered() try: with repo.lock(): - if 'stablerange' in vars(repo): + if r'stablerange' in vars(repo): repo.stablerange.save(repo) self._save(repo) except error.LockError: # Exceptionnally we are noisy about it since performance impact # is large We should address that before using this more # widely. 
- msg = _('obshashrange cache: skipping save unable to lock repo\n') + msg = _(b'obshashrange cache: skipping save unable to lock repo\n') repo.ui.warn(msg) def _save(self, repo): @@ -548,22 +548,22 @@ # # operational error catch read-only and locked database # IntegrityError catch Unique constraint error that may arise - if '_con' in vars(self): + if r'_con' in vars(self): del self._con self._new.clear() - repo.ui.log('evoext-cache', 'error while saving new data: %s' % exc) - repo.ui.debug('evoext-cache: error while saving new data: %s' % exc) + repo.ui.log(b'evoext-cache', b'error while saving new data: %s' % exc) + repo.ui.debug(b'evoext-cache: error while saving new data: %s' % exc) def _trysave(self, repo): if self._con is None: util.unlinkpath(self._path, ignoremissing=True) - if '_con' in vars(self): + if r'_con' in vars(self): del self._con con = self._db() if con is None: - repo.ui.log('evoext-cache', 'unable to write obshashrange cache' - ' - cannot create database') + repo.ui.log(b'evoext-cache', b'unable to write obshashrange cache' + b' - cannot create database') return with con: for req in _sqliteschema: @@ -580,12 +580,12 @@ # drifting is currently an issue because this means another # process might have already added the cache line we are about # to add. This will confuse sqlite - msg = _('obshashrange cache: skipping write, ' - 'database drifted under my feet\n') + msg = _(b'obshashrange cache: skipping write, ' + b'database drifted under my feet\n') repo.ui.warn(msg) self._new.clear() self._valid = False - if '_con' in vars(self): + if r'_con' in vars(self): del self._con self._valid = False return @@ -618,7 +618,7 @@ class obshashrepo(repo.__class__): @localrepo.unfilteredmethod def destroyed(self): - if 'obsstore' in vars(self): + if r'obsstore' in vars(self): self.obsstore.rangeobshashcache.clear() toplevel = not util.safehasattr(self, '_destroying') if toplevel: @@ -667,7 +667,7 @@ return _obshashrange_v0(peer._repo, ranges) -_indexformat = '>I' +_indexformat = b'>I' _indexsize = _calcsize(_indexformat) def _encrange(node_rangeid): """encode a (node) range""" @@ -685,11 +685,11 @@ def peer_obshashrange_v0(self, ranges): binranges = [_encrange(r) for r in ranges] encranges = encodelist(binranges) - d = self._call("evoext_obshashrange_v1", ranges=encranges) + d = self._call(b"evoext_obshashrange_v1", ranges=encranges) try: return decodelist(d) except ValueError: - self._abort(error.ResponseError(_("unexpected response:"), d)) + self._abort(error.ResponseError(_(b"unexpected response:"), d)) @compat.wireprotocommand(eh, b'evoext_obshashrange_v1', b'ranges') def srv_obshashrange_v1(repo, proto, ranges): @@ -699,16 +699,16 @@ return encodelist(hashes) def _useobshashrange(repo): - base = repo.ui.configbool('experimental', 'obshashrange') + base = repo.ui.configbool(b'experimental', b'obshashrange') if base: - maxrevs = repo.ui.configint('experimental', 'obshashrange.max-revs') + maxrevs = repo.ui.configint(b'experimental', b'obshashrange.max-revs') if maxrevs is not None and maxrevs < len(repo.unfiltered()): base = False return base def _canobshashrange(local, remote): return (_useobshashrange(local) - and remote.capable('_evoext_obshashrange_v1')) + and remote.capable(b'_evoext_obshashrange_v1')) def _obshashrange_capabilities(orig, repo, proto): """wrapper to advertise new capability""" @@ -738,11 +738,11 @@ extensions.wrapfunction(wireprotov1server, 'capabilities', _obshashrange_capabilities) # wrap command content - oldcap, args = 
wireprotov1server.commands['capabilities'] + oldcap, args = wireprotov1server.commands[b'capabilities'] def newcap(repo, proto): return _obshashrange_capabilities(oldcap, repo, proto) - wireprotov1server.commands['capabilities'] = (newcap, args) + wireprotov1server.commands[b'capabilities'] = (newcap, args) ########################################## ### trigger discovery during exchange ### @@ -754,7 +754,7 @@ # exchange of obsmarkers is enabled locally and obsolete.isenabled(pushop.repo, obsolete.exchangeopt) # remote server accept markers - and 'obsolete' in pushop.remote.listkeys('namespaces')) + and b'obsolete' in pushop.remote.listkeys(b'namespaces')) def _pushobshashrange(pushop, commonrevs): repo = pushop.repo.unfiltered() @@ -769,21 +769,21 @@ (_canobshashrange, _pushobshashrange), ] -obsdiscovery_skip_message = """\ +obsdiscovery_skip_message = b"""\ (skipping discovery of obsolescence markers, will exchange everything) (controled by 'experimental.evolution.obsdiscovery' configuration) """ def usediscovery(repo): - return repo.ui.configbool('experimental', 'evolution.obsdiscovery') + return repo.ui.configbool(b'experimental', b'evolution.obsdiscovery') @eh.wrapfunction(exchange, '_pushdiscoveryobsmarkers') def _pushdiscoveryobsmarkers(orig, pushop): if _dopushmarkers(pushop): repo = pushop.repo remote = pushop.remote - obsexcmsg(repo.ui, "computing relevant nodes\n") - revs = list(repo.revs('::%ln', pushop.futureheads)) + obsexcmsg(repo.ui, b"computing relevant nodes\n") + revs = list(repo.revs(b'::%ln', pushop.futureheads)) unfi = repo.unfiltered() if not usediscovery(repo): @@ -803,27 +803,27 @@ # obs markers. return orig(pushop) - obsexcmsg(repo.ui, "looking for common markers in %i nodes\n" + obsexcmsg(repo.ui, b"looking for common markers in %i nodes\n" % len(revs)) - commonrevs = list(unfi.revs('::%ln', pushop.outgoing.commonheads)) + commonrevs = list(unfi.revs(b'::%ln', pushop.outgoing.commonheads)) # find the nodes where the relevant obsmarkers mismatches nodes = discovery(pushop, commonrevs) if nodes: - obsexcmsg(repo.ui, "computing markers relevant to %i nodes\n" + obsexcmsg(repo.ui, b"computing markers relevant to %i nodes\n" % len(nodes)) pushop.outobsmarkers = repo.obsstore.relevantmarkers(nodes) else: - obsexcmsg(repo.ui, "markers already in sync\n") + obsexcmsg(repo.ui, b"markers already in sync\n") pushop.outobsmarkers = [] @eh.extsetup def _installobsmarkersdiscovery(ui): - olddisco = exchange.pushdiscoverymapping['obsmarker'] + olddisco = exchange.pushdiscoverymapping[b'obsmarker'] def newdisco(pushop): _pushdiscoveryobsmarkers(olddisco, pushop) - exchange.pushdiscoverymapping['obsmarker'] = newdisco + exchange.pushdiscoverymapping[b'obsmarker'] = newdisco def buildpullobsmarkersboundaries(pullop, bundle2=True): """small function returning the argument for pull markers call @@ -833,25 +833,25 @@ repo = pullop.repo remote = pullop.remote unfi = repo.unfiltered() - revs = unfi.revs('::(%ln - null)', pullop.common) - boundaries = {'heads': pullop.pulledsubset} + revs = unfi.revs(b'::(%ln - null)', pullop.common) + boundaries = {b'heads': pullop.pulledsubset} if not revs: # nothing common - boundaries['common'] = [node.nullid] + boundaries[b'common'] = [node.nullid] return boundaries if not usediscovery(repo): # discovery disabled by users. 
repo.ui.status(obsdiscovery_skip_message) - boundaries['common'] = [node.nullid] + boundaries[b'common'] = [node.nullid] return boundaries if bundle2 and _canobshashrange(repo, remote): - obsexcmsg(repo.ui, "looking for common markers in %i nodes\n" + obsexcmsg(repo.ui, b"looking for common markers in %i nodes\n" % len(revs)) - boundaries['missing'] = findmissingrange(repo.ui, unfi, pullop.remote, - revs) + boundaries[b'missing'] = findmissingrange(repo.ui, unfi, pullop.remote, + revs) else: - boundaries['common'] = [node.nullid] + boundaries[b'common'] = [node.nullid] return boundaries # merge later for outer layer wrapping
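
All of the knobs this module registers with `eh.configitem` above can be tuned from a configuration file; for example (the values are illustrative, the registered defaults being True, True, 'auto', None and 2000 respectively)::

    [experimental]
    evolution.obsdiscovery = yes
    obshashrange = yes
    obshashrange.warm-cache = auto
    obshashrange.max-revs = 100000
    obshashrange.lru-size = 2000

Setting `obshashrange.max-revs` disables the range-based discovery on repositories larger than the given number of revisions, as implemented in `_useobshashrange` above.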
--- a/hgext3rd/evolve/obsexchange.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/evolve/obsexchange.py Tue Sep 24 12:42:27 2019 +0200 @@ -37,7 +37,7 @@ obsexcmsg = utility.obsexcmsg obsexcprg = utility.obsexcprg -eh.configitem('experimental', 'verbose-obsolescence-exchange', False) +eh.configitem(b'experimental', b'verbose-obsolescence-exchange', False) _bestformat = max(obsolete.formats.keys()) @@ -58,46 +58,46 @@ # <= hg 4.5 from mercurial import wireproto gboptsmap = wireproto.gboptsmap - gboptsmap['evo_obscommon'] = 'nodes' - gboptsmap['evo_missing_nodes'] = 'nodes' + gboptsmap[b'evo_obscommon'] = b'nodes' + gboptsmap[b'evo_missing_nodes'] = b'nodes' @eh.wrapfunction(exchange, '_pullbundle2extraprepare') def _addobscommontob2pull(orig, pullop, kwargs): ret = orig(pullop, kwargs) ui = pullop.repo.ui - if ('obsmarkers' in kwargs - and pullop.remote.capable('_evoext_getbundle_obscommon')): + if (b'obsmarkers' in kwargs + and pullop.remote.capable(b'_evoext_getbundle_obscommon')): boundaries = obsdiscovery.buildpullobsmarkersboundaries(pullop) - if 'common' in boundaries: - common = boundaries['common'] + if b'common' in boundaries: + common = boundaries[b'common'] if common != pullop.common: - obsexcmsg(ui, 'request obsmarkers for some common nodes\n') + obsexcmsg(ui, b'request obsmarkers for some common nodes\n') if common != [node.nullid]: - kwargs['evo_obscommon'] = common - elif 'missing' in boundaries: - missing = boundaries['missing'] + kwargs[b'evo_obscommon'] = common + elif b'missing' in boundaries: + missing = boundaries[b'missing'] if missing: - obsexcmsg(ui, 'request obsmarkers for %d common nodes\n' + obsexcmsg(ui, b'request obsmarkers for %d common nodes\n' % len(missing)) - kwargs['evo_missing_nodes'] = missing + kwargs[b'evo_missing_nodes'] = missing return ret def _getbundleobsmarkerpart(orig, bundler, repo, source, **kwargs): - if not (set(['evo_obscommon', 'evo_missing_nodes']) & set(kwargs)): + if not (set([r'evo_obscommon', r'evo_missing_nodes']) & set(kwargs)): return orig(bundler, repo, source, **kwargs) if kwargs.get('obsmarkers', False): heads = kwargs.get('heads') - if 'evo_obscommon' in kwargs: + if r'evo_obscommon' in kwargs: if heads is None: heads = repo.heads() obscommon = kwargs.get('evo_obscommon', ()) assert obscommon - obsset = repo.unfiltered().set('::%ln - ::%ln', heads, obscommon) + obsset = repo.unfiltered().set(b'::%ln - ::%ln', heads, obscommon) subset = [c.node() for c in obsset] else: common = kwargs.get('common') - subset = [c.node() for c in repo.unfiltered().set('only(%ln, %ln)', heads, common)] + subset = [c.node() for c in repo.unfiltered().set(b'only(%ln, %ln)', heads, common)] subset += kwargs['evo_missing_nodes'] markers = repo.obsstore.relevantmarkers(subset) if util.safehasattr(bundle2, 'buildobsmarkerspart'): @@ -138,36 +138,36 @@ from mercurial import wireproto gboptsmap = wireproto.gboptsmap wireprotov1server = wireproto - gboptsmap['evo_obscommon'] = 'nodes' + gboptsmap[b'evo_obscommon'] = b'nodes' # wrap module content - origfunc = exchange.getbundle2partsmapping['obsmarkers'] + origfunc = exchange.getbundle2partsmapping[b'obsmarkers'] def newfunc(*args, **kwargs): return _getbundleobsmarkerpart(origfunc, *args, **kwargs) - exchange.getbundle2partsmapping['obsmarkers'] = newfunc + exchange.getbundle2partsmapping[b'obsmarkers'] = newfunc extensions.wrapfunction(wireprotov1server, 'capabilities', _obscommon_capabilities) # wrap command content - oldcap, args = wireprotov1server.commands['capabilities'] + oldcap, args = 
wireprotov1server.commands[b'capabilities'] def newcap(repo, proto): return _obscommon_capabilities(oldcap, repo, proto) - wireprotov1server.commands['capabilities'] = (newcap, args) + wireprotov1server.commands[b'capabilities'] = (newcap, args) def _pushobsmarkers(repo, data): tr = lock = None try: lock = repo.lock() - tr = repo.transaction('pushkey: obsolete markers') + tr = repo.transaction(b'pushkey: obsolete markers') new = repo.obsstore.mergemarkers(tr, data) if new is not None: - obsexcmsg(repo.ui, "%i obsolescence markers added\n" % new, True) + obsexcmsg(repo.ui, b"%i obsolescence markers added\n" % new, True) tr.close() finally: lockmod.release(tr, lock) - repo.hook('evolve_pushobsmarkers') + repo.hook(b'evolve_pushobsmarkers') def srv_pushobsmarkers(repo, proto): """wireprotocol command""" @@ -187,18 +187,18 @@ def _getobsmarkersstream(repo, heads=None, common=None): """Get a binary stream for all markers relevant to `::<heads> - ::<common>` """ - revset = '' + revset = b'' args = [] repo = repo.unfiltered() if heads is None: - revset = 'all()' + revset = b'all()' elif heads: - revset += "(::%ln)" + revset += b"(::%ln)" args.append(heads) else: - assert False, 'pulling no heads?' + assert False, b'pulling no heads?' if common: - revset += ' - (::%ln)' + revset += b' - (::%ln)' args.append(common) nodes = [c.node() for c in repo.set(revset, *args)] markers = repo.obsstore.relevantmarkers(nodes) @@ -220,20 +220,20 @@ except (ImportError, AttributeError): from mercurial import wireproto as wireprototypes wireprotov1server = wireprototypes - opts = wireprotov1server.options('', ['heads', 'common'], others) + opts = wireprotov1server.options(b'', [b'heads', b'common'], others) for k, v in opts.items(): - if k in ('heads', 'common'): + if k in (b'heads', b'common'): opts[k] = wireprototypes.decodelist(v) obsdata = _getobsmarkersstream(repo, **opts) finaldata = StringIO() obsdata = obsdata.getvalue() - finaldata.write('%20i' % len(obsdata)) + finaldata.write(b'%20i' % len(obsdata)) finaldata.write(obsdata) finaldata.seek(0) return wireprototypes.streamres(reader=finaldata, v1compressible=True) -abortmsg = "won't exchange obsmarkers through pushkey" -hint = "upgrade your client or server to use the bundle2 protocol" +abortmsg = b"won't exchange obsmarkers through pushkey" +hint = b"upgrade your client or server to use the bundle2 protocol" class HTTPCompatibleAbort(hgwebcommon.ErrorResponse, error.Abort): def __init__(self, message, code, hint=None): @@ -256,4 +256,4 @@ @eh.uisetup def setuppushkeyforbidding(ui): - pushkey._namespaces['obsolete'] = (forbidpushkey, forbidlistkey) + pushkey._namespaces[b'obsolete'] = (forbidpushkey, forbidlistkey)
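
A pattern worth noting in this byteify pass (visible above in the `r'evo_obscommon'` vs `b'evo_obscommon'` split, and throughout the series) is that keyword-argument and attribute names stay native `str` on Python 3, while anything handed to Mercurial's ui, revset or wire-protocol layers is `bytes`. A self-contained sketch of the convention (not evolve code)::

    def handler(write, **kwargs):
        # **kwargs keys are native str under Python 3, hence the r'' literals ...
        if kwargs.get(r'obsmarkers', False):
            # ... while data passed to Mercurial APIs must be bytes
            write(b'obsmarkers requested\n')

    handler(lambda data: None, obsmarkers=True)

Where a whole options dict is needed with bytes keys, the code converts it explicitly, as `pycompat.byteskwargs(opts)` does in the obshistory changes below.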
--- a/hgext3rd/evolve/obshashtree.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/evolve/obshashtree.py Tue Sep 24 12:42:27 2019 +0200 @@ -42,14 +42,14 @@ debug command stayed as an inspection tool. It does not seem supseful to upstream the command with the rest of evolve. We can safely drop it.""" if v0 and v1: - raise error.Abort('cannot only specify one format') + raise error.Abort(b'cannot only specify one format') elif v0: treefunc = _obsrelsethashtreefm0 else: treefunc = _obsrelsethashtreefm1 for chg, obs in treefunc(repo): - ui.status('%s %s\n' % (node.hex(chg), node.hex(obs))) + ui.status(b'%s %s\n' % (node.hex(chg), node.hex(obs))) def _obsrelsethashtreefm0(repo): return _obsrelsethashtree(repo, obsolete._fm0encodeonemarker) @@ -61,8 +61,8 @@ cache = [] unfi = repo.unfiltered() markercache = {} - compat.progress(repo.ui, _("preparing locally"), 0, total=len(unfi), - unit=_("changesets")) + compat.progress(repo.ui, _(b"preparing locally"), 0, total=len(unfi), + unit=_(b"changesets")) for i in unfi: ctx = unfi[i] entry = 0 @@ -92,7 +92,7 @@ cache.append((ctx.node(), sha.digest())) else: cache.append((ctx.node(), node.nullid)) - compat.progress(repo.ui, _("preparing locally"), i, total=len(unfi), - unit=_("changesets")) - compat.progress(repo.ui, _("preparing locally"), None) + compat.progress(repo.ui, _(b"preparing locally"), i, total=len(unfi), + unit=_(b"changesets")) + compat.progress(repo.ui, _(b"preparing locally"), None) return cache
--- a/hgext3rd/evolve/obshistory.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/evolve/obshistory.py Tue Sep 24 12:42:27 2019 +0200 @@ -31,13 +31,13 @@ eh = exthelper.exthelper() # Config -efd = {'default': True} # pass a default value unless the config is registered +efd = {b'default': True} # pass a default value unless the config is registered @eh.extsetup def enableeffectflags(ui): item = (getattr(ui, '_knownconfig', {}) - .get('experimental', {}) - .get('evolution.effect-flags')) + .get(b'experimental', {}) + .get(b'evolution.effect-flags')) if item is not None: item.default = True efd.clear() @@ -79,10 +79,10 @@ Returns 0 on success. """ - ui.pager('obslog') + ui.pager(b'obslog') revs = list(revs) + opts['rev'] if not revs: - revs = ['.'] + revs = [b'.'] revs = scmutil.revrange(repo, revs) if opts['graph']: @@ -131,7 +131,7 @@ values = [] for sset in fullsuccessorsets: - values.append({'successors': sset, 'markers': sset.markers}) + values.append({b'successors': sset, b'markers': sset.markers}) return values @@ -154,10 +154,10 @@ # Compat 4.6 if not util.safehasattr(self, "_includediff"): - self._includediff = diffopts and diffopts.get('patch') + self._includediff = diffopts and diffopts.get(b'patch') - self.template = diffopts and diffopts.get('template') - self.filter = diffopts and diffopts.get('filternonlocal') + self.template = diffopts and diffopts.get(b'template') + self.filter = diffopts and diffopts.get(b'filternonlocal') def show(self, ctx, copies=None, matchfn=None, **props): if self.buffered: @@ -165,12 +165,12 @@ changenode = ctx.node() - _props = {"template": self.template} - fm = self.ui.formatter('debugobshistory', _props) + _props = {b"template": self.template} + fm = self.ui.formatter(b'debugobshistory', _props) _debugobshistorydisplaynode(fm, self.repo, changenode) - markerfm = fm.nested("markers") + markerfm = fm.nested(b"markers") # Succs markers if self.filter is False: @@ -186,21 +186,21 @@ r = _successorsandmarkers(self.repo, ctx) for succset in sorted(r): - markers = succset["markers"] + markers = succset[b"markers"] if not markers: continue - successors = succset["successors"] + successors = succset[b"successors"] _debugobshistorydisplaysuccsandmarkers(markerfm, successors, markers, ctx.node(), self.repo, self._includediff) markerfm.end() - markerfm.plain('\n') + markerfm.plain(b'\n') fm.end() self.hunk[ctx.node()] = self.ui.popbuffer() else: ### graph output is buffered only - msg = 'cannot be used outside of the graphlog (yet)' + msg = b'cannot be used outside of the graphlog (yet)' raise error.ProgrammingError(msg) def flush(self, ctx): @@ -211,43 +211,43 @@ def patchavailable(node, repo, successors): if node not in repo: - return False, "context is not local" + return False, b"context is not local" if len(successors) == 0: - return False, "no successors" + return False, b"no successors" elif len(successors) > 1: - return False, "too many successors (%d)" % len(successors) + return False, b"too many successors (%d)" % len(successors) succ = successors[0] if succ not in repo: - return False, "successor is unknown locally" + return False, b"successor is unknown locally" # Check that both node and succ have the same parents nodep1, nodep2 = repo[node].p1(), repo[node].p2() succp1, succp2 = repo[succ].p1(), repo[succ].p2() if nodep1 != succp1 or nodep2 != succp2: - return False, "changesets rebased" + return False, b"changesets rebased" return True, succ def getmarkerdescriptionpatch(repo, basedesc, succdesc): # description are stored without final new 
line, # add one to avoid ugly diff - basedesc += '\n' - succdesc += '\n' + basedesc += b'\n' + succdesc += b'\n' # fake file name - basename = "changeset-description" - succname = "changeset-description" + basename = b"changeset-description" + succname = b"changeset-description" d = compat.strdiff(basedesc, succdesc, basename, succname) uheaders, hunks = d # Copied from patch.diff - text = ''.join(sum((list(hlines) for hrange, hlines in hunks), [])) - patch = "\n".join(uheaders + [text]) + text = b''.join(sum((list(hlines) for hrange, hlines in hunks), [])) + patch = b"\n".join(uheaders + [text]) return patch @@ -333,7 +333,7 @@ # Then choose a random node from the cycle breaknode = sorted(cycle)[0] # And display it by force - repo.ui.debug('obs-cycle detected, forcing display of %s\n' + repo.ui.debug(b'obs-cycle detected, forcing display of %s\n' % nodemod.short(breaknode)) validcandidates = [breaknode] @@ -435,7 +435,7 @@ def _debugobshistoryrevs(ui, repo, revs, opts): """ Display the obsolescence history for revset """ - fm = ui.formatter('debugobshistory', pycompat.byteskwargs(opts)) + fm = ui.formatter(b'debugobshistory', pycompat.byteskwargs(opts)) precursors = repo.obsstore.predecessors successors = repo.obsstore.successors nodec = repo.changelog.node @@ -451,7 +451,7 @@ succs = successors.get(ctxnode, ()) - markerfm = fm.nested("markers") + markerfm = fm.nested(b"markers") for successor in sorted(succs): includediff = opts and opts.get("patch") _debugobshistorydisplaymarker(markerfm, successor, ctxnode, unfi, includediff) @@ -477,24 +477,24 @@ shortdescription = shortdescription.splitlines()[0] fm.startitem() - fm.write('node', '%s', bytes(ctx), - label="evolve.node") - fm.plain(' ') + fm.write(b'node', b'%s', bytes(ctx), + label=b"evolve.node") + fm.plain(b' ') - fm.write('rev', '(%d)', ctx.rev(), - label="evolve.rev") - fm.plain(' ') + fm.write(b'rev', b'(%d)', ctx.rev(), + label=b"evolve.rev") + fm.plain(b' ') - fm.write('shortdescription', '%s', shortdescription, - label="evolve.short_description") - fm.plain('\n') + fm.write(b'shortdescription', b'%s', shortdescription, + label=b"evolve.short_description") + fm.plain(b'\n') def _debugobshistorydisplaymissingctx(fm, nodewithoutctx): hexnode = nodemod.short(nodewithoutctx) fm.startitem() - fm.write('node', '%s', hexnode, - label="evolve.node evolve.missing_change_ctx") - fm.plain('\n') + fm.write(b'node', b'%s', hexnode, + label=b"evolve.node evolve.missing_change_ctx") + fm.plain(b'\n') def _debugobshistorydisplaymarker(fm, marker, node, repo, includediff=False): succnodes = marker[1] @@ -502,18 +502,18 @@ metadata = dict(marker[3]) fm.startitem() - fm.plain(' ') + fm.plain(b' ') # Detect pruned revisions if len(succnodes) == 0: - verb = 'pruned' + verb = b'pruned' else: - verb = 'rewritten' + verb = b'rewritten' - fm.write('verb', '%s', verb, - label="evolve.verb") + fm.write(b'verb', b'%s', verb, + label=b"evolve.verb") - effectflag = metadata.get('ef1') + effectflag = metadata.get(b'ef1') if effectflag is not None: try: effectflag = int(effectflag) @@ -524,50 +524,50 @@ # XXX should be a dict if effectflag & DESCCHANGED: - effect.append('description') + effect.append(b'description') if effectflag & METACHANGED: - effect.append('meta') + effect.append(b'meta') if effectflag & USERCHANGED: - effect.append('user') + effect.append(b'user') if effectflag & DATECHANGED: - effect.append('date') + effect.append(b'date') if effectflag & BRANCHCHANGED: - effect.append('branch') + effect.append(b'branch') if effectflag & PARENTCHANGED: 
- effect.append('parent') + effect.append(b'parent') if effectflag & DIFFCHANGED: - effect.append('content') + effect.append(b'content') if effect: - fmteffect = fm.formatlist(effect, 'effect', sep=', ') - fm.write('effect', '(%s)', fmteffect) + fmteffect = fm.formatlist(effect, b'effect', sep=b', ') + fm.write(b'effect', b'(%s)', fmteffect) if len(succnodes) > 0: - fm.plain(' as ') + fm.plain(b' as ') shortsnodes = (nodemod.short(succnode) for succnode in sorted(succnodes)) - nodes = fm.formatlist(shortsnodes, 'succnodes', sep=', ') - fm.write('succnodes', '%s', nodes, - label="evolve.node") + nodes = fm.formatlist(shortsnodes, b'succnodes', sep=b', ') + fm.write(b'succnodes', b'%s', nodes, + label=b"evolve.node") - operation = metadata.get('operation') + operation = metadata.get(b'operation') if operation: - fm.plain(' using ') - fm.write('operation', '%s', operation, label="evolve.operation") + fm.plain(b' using ') + fm.write(b'operation', b'%s', operation, label=b"evolve.operation") - fm.plain(' by ') + fm.plain(b' by ') - fm.write('user', '%s', metadata['user'], - label="evolve.user") - fm.plain(' ') + fm.write(b'user', b'%s', metadata[b'user'], + label=b"evolve.user") + fm.plain(b' ') - fm.write('date', '(%s)', fm.formatdate(date), - label="evolve.date") + fm.write(b'date', b'(%s)', fm.formatdate(date), + label=b"evolve.date") # initial support for showing note - if metadata.get('note'): - fm.plain('\n note: ') - fm.write('note', "%s", metadata['note'], label="evolve.note") + if metadata.get(b'note'): + fm.plain(b'\n note: ') + fm.write(b'note', b"%s", metadata[b'note'], label=b"evolve.note") # Patch display if includediff is True: @@ -585,20 +585,20 @@ if descriptionpatch: # add the diffheader - diffheader = "diff -r %s -r %s changeset-description\n" % \ + diffheader = b"diff -r %s -r %s changeset-description\n" %\ (basectx, succctx) descriptionpatch = diffheader + descriptionpatch def tolist(text): return [text] - fm.plain("\n") + fm.plain(b"\n") for chunk, label in patch.difflabel(tolist, descriptionpatch): - chunk = chunk.strip('\t') - if chunk and chunk != '\n': - fm.plain(' ') - fm.write('desc-diff', '%s', chunk, label=label) + chunk = chunk.strip(b'\t') + if chunk and chunk != b'\n': + fm.plain(b' ') + fm.write(b'desc-diff', b'%s', chunk, label=label) # Content patch diffopts = patch.diffallopts(repo.ui, {}) @@ -608,21 +608,21 @@ for chunk, label in patch.diffui(repo, node, succ, matchfn, opts=diffopts): if firstline: - fm.plain('\n') + fm.plain(b'\n') firstline = False if linestart: - fm.plain(' ') + fm.plain(b' ') linestart = False - if chunk == '\n': + if chunk == b'\n': linestart = True - fm.write('patch', '%s', chunk, label=label) + fm.write(b'patch', b'%s', chunk, label=label) else: - nopatch = " (No patch available, %s)" % _patchavailable[1] - fm.plain("\n") + nopatch = b" (No patch available, %s)" % _patchavailable[1] + fm.plain(b"\n") # TODO: should be in json too fm.plain(nopatch) - fm.plain("\n") + fm.plain(b"\n") def _debugobshistorydisplaysuccsandmarkers(fm, succnodes, markers, node, repo, includediff=False): """ @@ -630,17 +630,17 @@ to accept multiple markers as input. 
""" fm.startitem() - fm.plain(' ') + fm.plain(b' ') # Detect pruned revisions - verb = _successorsetverb(succnodes, markers)["verb"] + verb = _successorsetverb(succnodes, markers)[b"verb"] - fm.write('verb', '%s', verb, - label="evolve.verb") + fm.write(b'verb', b'%s', verb, + label=b"evolve.verb") # Effect flag metadata = [dict(marker[3]) for marker in markers] - ef1 = [data.get('ef1') for data in metadata] + ef1 = [data.get(b'ef1') for data in metadata] effectflag = 0 for ef in ef1: @@ -652,45 +652,45 @@ # XXX should be a dict if effectflag & DESCCHANGED: - effect.append('description') + effect.append(b'description') if effectflag & METACHANGED: - effect.append('meta') + effect.append(b'meta') if effectflag & USERCHANGED: - effect.append('user') + effect.append(b'user') if effectflag & DATECHANGED: - effect.append('date') + effect.append(b'date') if effectflag & BRANCHCHANGED: - effect.append('branch') + effect.append(b'branch') if effectflag & PARENTCHANGED: - effect.append('parent') + effect.append(b'parent') if effectflag & DIFFCHANGED: - effect.append('content') + effect.append(b'content') if effect: - fmteffect = fm.formatlist(effect, 'effect', sep=', ') - fm.write('effect', '(%s)', fmteffect) + fmteffect = fm.formatlist(effect, b'effect', sep=b', ') + fm.write(b'effect', b'(%s)', fmteffect) if len(succnodes) > 0: - fm.plain(' as ') + fm.plain(b' as ') shortsnodes = (nodemod.short(succnode) for succnode in sorted(succnodes)) - nodes = fm.formatlist(shortsnodes, 'succnodes', sep=', ') - fm.write('succnodes', '%s', nodes, - label="evolve.node") + nodes = fm.formatlist(shortsnodes, b'succnodes', sep=b', ') + fm.write(b'succnodes', b'%s', nodes, + label=b"evolve.node") # Operations operations = compat.markersoperations(markers) if operations: - fm.plain(' using ') - fm.write('operation', '%s', ", ".join(operations), label="evolve.operation") + fm.plain(b' using ') + fm.write(b'operation', b'%s', b", ".join(operations), label=b"evolve.operation") - fm.plain(' by ') + fm.plain(b' by ') # Users users = compat.markersusers(markers) - fm.write('user', '%s', ", ".join(users), - label="evolve.user") - fm.plain(' ') + fm.write(b'user', b'%s', b", ".join(users), + label=b"evolve.user") + fm.plain(b' ') # Dates dates = compat.markersdates(markers) @@ -699,10 +699,10 @@ max_date = max(dates) if min_date == max_date: - fm.write("date", "(at %s)", fm.formatdate(min_date), label="evolve.date") + fm.write(b"date", b"(at %s)", fm.formatdate(min_date), label=b"evolve.date") else: - fm.write("date", "(between %s and %s)", fm.formatdate(min_date), - fm.formatdate(max_date), label="evolve.date") + fm.write(b"date", b"(between %s and %s)", fm.formatdate(min_date), + fm.formatdate(max_date), label=b"evolve.date") # initial support for showing note # if metadata.get('note'): @@ -725,20 +725,20 @@ if descriptionpatch: # add the diffheader - diffheader = "diff -r %s -r %s changeset-description\n" % \ + diffheader = b"diff -r %s -r %s changeset-description\n" %\ (basectx, succctx) descriptionpatch = diffheader + descriptionpatch def tolist(text): return [text] - fm.plain("\n") + fm.plain(b"\n") for chunk, label in patch.difflabel(tolist, descriptionpatch): - chunk = chunk.strip('\t') - if chunk and chunk != '\n': - fm.plain(' ') - fm.write('desc-diff', '%s', chunk, label=label) + chunk = chunk.strip(b'\t') + if chunk and chunk != b'\n': + fm.plain(b' ') + fm.write(b'desc-diff', b'%s', chunk, label=label) # Content patch diffopts = patch.diffallopts(repo.ui, {}) @@ -748,21 +748,21 @@ for chunk, label in 
patch.diffui(repo, node, succ, matchfn, opts=diffopts): if firstline: - fm.plain('\n') + fm.plain(b'\n') firstline = False if linestart: - fm.plain(' ') + fm.plain(b' ') linestart = False - if chunk == '\n': + if chunk == b'\n': linestart = True - fm.write('patch', '%s', chunk, label=label) + fm.write(b'patch', b'%s', chunk, label=label) else: - nopatch = " (No patch available, %s)" % _patchavailable[1] - fm.plain("\n") + nopatch = b" (No patch available, %s)" % _patchavailable[1] + fm.plain(b"\n") # TODO: should be in json too fm.plain(nopatch) - fm.plain("\n") + fm.plain(b"\n") # logic around storing and using effect flags DESCCHANGED = 1 << 0 # action changed the description @@ -774,11 +774,11 @@ BRANCHCHANGED = 1 << 6 # the branch changed METABLACKLIST = [ - re.compile('^__touch-noise__$'), - re.compile('^branch$'), - re.compile('^.*-source$'), - re.compile('^.*_source$'), - re.compile('^source$'), + re.compile(br'^__touch-noise__$'), + re.compile(br'^branch$'), + re.compile(br'^.*-source$'), + re.compile(br'^.*_source$'), + re.compile(br'^source$'), ] def ismetablacklisted(metaitem): @@ -822,17 +822,17 @@ if len(successorssets) == 0: # The commit has been pruned - return 'pruned' + return b'pruned' elif len(successorssets) > 1: - return 'diverged' + return b'diverged' else: # No divergence, only one set of successors successors = successorssets[0] if len(successors) == 1: - return 'superseed' + return b'superseed' else: - return 'superseed_split' + return b'superseed_split' def _getobsfateandsuccs(repo, revnode, successorssets=None): """ Return a tuple containing: @@ -865,8 +865,8 @@ dates = [m[4] for m in markers] return { - 'min_date': min(dates), - 'max_date': max(dates) + b'min_date': min(dates), + b'max_date': max(dates) } def _successorsetusers(successorset, markers): @@ -877,18 +877,18 @@ # Check that user is present in meta markersmeta = [dict(m[3]) for m in markers] - users = set(meta.get('user') for meta in markersmeta if meta.get('user')) + users = set(meta.get(b'user') for meta in markersmeta if meta.get(b'user')) - return {'users': sorted(users)} + return {b'users': sorted(users)} VERBMAPPING = { - DESCCHANGED: "reworded", - METACHANGED: "meta-changed", - USERCHANGED: "reauthored", - DATECHANGED: "date-changed", - BRANCHCHANGED: "branch-changed", - PARENTCHANGED: "rebased", - DIFFCHANGED: "amended" + DESCCHANGED: b"reworded", + METACHANGED: b"meta-changed", + USERCHANGED: b"reauthored", + DATECHANGED: b"date-changed", + BRANCHCHANGED: b"branch-changed", + PARENTCHANGED: b"rebased", + DIFFCHANGED: b"amended" } def _successorsetverb(successorset, markers): @@ -896,12 +896,12 @@ """ verb = None if not successorset: - verb = 'pruned' + verb = b'pruned' elif len(successorset) == 1: # Check for effect flag metadata = [dict(marker[3]) for marker in markers] - ef1 = [data.get('ef1') for data in metadata] + ef1 = [data.get(b'ef1') for data in metadata] if all(ef1): combined = 0 @@ -913,17 +913,17 @@ verb = VERBMAPPING[combined] if verb is None: - verb = 'rewritten' + verb = b'rewritten' else: - verb = 'split' - return {'verb': verb} + verb = b'split' + return {b'verb': verb} # Use a more advanced version of obsfateverb that uses effect-flag if util.safehasattr(obsutil, 'obsfateverb'): @eh.wrapfunction(obsutil, 'obsfateverb') def obsfateverb(orig, *args, **kwargs): - return _successorsetverb(*args, **kwargs)['verb'] + return _successorsetverb(*args, **kwargs)[b'verb'] # Hijack callers of successorsetverb elif util.safehasattr(obsutil, 'obsfateprinter'): @@ -932,7 +932,7 @@ def 
obsfateprinter(orig, successors, markers, ui): def closure(successors): - return _successorsetverb(successors, markers)['verb'] + return _successorsetverb(successors, markers)[b'verb'] if not util.safehasattr(obsutil, 'successorsetverb'): return orig(successors, markers, ui) @@ -1008,8 +1008,8 @@ # Format basic data data = { - "successors": sorted(successorset), - "markers": sorted(markers) + b"successors": sorted(successorset), + b"markers": sorted(markers) } # Call an extensible list of functions to override or add new data
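
The verb shown by obslog comes from `_successorsetverb` above: the `ef1` effect flags of all markers are OR-ed together and looked up in `VERBMAPPING`, with generic fallbacks otherwise. A condensed sketch of that selection logic (illustration only, not the exact evolve code)::

    def pickverb(successorset, ef1values, verbmapping):
        # mirrors _successorsetverb: pruned/split take precedence, then a
        # combined effect flag may select a precise verb, else 'rewritten'
        if not successorset:
            return b'pruned'
        if len(successorset) > 1:
            return b'split'
        if ef1values and all(v is not None for v in ef1values):
            combined = 0
            for value in ef1values:
                combined |= value
            return verbmapping.get(combined, b'rewritten')
        return b'rewritten'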
--- a/hgext3rd/evolve/rewind.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/evolve/rewind.py Tue Sep 24 12:42:27 2019 +0200 @@ -32,8 +32,10 @@ (b'', b'exact', None, _(b"only rewind explicitly selected revisions")), (b'', b'from', [], _(b"rewind these revisions to their predecessors"), _(b'REV')), + (b'k', b'keep', None, + _(b"do not modify working directory during rewind")), ], - _(b''), + _(b'[--as-divergence] [--exact] [--keep] [--to REV]... [--from REV]...'), helpbasic=True) def rewind(ui, repo, **opts): """rewind a stack of changesets to a previous state @@ -46,7 +48,7 @@ obsolete the changeset you rewind from). Rewinding "to" will restore the changeset you have selected (and obsolete their latest successors). - By default, we rewind from the working copy parents, restoring its + By default, we rewind from the working directory parents, restoring its predecessor. When we rewind to an obsolete version, we also rewind to all its obsolete @@ -90,15 +92,17 @@ ctx = unfi[rev] ssets = obsutil.successorssets(repo, ctx.node(), sscache) if 1 < len(ssets): - msg = _('rewind confused by divergence on %s') % ctx - hint = _('solve divergence first or use "--as-divergence"') + msg = _(b'rewind confused by divergence on %s') % ctx + hint = _(b'solve divergence first or use "--as-divergence"') raise error.Abort(msg, hint=hint) if ssets and ssets[0]: for succ in ssets[0]: successorsmap[succ].add(ctx.node()) # Check that we can rewind these changesets - with repo.transaction('rewind'): + with repo.transaction(b'rewind'): + oldctx = repo[b'.'] + for rev in sorted(rewinded): ctx = unfi[rev] rewindmap[ctx.node()] = _revive_revision(unfi, rev, rewindmap) @@ -113,15 +117,47 @@ relationships.append(rel) if wctxp.node() == source: update_target = newdest[-1] - obsolete.createmarkers(unfi, relationships, operation='rewind') + obsolete.createmarkers(unfi, relationships, operation=b'rewind') if update_target is not None: - hg.updaterepo(repo, update_target, False) + if opts.get('keep'): + hg.updaterepo(repo, oldctx, True) + + # This is largely the same as the implementation in + # strip.stripcmd() and cmdrewrite.cmdprune(). + + # only reset the dirstate for files that would actually + # change between the working context and the revived cset + newctx = repo[update_target] + changedfiles = [] + for ctx in [oldctx, newctx]: + # blindly reset the files, regardless of what actually + # changed + changedfiles.extend(ctx.files()) - repo.ui.status(_('rewinded to %d changesets\n') % len(rewinded)) + # reset files that only changed in the dirstate too + dirstate = repo.dirstate + dirchanges = [f for f in dirstate if dirstate[f] != 'n'] + changedfiles.extend(dirchanges) + repo.dirstate.rebuild(newctx.node(), newctx.manifest(), + changedfiles) + + # TODO: implement restoration of copies/renames + # Ideally this step should be handled by dirstate.rebuild + # or scmutil.movedirstate, but right now there's no copy + # tracing across obsolescence relation (oldctx <-> newctx). 
+ revertopts = {'no_backup': True, 'all': True, + 'rev': oldctx.node()} + with ui.configoverride({(b'ui', b'quiet'): True}): + cmdutil.revert(repo.ui, repo, oldctx, + repo.dirstate.parents(), **revertopts) + else: + hg.updaterepo(repo, update_target, False) + + repo.ui.status(_(b'rewinded to %d changesets\n') % len(rewinded)) if relationships: - repo.ui.status(_('(%d changesets obsoleted)\n') % len(relationships)) - if update_target is not None: - ui.status(_('working directory is now at %s\n') % repo['.']) + repo.ui.status(_(b'(%d changesets obsoleted)\n') % len(relationships)) + if update_target is not None and not opts.get('keep'): + ui.status(_(b'working directory is now at %s\n') % repo[b'.']) def _select_rewinded(repo, opts): """select the revision we shoudl rewind to @@ -131,18 +167,18 @@ revsto = opts.get('to') revsfrom = opts.get('from') if not (revsto or revsfrom): - revsfrom.append('.') + revsfrom.append(b'.') if revsto: rewinded.update(scmutil.revrange(repo, revsto)) if revsfrom: succs = scmutil.revrange(repo, revsfrom) - rewinded.update(unfi.revs('predecessors(%ld)', succs)) + rewinded.update(unfi.revs(b'predecessors(%ld)', succs)) if not rewinded: - raise error.Abort('no revision to rewind to') + raise error.Abort(b'no revision to rewind to') if not opts['exact']: - rewinded = unfi.revs('obsolete() and ::%ld', rewinded) + rewinded = unfi.revs(b'obsolete() and ::%ld', rewinded) return sorted(rewinded) @@ -152,14 +188,14 @@ ctx = unfi[rev] extra = ctx.extra().copy() # rewind hash should be unique over multiple rewind. - user = unfi.ui.config('devel', 'user.obsmarker') + user = unfi.ui.config(b'devel', b'user.obsmarker') if not user: user = unfi.ui.username() - date = unfi.ui.configdate('devel', 'default-date') + date = unfi.ui.configdate(b'devel', b'default-date') if date is None: date = compat.makedate() - noise = "%s\0%s\0%d\0%d" % (ctx.node(), user, date[0], date[1]) - extra['__rewind-hash__'] = hashlib.sha256(noise).hexdigest() + noise = b"%s\0%s\0%d\0%d" % (ctx.node(), user, date[0], date[1]) + extra[b'__rewind-hash__'] = hashlib.sha256(noise).hexdigest() p1 = ctx.p1().node() p1 = rewindmap.get(p1, p1) @@ -169,13 +205,13 @@ updates = [] if len(ctx.parents()) > 1: updates = ctx.parents() - extradict = {'extra': extra} + extradict = {b'extra': extra} new, unusedvariable = rewriteutil.rewrite(unfi, ctx, updates, ctx, [p1, p2], commitopts=extradict) obsolete.createmarkers(unfi, [(ctx, (unfi[new],))], - flag=identicalflag, operation='rewind') + flag=identicalflag, operation=b'rewind') return new
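
With the `--keep` flag introduced above, rewind restores the selected obsolete changesets but leaves the file contents of the working directory untouched, only re-parenting the dirstate onto the revived changeset. A typical invocation (counts illustrative)::

    $ hg rewind --keep
    rewinded to 1 changesets
    (1 changesets obsoleted)

Without `--keep`, the working directory is updated to the rewound changeset and the usual "working directory is now at ..." message is printed.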
--- a/hgext3rd/evolve/rewriteutil.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/evolve/rewriteutil.py Tue Sep 24 12:42:27 2019 +0200 @@ -44,37 +44,37 @@ numrevs = len(revs) if numrevs < maxrevs: shorts = [node.short(tonode(r)) for r in revs] - summary = ', '.join(shorts) + summary = b', '.join(shorts) else: first = revs.first() - summary = _('%s and %d others') + summary = _(b'%s and %d others') summary %= (node.short(tonode(first)), numrevs - 1) return summary -def precheck(repo, revs, action='rewrite'): +def precheck(repo, revs, action=b'rewrite'): """check if <revs> can be rewritten <action> can be used to control the commit message. """ if node.nullrev in revs: - msg = _("cannot %s the null revision") % (action) - hint = _("no changeset checked out") + msg = _(b"cannot %s the null revision") % (action) + hint = _(b"no changeset checked out") raise error.Abort(msg, hint=hint) if any(util.safehasattr(r, 'rev') for r in revs): - msg = "rewriteutil.precheck called with ctx not revs" + msg = b"rewriteutil.precheck called with ctx not revs" repo.ui.develwarn(msg) revs = (r.rev() for r in revs) - publicrevs = repo.revs('%ld and public()', revs) + publicrevs = repo.revs(b'%ld and public()', revs) if publicrevs: summary = _formatrevs(repo, publicrevs) - msg = _("cannot %s public changesets: %s") % (action, summary) - hint = _("see 'hg help phases' for details") + msg = _(b"cannot %s public changesets: %s") % (action, summary) + hint = _(b"see 'hg help phases' for details") raise error.Abort(msg, hint=hint) newunstable = disallowednewunstable(repo, revs) if newunstable: - msg = _("%s will orphan %i descendants") + msg = _(b"%s will orphan %i descendants") msg %= (action, len(newunstable)) - hint = _("see 'hg help evolution.instability'") + hint = _(b"see 'hg help evolution.instability'") raise error.Abort(msg, hint=hint) def bookmarksupdater(repo, oldid, tr): @@ -96,33 +96,33 @@ allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt) if allowunstable: return revset.baseset() - return repo.revs("(%ld::) - %ld", revs, revs) + return repo.revs(b"(%ld::) - %ld", revs, revs) def foldcheck(repo, revs): """check that <revs> can be folded""" - precheck(repo, revs, action='fold') - roots = repo.revs('roots(%ld)', revs) + precheck(repo, revs, action=b'fold') + roots = repo.revs(b'roots(%ld)', revs) if len(roots) > 1: - raise error.Abort(_("cannot fold non-linear revisions " - "(multiple roots given)")) + raise error.Abort(_(b"cannot fold non-linear revisions " + b"(multiple roots given)")) root = repo[roots.first()] if root.phase() <= phases.public: - raise error.Abort(_("cannot fold public revisions")) - heads = repo.revs('heads(%ld)', revs) + raise error.Abort(_(b"cannot fold public revisions")) + heads = repo.revs(b'heads(%ld)', revs) if len(heads) > 1: - raise error.Abort(_("cannot fold non-linear revisions " - "(multiple heads given)")) + raise error.Abort(_(b"cannot fold non-linear revisions " + b"(multiple heads given)")) head = repo[heads.first()] - baseparents = repo.revs('parents(%ld) - %ld', revs, revs) + baseparents = repo.revs(b'parents(%ld) - %ld', revs, revs) if len(baseparents) > 2: - raise error.Abort(_("cannot fold revisions that merge with more than " - "one external changeset (not in revisions)")) - if not repo.ui.configbool('experimental', 'evolution.allowdivergence'): - obsolete = repo.revs('%ld and obsolete()', revs) + raise error.Abort(_(b"cannot fold revisions that merge with more than " + b"one external changeset (not in revisions)")) + if not 
repo.ui.configbool(b'experimental', b'evolution.allowdivergence'): + obsolete = repo.revs(b'%ld and obsolete()', revs) if obsolete: - msg = _('folding obsolete revisions may cause divergence') - hint = _('set experimental.evolution.allowdivergence=yes' - ' to allow folding them') + msg = _(b'folding obsolete revisions may cause divergence') + hint = _(b'set experimental.evolution.allowdivergence=yes' + b' to allow folding them') raise error.Abort(msg, hint=hint) # root's p1 is already used as the target ctx p1 baseparents -= {root.p1().rev()} @@ -134,14 +134,14 @@ try: wlock = repo.wlock() lock = repo.lock() - tr = repo.transaction('prune') + tr = repo.transaction(b'prune') bmchanges = [] for bookmark in bookmarks: bmchanges.append((bookmark, None)) repo._bookmarks.applychanges(repo, tr, bmchanges) tr.close() for bookmark in sorted(bookmarks): - repo.ui.write(_("bookmark '%s' deleted\n") % bookmark) + repo.ui.write(_(b"bookmark '%s' deleted\n") % bookmark) finally: lockmod.release(tr, lock, wlock) @@ -157,8 +157,8 @@ """ repomarks = repo._bookmarks if not bookmarks.issubset(repomarks): - raise error.Abort(_("bookmark '%s' not found") % - ','.join(sorted(bookmarks - set(repomarks.keys())))) + raise error.Abort(_(b"bookmark '%s' not found") % + b','.join(sorted(bookmarks - set(repomarks.keys())))) # If the requested bookmark is not the only one pointing to a # a revision we have to only delete the bookmark and not strip @@ -184,7 +184,7 @@ try: wlock = repo.wlock() lock = repo.lock() - tr = repo.transaction('rewrite') + tr = repo.transaction(b'rewrite') base = old.p1() updatebookmarks = bookmarksupdater(repo, old.node(), tr) @@ -225,13 +225,13 @@ if not message: message = old.description() - user = commitopts.get('user') or old.user() + user = commitopts.get(b'user') or old.user() # TODO: In case not date is given, we should take the old commit date # if we are working one one changeset or mimic the fold behavior about # date - date = commitopts.get('date') or None - extra = dict(commitopts.get('extra', old.extra())) - extra['branch'] = head.branch() + date = commitopts.get(b'date') or None + extra = dict(commitopts.get(b'extra', old.extra())) + extra[b'branch'] = head.branch() new = context.memctx(repo, parents=newbases, @@ -242,7 +242,7 @@ date=date, extra=extra) - if commitopts.get('edit'): + if commitopts.get(b'edit'): new._text = cmdutil.commitforceeditor(repo, new, []) revcount = len(repo) newid = repo.commitctx(new)
--- a/hgext3rd/evolve/safeguard.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/evolve/safeguard.py Tue Sep 24 12:42:27 2019 +0200 @@ -20,9 +20,9 @@ eh = exthelper.exthelper() # hg <= 4.8 -if 'auto-publish' not in configitems.coreitems.get('experimental', {}): +if b'auto-publish' not in configitems.coreitems.get(b'experimental', {}): - eh.configitem('experimental', 'auto-publish', 'publish') + eh.configitem(b'experimental', b'auto-publish', b'publish') @eh.reposetup def setuppublishprevention(ui, repo): @@ -31,25 +31,25 @@ def checkpush(self, pushop): super(noautopublishrepo, self).checkpush(pushop) - behavior = self.ui.config('experimental', 'auto-publish') - nocheck = behavior not in ('warn', 'abort') + behavior = self.ui.config(b'experimental', b'auto-publish') + nocheck = behavior not in (b'warn', b'abort') if nocheck or getattr(pushop, 'publish', False): return - remotephases = pushop.remote.listkeys('phases') - publishing = remotephases.get('publishing', False) + remotephases = pushop.remote.listkeys(b'phases') + publishing = remotephases.get(b'publishing', False) if publishing: if pushop.revs is None: - published = self.filtered('served').revs("not public()") + published = self.filtered(b'served').revs(b"not public()") else: - published = self.revs("::%ln - public()", pushop.revs) + published = self.revs(b"::%ln - public()", pushop.revs) if published: - if behavior == 'warn': - self.ui.warn(_('%i changesets about to be published\n') + if behavior == b'warn': + self.ui.warn(_(b'%i changesets about to be published\n') % len(published)) - elif behavior == 'abort': - msg = _('push would publish 1 changesets') - hint = _("behavior controlled by " - "'experimental.auto-publish' config") + elif behavior == b'abort': + msg = _(b'push would publish 1 changesets') + hint = _(b"behavior controlled by " + b"'experimental.auto-publish' config") raise error.Abort(msg, hint=hint) repo.__class__ = noautopublishrepo
--- a/hgext3rd/evolve/serveronly.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/evolve/serveronly.py Tue Sep 24 12:42:27 2019 +0200 @@ -25,7 +25,7 @@ ) except (ValueError, ImportError) as exc: if (isinstance(exc, ValueError) - and str(exc) != 'Attempted relative import in non-package'): + and str(exc) != b'Attempted relative import in non-package'): raise # extension imported using direct path sys.path.insert(0, os.path.dirname(os.path.dirname(__file__))) @@ -54,11 +54,11 @@ @eh.reposetup def default2evolution(ui, repo): - evolveopts = repo.ui.configlist('experimental', 'evolution') + evolveopts = repo.ui.configlist(b'experimental', b'evolution') if not evolveopts: - evolveopts = 'all' - repo.ui.setconfig('experimental', 'evolution', evolveopts) - if obsolete.isenabled(repo, 'exchange'): + evolveopts = b'all' + repo.ui.setconfig(b'experimental', b'evolution', evolveopts) + if obsolete.isenabled(repo, b'exchange'): # if no config explicitly set, disable bundle1 - if not isinstance(repo.ui.config('server', 'bundle1'), bytes): - repo.ui.setconfig('server', 'bundle1', False) + if not isinstance(repo.ui.config(b'server', b'bundle1'), bytes): + repo.ui.setconfig(b'server', b'bundle1', False)
--- a/hgext3rd/evolve/stablerange.py Fri Jul 19 16:26:48 2019 +0200
+++ b/hgext3rd/evolve/stablerange.py Tue Sep 24 12:42:27 2019 +0200
@@ -6,6 +6,348 @@
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
+"""stable range
+
+General Goals and Properties
+----------------------------
+
+Stable-ranges become useful when some logic needs a recursive way to slice the
+history of a repository into smaller and smaller groups of revisions. Here are
+examples of such use cases:
+
+* **bundle caching:**
+  With an easy way to slice any subset of history into stable-ranges, we
+  can cache a small number of bundles covering these ranges and reuse them for
+  other pull operations, even if the pull operations have different boundaries.
+
+* **metadata discovery:**
+  With a simple way to recursively look at smaller and smaller ranges, an
+  algorithm can do fine-grained discovery of the areas of history where some mutable
+  metadata differs from one repository to another. Such metadata can be
+  obsolescence markers, CI status, lightweight tags, etc...
+
+To serve these use cases best, stable-ranges need some important properties:
+
+* the total number of ranges needed to cover a full repository is well bounded,
+* the minimal number of ranges needed to cover an arbitrary subset of the history is well bounded,
+* for the same section of history, the ranges will be the same on any
+  repository,
+* the ranges are cheap to compute iteratively: each new revision reuses the
+  ranges previous revisions used.
+
+Simple introduction to the Concepts
+-----------------------------------
+
+To keep things simple, let us look at the issue on a linear history::
+
+  A -> B -> C -> D -> E -> F -> G -> H
+
+To make sure we have ranges that cover each part of the history with a good
+granularity, we use some binary recursion. The standard stable ranges will be::
+
+  [A -> B -> C -> D -> E -> F -> G -> H]  size 8
+  [A -> B -> C -> D] [E -> F -> G -> H]   size 4
+  [A -> B] [C -> D] [E -> F] [G -> H]     size 2
+  [A] [B] [C] [D] [E] [F] [G] [H]         size 1
+
+Well bounded total number of ranges:
+````````````````````````````````````
+
+This binary slicing makes sure we keep the total number of stable ranges under control.
+
+As you can see, we have N size 1 ranges. They are trivial and we don't care
+about them. Then we have: N/2 size 2 ranges + N/4 size 4 ranges + N/8 size 8
+ranges, etc... So a total of about "length(repo)" standard ranges.
+
+
+Well bounded number of ranges to cover a subset:
+````````````````````````````````````````````````
+
+Any subset of the history can be expressed with these standard ranges.
+
+For example, the [A, F] subset can be covered with 2 ranges::
+
+  [A -> B -> C -> D] [E -> F]
+
+A less trivial example, [B, F], still requires a small number of ranges (3)::
+
+  [B] [C -> D] [E -> F]
+
+In practice, any subset can be expressed in at most "2 x log2(length(subset))"
+stable ranges, a well bounded value.
+
+Cheap incremental updates
+`````````````````````````
+
+The scheme described above results in 2N subranges for a repository of size N.
+We do not want to have to recompute these 2N stable-ranges whenever a new
+revision is added to the repository. To achieve this, the stable-ranges are
+defined by **fixed boundaries** that are independent of the total size of the
+repository. Here is how it looks in our example.
+
+We start with a repository having only [A, F]. Notice how we still create
+power-of-two sized stable ranges::
+
+  [A -> B -> C -> D]
+  [A -> B] [C -> D] [E -> F]
+  [A] [B] [C] [D] [E] [F]
+
+When we simply add a new revision G, we reuse most of the ranges we already have::
+
+  [A -> B -> C -> D]
+  [A -> B] [C -> D] [E -> F]
+  [A] [B] [C] [D] [E] [F] [G]
+
+Adding H is a bigger event, as we reach a new boundary::
+
+  [A -> B -> C -> D -> E -> F -> G -> H]
+  [A -> B -> C -> D] [E -> F -> G -> H]
+  [A -> B] [C -> D] [E -> F] [G -> H]
+  [A] [B] [C] [D] [E] [F] [G] [H]
+
+At most, adding a new revision `R` will introduce `log2(length(::R))` new
+stable ranges.
+
+More advanced elements
+----------------------
+
+Of course, the history of a repository is not as simple as our linear example. So
+how do we deal with branching and merging? To do so, we leverage the
+"stable sort" algorithm defined in the `stablesort.py` module. To define the
+stable ranges that compose a set of revisions `::R`, we linearize the space by
+sorting it. The stable sort algorithm has two important properties:
+
+First, it gives the same result on different repositories, so we can use it in
+algorithms involving multiple peers.
+
+Second, in case of a merge, it reuses the same order as the parents as much as possible.
+This is important to keep reusing existing stable ranges as the repository grows.
+
+How are ranges defined?
+```````````````````````
+
+To keep things small and simple, a stable range always contains the final part
+of a `stablesort(::R)` set of revisions. It is defined by two items:
+
+* its head revision, the R in `stablesort(::R)`
+* the size of that range... well almost; for implementation reasons, it uses the
+  index of the first included item. Or in other words, the number of excluded
+  initial items in `stablesort(::R)`.
+
+Let's look at a practical case. In our initial example, `[A, B, C, D, E, F, G,
+H]` is H-0; `[E, F, G, H]` is H-4; `[G, H]` is H-6 and `[H]` is H-7.
+
+Let us look at a non-linear graph::
+
+  A - B - C - E
+  |    /
+   -D
+
+and assume that `stablesort(::E) = [A, B, C, D, E]`. Then `[A, B, C]` is C-0,
+`[A, B, D]` is D-0; `[D, E]` is E-3, `[E]` is E-4, etc...
+
+Slicing in a non-linear context
+```````````````````````````````
+
+Branching can also affect the way we slice things.
+
+The small graph above offers a simple example. For a size 5 range (starting at the
+root), standard slicing will want a size 4 part and a size 1 part. So, in a
+simple linear space, `[A, B, C, D, E]` would be sliced as `[A, B, C, D] + [E]`.
+However, in our non-linear case, `[A, B, C, D]` has two heads (C and D) and
+cannot be expressed with a single range. As a result the range will be sliced
+into more subranges::
+
+  stdslice(E-0) = [A, B, C] + [D] + [E] = C-0 + D-2 + E-4
+
+Yet, this does not mean ranges containing a merge will always result in slicing
+with many revisions. The subranges might also silently contain them. Let us
+look at an example::
+
+  A - B - C - D - E --- G - H
+  |                    /
+   ---------- F
+
+with::
+
+  `stablesort(::H) == [A, B, C, D, E, F, G, H]`
+
+then::
+
+  stdslice(H-0) = [A, B, C, D] + [E, F, G, H] = D-0 + H-4
+
+As a result, the non-linearity will increase the number of subranges involved,
+but in practice the impact stays limited.
+
+The total number of standard subranges stays under control, with about
+`O(log2(N))` new stable ranges introduced for each new revision. In practice, the
+total number of stable ranges we have is about `O(length(repo))`.
+
+In addition, it is worth noting that the head of the extra ranges we have to
+use will match the destination of the "jump" cached by the stablesort
+algorithm. So, all this slicing can usually be done without iterating over the
+stable sorted revisions.
+
+Caching Strategy
+----------------
+
+The current caching strategy uses a very space-inefficient sqlite database.
+Testing shows it often takes 100x more space than what a basic binary storage
+would take. The sqlite storage was very useful at the proof of concept stage.
+
+Since all new stable-ranges introduced by a revision R will be "R headed", we
+could easily store their standard subranges alongside the revision information
+to reuse the existing revision index.
+
+Subrange information can be stored efficiently: a naive approach storing all
+stable ranges and their subranges would require just 2 integers per range + 2
+integers for any extra sub-ranges other than the first and last ones.
+
+We can probably push efficiency further by taking advantage of the large
+overlap in subranges from one non-merge revision to the next. This is probably a
+premature optimisation until we start getting actual results for a naive binary
+storage.
+
+To use this at a large scale, it would be important to compute these data at
+commit time and to exchange them alongside the revisions over the network. This
+is similar to what we do for other cached data.
+
+Performance
+-----------
+
+The current implementation has not been especially optimized for performance.
+The goal was mostly to get the order of magnitude of the algorithm complexity.
+The results are convincing: a medium repository gets a full cache warming in a
+couple of seconds, and even very large and branchy repositories get fully warmed
+in the order of tens of minutes. We do not observe a complexity explosion
+making the algorithm unusable on large repositories.
+
+A better cache implementation combined with an optimized version of the
+algorithm should give much faster performance. Combined with commit-time
+computation and exchange over the network, the overall impact of this should be
+invisible to the user.
+ +The stable range is currently successfully used in production for 2 use cases: +* obsolescence markers discovery, +* caching precomputed bundle while serving pulls + +practical data +-------------- + +The evolve repository: + + number of revisions: 4833 + number of heads: 15 + number of merge: 612 ( 12%) + number of range: 4826 + with 2 subranges: 4551 ( 94%) + with 3 subranges: 255 ( 5%) + with 4 subranges: 12 ( 0%) + with 5 subranges: 7 ( 0%) + with 8 subranges: 1 ( 0%) + average range/revs: 0.99 + + Estimated approximative size of a naive compact storage: + 41 056 bytes + Current size of the sqlite cache (for comparison): + 5 312 512 bytes + +The mercurial repository: + + number of revisions: 42849 + number of heads: 2 + number of merge: 2647 ( 6%) + number of range: 41279 + with 2 subranges: 39740 ( 96%) + with 3 subranges: 1494 ( 3%) + with 4 subranges: 39 ( 0%) + with 5 subranges: 5 ( 0%) + with 7 subranges: 1 ( 0%) + average range/revs: 0.96 + Estimated approximative size of a naive compact storage: + 342 968 bytes + Current size of the sqlite cache (for comparison): + 62 803 968 bytes + +The pypy repository (very brancy history): + + number of revisions: 97409 + number of heads: 183 + number of merge: 8371 ( 8%) + number of range: 107025 + with 2 subranges: 100166 ( 93%) + with 3 subranges: 5839 ( 5%) + with 4 subranges: 605 ( 0%) + with 5 subranges: 189 ( 0%) + with 6 subranges: 90 ( 0%) + with 7 subranges: 38 ( 0%) + with 8 subranges: 18 ( 0%) + with 9 subranges: 9 ( 0%) + with 10 subranges: 15 ( 0%) + with 11 subranges: 4 ( 0%) + with 12 subranges: 6 ( 0%) + with 13 subranges: 7 ( 0%) + with 14 subranges: 6 ( 0%) + with 15 subranges: 1 ( 0%) + with 16 subranges: 2 ( 0%) + with 17 subranges: 2 ( 0%) + with 18 subranges: 3 ( 0%) + with 19 subranges: 2 ( 0%) + with 20 subranges: 3 ( 0%) + with 25 subranges: 1 ( 0%) + with 27 subranges: 1 ( 0%) + with 31 subranges: 3 ( 0%) + with 32 subranges: 2 ( 0%) + with 33 subranges: 1 ( 0%) + with 35 subranges: 1 ( 0%) + with 43 subranges: 1 ( 0%) + with 44 subranges: 1 ( 0%) + with 45 subranges: 2 ( 0%) + with 47 subranges: 1 ( 0%) + with 51 subranges: 1 ( 0%) + with 52 subranges: 1 ( 0%) + with 57 subranges: 1 ( 0%) + with 65 subranges: 1 ( 0%) + with 73 subranges: 1 ( 0%) + with 79 subranges: 1 ( 0%) + average range/revs: 1.10 + Estimated approximative size of a naive compact storage: + 934 176 bytes + Current size of the sqlite cache (for comparison): + 201 236 480 bytes + +A private and branchy repository: + + number of revisions: 605011 + number of heads: 14061 + number of merge: 118109 ( 19%) + number of range: 747625 + with 2 subranges: 595985 ( 79%) + with 3 subranges: 130196 ( 17%) + with 4 subranges: 14093 ( 1%) + with 5 subranges: 4090 ( 0%) + with 6 subranges: 741 ( 0%) + with 7 subranges: 826 ( 0%) + with 8 subranges: 1313 ( 0%) + with 9 subranges: 83 ( 0%) + with 10 subranges: 22 ( 0%) + with 11 subranges: 9 ( 0%) + with 12 subranges: 26 ( 0%) + with 13 subranges: 5 ( 0%) + with 14 subranges: 9 ( 0%) + with 15 subranges: 3 ( 0%) + with 16 subranges: 212 ( 0%) + with 18 subranges: 6 ( 0%) + with 19 subranges: 3 ( 0%) + with 24 subranges: 1 ( 0%) + with 27 subranges: 1 ( 0%) + with 32 subranges: 1 ( 0%) + average range/revs: 1.23 + Estimated approximative size of a naive compact storage: + 7 501 928 bytes + Current size of the sqlite cache (for comparison): + 1 950 310 400 bytes +""" import abc import functools @@ -64,11 +406,11 @@ return ranges _stablerangemethodmap = { - 'branchpoint': lambda repo: stablerange(), - 'default': 
lambda repo: repo.stablerange, - 'basic-branchpoint': lambda repo: stablerangebasic(), - 'basic-mergepoint': lambda repo: stablerangedummy_mergepoint(), - 'mergepoint': lambda repo: stablerange_mergepoint(), + b'branchpoint': lambda repo: stablerange(), + b'default': lambda repo: repo.stablerange, + b'basic-branchpoint': lambda repo: stablerangebasic(), + b'basic-mergepoint': lambda repo: stablerangedummy_mergepoint(), + b'mergepoint': lambda repo: stablerange_mergepoint(), } @eh.command( @@ -90,9 +432,9 @@ short = nodemod.short revs = scmutil.revrange(repo, opts['rev']) if not revs: - raise error.Abort('no revisions specified') + raise error.Abort(b'no revisions specified') if ui.verbose: - template = '%s-%d (%d, %d, %d)' + template = b'%s-%d (%d, %d, %d)' def _rangestring(repo, rangeid): return template % ( @@ -103,7 +445,7 @@ length(unfi, rangeid) ) else: - template = '%s-%d' + template = b'%s-%d' def _rangestring(repo, rangeid): return template % ( @@ -117,7 +459,7 @@ method = opts['method'] getstablerange = _stablerangemethodmap.get(method) if getstablerange is None: - raise error.Abort('unknown stable sort method: "%s"' % method) + raise error.Abort(b'unknown stable sort method: "%s"' % method) stablerange = getstablerange(unfi) depth = stablerange.depthrev @@ -132,21 +474,21 @@ for r in ranges: subs = subranges(unfi, r) - subsstr = ', '.join(_rangestring(unfi, s) for s in subs) + subsstr = b', '.join(_rangestring(unfi, s) for s in subs) rstr = _rangestring(unfi, r) if opts['verify']: - status = 'leaf' + status = b'leaf' if 1 < length(unfi, r): - status = 'complete' + status = b'complete' revs = set(stablerange.revsfromrange(unfi, r)) subrevs = set() for s in subs: subrevs.update(stablerange.revsfromrange(unfi, s)) if revs != subrevs: - status = 'missing' - ui.status('%s [%s] - %s\n' % (rstr, status, subsstr)) + status = b'missing' + ui.status(b'%s [%s] - %s\n' % (rstr, status, subsstr)) else: - ui.status('%s - %s\n' % (rstr, subsstr)) + ui.status(b'%s - %s\n' % (rstr, subsstr)) class abstractstablerange(object): """The official API for a stablerange""" @@ -214,7 +556,7 @@ def depthrev(self, repo, rev): """depth a revision""" - return len(repo.revs('::%d', rev)) + return len(repo.revs(b'::%d', rev)) def revsfromrange(self, repo, rangeid): """return revision contained in a range @@ -620,12 +962,12 @@ rangeheap = [] for idx, r in enumerate(revs): if not idx % 1000: - compat.progress(ui, _("filling depth cache"), idx, total=nbrevs, - unit=_("changesets")) + compat.progress(ui, _(b"filling depth cache"), idx, total=nbrevs, + unit=_(b"changesets")) # warm up depth self.depthrev(repo, r) rangeheap.append((-r, (r, 0))) - compat.progress(ui, _("filling depth cache"), None, total=nbrevs) + compat.progress(ui, _(b"filling depth cache"), None, total=nbrevs) heappop = heapq.heappop heappush = heapq.heappush @@ -646,8 +988,8 @@ progress_new = time.time() if (1 < progress_each) and (0.1 < progress_new - progress_last): progress_each /= 10 - compat.progress(ui, _("filling stablerange cache"), seen, - total=nbrevs, unit=_("changesets")) + compat.progress(ui, _(b"filling stablerange cache"), seen, + total=nbrevs, unit=_(b"changesets")) progress_last = progress_new seen += 1 original.remove(value) # might have been added from other source @@ -656,13 +998,13 @@ for sub in self.subranges(repo, rangeid): if self._getsub(sub) is None: heappush(rangeheap, (-sub[0], sub)) - compat.progress(ui, _("filling stablerange cache"), None, total=nbrevs) + compat.progress(ui, _(b"filling stablerange cache"), None, 
total=nbrevs) self._tiprev = upto self._tipnode = cl.node(upto) duration = util.timer() - starttime - repo.ui.log('evoext-cache', 'updated stablerange cache in %.4f seconds\n', + repo.ui.log(b'evoext-cache', b'updated stablerange cache in %.4f seconds\n', duration) def subranges(self, repo, rangeid):
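The aligned power-of-two slicing described in the stablerange docstring above can be illustrated with a small, self-contained sketch. This is not the extension's implementation (which slices (head, skip) ranges over the stable-sorted DAG); the helper name `cover_linear` and the linear-history simplification are assumptions made only for illustration. It reproduces the [A, F] and [B, F] examples from the docstring::

    # Illustration only: cover revisions [lo, hi) of a purely linear history
    # with the largest aligned power-of-two blocks, mimicking the "standard"
    # stable ranges described above.  Not part of hgext3rd.evolve.stablerange.
    def cover_linear(lo, hi):
        blocks = []
        while lo < hi:
            size = 1
            # grow the block while it stays aligned on its own size and fits
            while lo % (size * 2) == 0 and lo + size * 2 <= hi:
                size *= 2
            blocks.append((lo, size))
            lo += size
        return blocks

    names = "ABCDEFGH"
    for start, size in cover_linear(1, 6):    # the [B, F] subset
        print("[%s]" % " -> ".join(names[start:start + size]))
    # [B]
    # [C -> D]
    # [E -> F]
    print(cover_linear(0, 6))                 # the [A, F] subset: [(0, 4), (4, 2)]

On this simplified linear model, any interval needs at most two blocks per power-of-two level, which is where the "2 x log2(length(subset))" bound quoted in the docstring comes from.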
--- a/hgext3rd/evolve/stablerangecache.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/evolve/stablerangecache.py Tue Sep 24 12:42:27 2019 +0200 @@ -14,6 +14,7 @@ import time from mercurial import ( + commands, encoding, error, localrepo, @@ -35,7 +36,7 @@ LONG_WARNING_TIME = 60 -LONG_MESSAGE = """Stable range cache is taking a while to load +LONG_MESSAGE = b"""Stable range cache is taking a while to load Your repository is probably big. @@ -99,8 +100,8 @@ warned_long = True if (1 < progress_each) and (0.1 < progress_new - progress_last): progress_each /= 10 - compat.progress(ui, _("filling stablerange cache"), seen, - total=total, unit=_("changesets")) + compat.progress(ui, _(b"filling stablerange cache"), seen, + total=total, unit=_(b"changesets")) progress_last = progress_new seen += 1 original.remove(rangeid) # might have been added from other source @@ -109,7 +110,7 @@ for sub in self.subranges(repo, rangeid): if self._getsub(sub) is None: heappush(rangeheap, sub) - compat.progress(ui, _("filling stablerange cache"), None, total=total) + compat.progress(ui, _(b"filling stablerange cache"), None, total=total) def clear(self, reset=False): super(stablerangeondiskbase, self).clear() @@ -120,10 +121,10 @@ ############################# _sqliteschema = [ - """CREATE TABLE range(rev INTEGER NOT NULL, + r"""CREATE TABLE range(rev INTEGER NOT NULL, idx INTEGER NOT NULL, PRIMARY KEY(rev, idx));""", - """CREATE TABLE subranges(listidx INTEGER NOT NULL, + r"""CREATE TABLE subranges(listidx INTEGER NOT NULL, suprev INTEGER NOT NULL, supidx INTEGER NOT NULL, subrev INTEGER NOT NULL, @@ -132,37 +133,37 @@ FOREIGN KEY (suprev, supidx) REFERENCES range(rev, idx), FOREIGN KEY (subrev, subidx) REFERENCES range(rev, idx) );""", - "CREATE INDEX subranges_index ON subranges (suprev, supidx);", - "CREATE INDEX superranges_index ON subranges (subrev, subidx);", - "CREATE INDEX range_index ON range (rev, idx);", - """CREATE TABLE meta(schemaversion INTEGER NOT NULL, + r"CREATE INDEX subranges_index ON subranges (suprev, supidx);", + r"CREATE INDEX superranges_index ON subranges (subrev, subidx);", + r"CREATE INDEX range_index ON range (rev, idx);", + r"""CREATE TABLE meta(schemaversion INTEGER NOT NULL, tiprev INTEGER NOT NULL, tipnode BLOB NOT NULL );""", ] -_newmeta = "INSERT INTO meta (schemaversion, tiprev, tipnode) VALUES (?,?,?);" -_updatemeta = "UPDATE meta SET tiprev = ?, tipnode = ?;" -_updaterange = "INSERT INTO range(rev, idx) VALUES (?,?);" -_updatesubranges = """INSERT +_newmeta = r"INSERT INTO meta (schemaversion, tiprev, tipnode) VALUES (?,?,?);" +_updatemeta = r"UPDATE meta SET tiprev = ?, tipnode = ?;" +_updaterange = r"INSERT INTO range(rev, idx) VALUES (?,?);" +_updatesubranges = r"""INSERT INTO subranges(listidx, suprev, supidx, subrev, subidx) VALUES (?,?,?,?,?);""" -_queryexist = "SELECT name FROM sqlite_master WHERE type='table' AND name='meta';" -_querymeta = "SELECT schemaversion, tiprev, tipnode FROM meta;" -_queryrange = "SELECT * FROM range WHERE (rev = ? AND idx = ?);" -_querysubranges = """SELECT subrev, subidx +_queryexist = r"SELECT name FROM sqlite_master WHERE type='table' AND name='meta';" +_querymeta = r"SELECT schemaversion, tiprev, tipnode FROM meta;" +_queryrange = r"SELECT * FROM range WHERE (rev = ? AND idx = ?);" +_querysubranges = r"""SELECT subrev, subidx FROM subranges WHERE (suprev = ? AND supidx = ?) 
ORDER BY listidx;""" -_querysuperrangesmain = """SELECT DISTINCT suprev, supidx +_querysuperrangesmain = r"""SELECT DISTINCT suprev, supidx FROM subranges WHERE %s;""" -_querysuperrangesbody = '(subrev = %d and subidx = %d)' +_querysuperrangesbody = r'(subrev = %d and subidx = %d)' def _make_querysuperranges(ranges): # building a tree of OR would allow for more ranges - body = ' OR '.join(_querysuperrangesbody % r for r in ranges) + body = r' OR '.join(_querysuperrangesbody % r for r in ranges) return _querysuperrangesmain % body class stablerangesqlbase(stablerange.stablerangecached): @@ -223,7 +224,7 @@ except (sqlite3.DatabaseError, sqlite3.OperationalError): # something is wrong with the sqlite db # Since this is a cache, we ignore it. - if '_con' in vars(self): + if r'_con' in vars(self): del self._con self._unsavedsubranges.clear() @@ -240,7 +241,7 @@ except OSError: return None con = sqlite3.connect(encoding.strfromlocal(self._path), timeout=30, - isolation_level="IMMEDIATE") + isolation_level=r"IMMEDIATE") con.text_factory = bytes return con @@ -278,11 +279,11 @@ # # operational error catch read-only and locked database # IntegrityError catch Unique constraint error that may arise - if '_con' in vars(self): + if r'_con' in vars(self): del self._con self._unsavedsubranges.clear() - repo.ui.log('evoext-cache', 'error while saving new data: %s' % exc) - repo.ui.debug('evoext-cache: error while saving new data: %s' % exc) + repo.ui.log(b'evoext-cache', b'error while saving new data: %s' % exc) + repo.ui.debug(b'evoext-cache: error while saving new data: %s' % exc) def _trysave(self, repo): repo = repo.unfiltered() @@ -294,7 +295,7 @@ if self._con is None: util.unlinkpath(self._path, ignoremissing=True) - if '_con' in vars(self): + if r'_con' in vars(self): del self._con con = self._db() @@ -319,9 +320,9 @@ # drifting is currently an issue because this means another # process might have already added the cache line we are about # to add. This will confuse sqlite - msg = _('stable-range cache: skipping write, ' - 'database drifted under my feet\n') - hint = _('(disk: %s-%s vs mem: %s-%s)\n') + msg = _(b'stable-range cache: skipping write, ' + b'database drifted under my feet\n') + hint = _(b'(disk: %s-%s vs mem: %s-%s)\n') data = (nodemod.hex(meta[2]), meta[1], nodemod.hex(self._ondisktipnode), self._ondisktiprev) repo.ui.warn(msg) @@ -375,7 +376,7 @@ def clear(self, reset=False): super(stablerangesql, self).clear(reset=reset) - if '_con' in vars(self): + if r'_con' in vars(self): del self._con self._subrangescache.clear() @@ -396,13 +397,13 @@ class mergepointsql(stablerangesql, stablerange.stablerange_mergepoint): _schemaversion = 3 - _cachefile = 'evoext_stablerange_v2.sqlite' - _cachename = 'evo-ext-stablerange-mergepoint' + _cachefile = b'evoext_stablerange_v2.sqlite' + _cachename = b'evo-ext-stablerange-mergepoint' class sqlstablerange(stablerangesqlbase, stablerange.stablerange): _schemaversion = 1 - _cachefile = 'evoext_stablerange_v1.sqlite' + _cachefile = b'evoext_stablerange_v1.sqlite' def warmup(self, repo, upto=None): self._con # make sure the data base is loaded @@ -419,10 +420,81 @@ except error.LockError: # Exceptionnally we are noisy about it since performance impact is # large We should address that before using this more widely. 
- repo.ui.warn('stable-range cache: unable to lock repo while warming\n') - repo.ui.warn('(cache will not be saved)\n') + repo.ui.warn(b'stable-range cache: unable to lock repo while warming\n') + repo.ui.warn(b'(cache will not be saved)\n') super(sqlstablerange, self).warmup(repo, upto) +@eh.command( + b'debugstablerangecache', + [] + commands.formatteropts, + _(b'')) +def debugstablerangecache(ui, repo, **opts): + """display data about the stable sort cache of a repository + """ + unfi = repo.unfiltered() + revs = unfi.revs('all()') + nbrevs = len(revs) + ui.write('number of revisions: %12d\n' % nbrevs) + heads = unfi.revs('heads(all())') + nbheads = len(heads) + ui.write('number of heads: %12d\n' % nbheads) + merge = unfi.revs('merge()') + nbmerge = len(merge) + ui.write('number of merge: %12d (%3d%%)\n' + % (nbmerge, 100 * nbmerge / nbrevs)) + cache = unfi.stablerange + allsubranges = stablerange.subrangesclosure(unfi, cache, heads) + nbsubranges = len(allsubranges) - nbrevs # we remove leafs + ui.write('number of range: %12d\n' % nbsubranges) + import collections + subsizedistrib = collections.defaultdict(lambda: 0) + + def smallsize(r): + # This is computing the size it would take to store a range for a + # revision + # + # one int for the initial/top skip + # two int per middle ranges + # one int for the revision of the bottom part + return 4 * (2 + ((len(r) - 2) * 2)) + + totalsize = 0 + + allmiddleranges = [] + for s in allsubranges: + sr = cache.subranges(repo, s) + srl = len(sr) + if srl == 0: + # leaf range are not interresting + continue + subsizedistrib[srl] += 1 + allmiddleranges.append(s) + totalsize += smallsize(sr) + + for ss in sorted(subsizedistrib): + ssc = subsizedistrib[ss] + ssp = ssc * 100 // nbsubranges + ui.write(' with %3d subranges: %12d (%3d%%)\n' % (ss, ssc, ssp)) + + depth = repo.depthcache.get + stdslice = 0 + oddslice = 0 + + for s in allmiddleranges: + head, skip = s + d = depth(head) + k = d - 1 + if (skip & k) == skip: + stdslice += 1 + else: + oddslice += 1 + + ui.write('standard slice point cut: %12d (%3d%%)\n' + % (stdslice, stdslice * 100 // nbsubranges)) + ui.write('other slice point cut: %12d (%3d%%)\n' + % (oddslice, oddslice * 100 // nbsubranges)) + ui.write('est. naive compact store: %12d bytes\n' % totalsize) + @eh.reposetup def setupcache(ui, repo): @@ -436,7 +508,7 @@ @localrepo.unfilteredmethod def destroyed(self): - if 'stablerange' in vars(self): + if r'stablerange' in vars(self): self.stablerange.clear() del self.stablerange super(stablerangerepo, self).destroyed()
--- a/hgext3rd/evolve/stablesort.py Fri Jul 19 16:26:48 2019 +0200
+++ b/hgext3rd/evolve/stablesort.py Tue Sep 24 12:42:27 2019 +0200
@@ -7,6 +7,133 @@
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
+"""Stable sorting for the mercurial graph
+
+The goal is to provide an efficient, revnum-independent way to sort revisions
+topologically. Having it independent from revnum is important to make it
+stable from one repository to another, unlocking various capabilities. For
+example it can be used for discovery purposes.
+
+This docstring describes the currently preferred solution:
+
+Basic principle:
+----------------
+
+We are always talking about sets of revisions defined by a single head
+(eg: `stablesort(::r)`)
+
+For non-merge revisions, the definition is simple::
+
+    stablesort(::r) == stablesort(p1(r)) + r
+
+For merge revisions, we reuse as much of the parents' order as possible::
+
+    pl = stablemin(parents(m))
+    ph = stablemax(parents(m))
+    stablesort(::m) == stablesort(pl) +
+                       [i for i in stablesort(ph) if i in ph % pl] +
+                       m
+
+The `ph % pl` set of revisions is called the "exclusive part". In this area we
+try to reuse as much of the stable-sorted order of `ph` as possible. In simple cases,
+`[i for i in stablesort(ph) if i in ph % pl]` is just the contiguous final range of
+`stablesort(ph)`. However, in more advanced cases, this will not be contiguous and
+we'll need to skip over multiple parts of `stablesort(ph)` to cover `ph % pl`.
+
+Another important detail is that, in practice, the sorted revisions are always
+walked backward, from the head of the set of revisions.
+
+Preexisting cached data
+-----------------------
+
+The stable sort assumes we already have 2 important properties cached for each
+changeset:
+
+1) changeset depth == len(::r)
+2) first merge == max(merge() and ::r)
+
+Caching strategy
+----------------
+
+Since we always walk from the head, the iteration mostly has to follow the
+unique parent of non-merge revisions. For merge revisions, we need to iterate
+through one of the parents before eventually coming back to the other parent.
+
+To efficiently cache the path we need to walk, we record "jumps". A jump is a
+point where the next revision will not be a parent of the current revision, but
+another point in the graph. This corresponds to points where we need to "come back
+to the other parent".
+
+Jumps are recorded using the following format::
+
+    (jump-point, jump-destination, section-size)
+
+* jump-point is the last revision number we should iterate over before jumping,
+* jump-destination is the next revision we should iterate over after the jump point,
+* section-size is the number of revisions to be iterated over before reaching jump-point.
+
+The section-size is not directly used when doing a stable-sorted walk. However,
+it is useful for higher level pieces of code to take decisions without having to
+actually walk the graph (see the stable range documentation).
+
+For each merge, we store the set of jumps that cover the exclusive side.
+ +Practical data +-------------- + +The mercurial repository has simple branching and few jumps: + + number of revisions: 69771 + number of merge: 2734 + number of jumps: 2950 + average jumps: 1.079 + median jumps: 1 + 90% jumps: 1 + 99% jumps: 3 + max jumps: 6 + jump cache size: 35 400 bytes + +Mozilla's branching is fairly simple too: + + number of revisions: 435078 + number of merge: 21035 + number of jumps: 31434 + average jumps: 1.494 + median jumps: 1 + 90% jumps: 2 + 99% jumps: 9 + max jumps: 169 + jump cache size: 377 208 bytes + +Pypy has a more complicated branching history but jumps cache remains reasonable + + number of revisions: 95010 + number of merge: 7911 + number of jumps: 24326 + average jumps: 3.075 + median jumps: 1 + 90% jumps: 5 + 99% jumps: 40 + max jumps: 329 + jump cache size: 291 912 bytes + +This still apply to larger private project: + + number of revisions: 605011 + number of merge: 118109 + number of jumps: 314925 + average jumps: 2.667 + median jumps: 1 + 90% jumps: 3 + 99% jumps: 34 + max jumps: 660 + jump cache size: 3 779 100 bytes + +It is worth noting that the last jump could be computed form other information, +removing one jump storage per merge. However this does not seems to be an issue +worth the troubles for now. +""" + import array import collections import struct @@ -69,9 +196,9 @@ method = opts['method'] sorting = _methodmap.get(method) if sorting is None: - valid_method = ', '.join(sorted(_methodmap)) - raise error.Abort('unknown sorting method: "%s"' % method, - hint='pick one of: %s' % valid_method) + valid_method = b', '.join(sorted(_methodmap)) + raise error.Abort(b'unknown sorting method: "%s"' % method, + hint=b'pick one of: %s' % valid_method) displayer = compat.changesetdisplayer(ui, repo, pycompat.byteskwargs(opts), buffered=True) @@ -84,6 +211,46 @@ displayer.flush(ctx) displayer.close() +@eh.command( + b'debugstablesortcache', + [] + commands.formatteropts, + _(b'')) +def debugstablesortcache(ui, repo, **opts): + """display data about the stable sort cache of a repository + """ + unfi = repo.unfiltered() + revs = unfi.revs('all()') + nbrevs = len(revs) + ui.write('number of revisions: %12d\n' % nbrevs) + merge = unfi.revs('merge()') + nbmerge = len(merge) + cache = unfi.stablesort + ui.write('number of merge: %12d\n' % nbmerge) + alljumps = [] + alljumpssize = [] + for r in merge: + jumps = cache.getjumps(unfi, r) + if jumps is None: + continue # not a merge + jumps = list(jumps) + alljumps.append(jumps) + alljumpssize.append(len(jumps)) + nbjumps = sum(alljumpssize) + ui.write('number of jumps: %12d\n' % nbjumps) + if not nbjumps: + return 0 + avgjumps = nbjumps / float(len(alljumpssize)) + ui.write('average jumps: %6.3f\n' % avgjumps) + alljumpssize.sort() + medianjumps = alljumpssize[len(alljumpssize) // 2] + ui.write('median jumps: %12d\n' % medianjumps) + tensjumps = alljumpssize[len(alljumpssize) * 9 // 10] + ui.write('90%% jumps: %12d\n' % tensjumps) + centsjumps = alljumpssize[len(alljumpssize) * 99 // 100] + ui.write('99%% jumps: %12d\n' % centsjumps) + ui.write('max jumps: %12d\n' % max(alljumpssize)) + ui.write('jump cache size: %12d bytes\n' % (nbjumps * 12)) + def stablesort_branchpoint(repo, revs, mergecallback=None): """return '::revs' topologically sorted in "stable" order @@ -180,7 +347,7 @@ heads = list(sorted(revs)) else: # keeps heads only - heads = sorted(repo.revs('sort(heads(%ld::%ld))', revs, revs), key=tiebreaker) + heads = sorted(repo.revs(b'sort(heads(%ld::%ld))', revs, revs), key=tiebreaker) results = [] 
while heads: @@ -246,24 +413,24 @@ return result def stablesort_mergepoint_head_basic(repo, revs, limit=None): - heads = repo.revs('sort(heads(%ld))', revs) + heads = repo.revs(b'sort(heads(%ld))', revs) if not heads: return [] elif 2 < len(heads): - raise error.Abort('cannot use head based merging, %d heads found' + raise error.Abort(b'cannot use head based merging, %d heads found' % len(heads)) head = heads.first() - revs = stablesort_mergepoint_bounded(repo, head, repo.revs('::%d', head)) + revs = stablesort_mergepoint_bounded(repo, head, repo.revs(b'::%d', head)) if limit is None: return revs return revs[-limit:] def stablesort_mergepoint_head_debug(repo, revs, limit=None): - heads = repo.revs('sort(heads(%ld))', revs) + heads = repo.revs(b'sort(heads(%ld))', revs) if not heads: return [] elif 2 < len(heads): - raise error.Abort('cannot use head based merging, %d heads found' + raise error.Abort(b'cannot use head based merging, %d heads found' % len(heads)) head = heads.first() revs = stablesort_mergepoint_head(repo, head) @@ -294,7 +461,7 @@ ps = sorted(ps, key=tiebreaker) # get the part from the highest parent. This is the part that changes - mid_revs = repo.revs('only(%d, %d)', ps[1], ps[0]) + mid_revs = repo.revs(b'only(%d, %d)', ps[1], ps[0]) if mid_revs: mid = stablesort_mergepoint_bounded(repo, ps[1], mid_revs) @@ -304,20 +471,20 @@ return bottom + mid + top def stablesort_mergepoint_head_cached(repo, revs, limit=None): - heads = repo.revs('sort(heads(%ld))', revs) + heads = repo.revs(b'sort(heads(%ld))', revs) if not heads: return [] elif 2 < len(heads): - raise error.Abort('cannot use head based merging, %d heads found' + raise error.Abort(b'cannot use head based merging, %d heads found' % len(heads)) head = heads.first() cache = stablesortcache() first = list(cache.get(repo, head, limit=limit)) second = list(cache.get(repo, head, limit=limit)) if first != second: - repo.ui.warn('stablesort-cache: initial run different from re-run:\n' - ' %s\n' - ' %s\n' % (first, second)) + repo.ui.warn(b'stablesort-cache: initial run different from re-run:\n' + b' %s\n' + b' %s\n' % (first, second)) return second class stablesortcache(object): @@ -504,11 +671,11 @@ recordjump(previous, lower, size) def stablesort_mergepoint_head_ondisk(repo, revs, limit=None): - heads = repo.revs('sort(heads(%ld))', revs) + heads = repo.revs(b'sort(heads(%ld))', revs) if not heads: return [] elif 2 < len(heads): - raise error.Abort('cannot use head based merging, %d heads found' + raise error.Abort(b'cannot use head based merging, %d heads found' % len(heads)) head = heads.first() unfi = repo.unfiltered() @@ -516,22 +683,22 @@ cache.save(unfi) return cache.get(repo, head, limit=limit) -S_INDEXSIZE = struct.Struct('>I') +S_INDEXSIZE = struct.Struct(b'>I') class ondiskstablesortcache(stablesortcache, genericcaches.changelogsourcebase): - _filepath = 'evoext-stablesortcache-00' - _cachename = 'evo-ext-stablesort' + _filepath = b'evoext-stablesortcache-00' + _cachename = b'evo-ext-stablesort' def __init__(self): super(ondiskstablesortcache, self).__init__() - self._index = array.array('l') - self._data = array.array('l') + self._index = array.array(r'l') + self._data = array.array(r'l') del self._jumps def getjumps(self, repo, rev): if len(self._index) < rev: - msg = 'stablesortcache must be warmed before use (%d < %d)' + msg = b'stablesortcache must be warmed before use (%d < %d)' msg %= (len(self._index), rev) raise error.ProgrammingError(msg) return self._getjumps(rev) @@ -579,10 +746,11 @@ total = len(data) 
def progress(pos, rev=None): - revstr = '' if rev is None else ('rev %d' % rev) - compat.progress(repo.ui, 'updating stablesort cache', - pos, revstr, unit='revision', total=total) + revstr = b'' if rev is None else (b'rev %d' % rev) + compat.progress(repo.ui, b'updating stablesort cache', + pos, revstr, unit=b'revision', total=total) + progress(0) for idx, rev in enumerate(data): parents = filterparents(repo.changelog.parentrevs(rev)) if len(parents) <= 1: @@ -600,8 +768,8 @@ def clear(self, reset=False): super(ondiskstablesortcache, self).clear() - self._index = array.array('l') - self._data = array.array('l') + self._index = array.array(r'l') + self._data = array.array(r'l') def load(self, repo): """load data from disk @@ -611,8 +779,8 @@ assert repo.filtername is None data = repo.cachevfs.tryread(self._filepath) - self._index = array.array('l') - self._data = array.array('l') + self._index = array.array(r'l') + self._data = array.array(r'l') if not data: self._cachekey = self.emptykey else: @@ -636,7 +804,7 @@ if self._cachekey is None or self._cachekey == self._ondiskkey: return try: - cachefile = repo.cachevfs(self._filepath, 'w', atomictemp=True) + cachefile = repo.cachevfs(self._filepath, b'w', atomictemp=True) # data to write headerdata = self._serializecachekey() @@ -652,8 +820,8 @@ cachefile.close() self._ondiskkey = self._cachekey except (IOError, OSError) as exc: - repo.ui.log('stablesortcache', 'could not write update %s\n' % exc) - repo.ui.debug('stablesortcache: could not write update %s\n' % exc) + repo.ui.log(b'stablesortcache', b'could not write update %s\n' % exc) + repo.ui.debug(b'stablesortcache: could not write update %s\n' % exc) @eh.reposetup def setupcache(ui, repo): @@ -668,7 +836,7 @@ @localrepo.unfilteredmethod def destroyed(self): - if 'stablesort' in vars(self): + if r'stablesort' in vars(self): self.stablesort.clear() super(stablesortrepo, self).destroyed() @@ -682,12 +850,12 @@ repo.__class__ = stablesortrepo _methodmap = { - 'branchpoint': stablesort_branchpoint, - 'basic-mergepoint': stablesort_mergepoint_multirevs, - 'basic-headstart': stablesort_mergepoint_head_basic, - 'headstart': stablesort_mergepoint_head_debug, - 'headcached': stablesort_mergepoint_head_cached, - 'headondisk': stablesort_mergepoint_head_ondisk, + b'branchpoint': stablesort_branchpoint, + b'basic-mergepoint': stablesort_mergepoint_multirevs, + b'basic-headstart': stablesort_mergepoint_head_basic, + b'headstart': stablesort_mergepoint_head_debug, + b'headcached': stablesort_mergepoint_head_cached, + b'headondisk': stablesort_mergepoint_head_ondisk, } # merge last so that repo setup wrap after that one.
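The recursive definition at the top of the stablesort.py docstring is compact enough to prototype directly. The sketch below is only an illustration of that definition on a plain dictionary graph; breaking ties between parents by revision number is an assumption made to keep it short (the real code uses a revnum-independent tie-breaker precisely so that the result is stable across repositories). It reproduces the `stablesort(::E) == [A, B, C, D, E]` example used in the stablerange documentation::

    # Illustration only of:
    #   stablesort(::m) == stablesort(pl) +
    #                      [i for i in stablesort(ph) if i in ph % pl] +
    #                      m
    # `parents` maps rev -> (p1, p2); None means "no parent".
    def stablesort(parents, head):
        ps = [p for p in parents[head] if p is not None]
        if not ps:
            return [head]
        if len(ps) == 1:
            return stablesort(parents, ps[0]) + [head]
        pl, ph = sorted(ps)          # stand-in for stablemin/stablemax
        lower = stablesort(parents, pl)
        lowerset = set(lower)
        exclusive = [r for r in stablesort(parents, ph) if r not in lowerset]
        return lower + exclusive + [head]

    # A=0, B=1, C=2, D=3, E=4; D branches off A and E merges C and D
    graph = {0: (None, None), 1: (0, None), 2: (1, None), 3: (0, None), 4: (2, 3)}
    print(stablesort(graph, 4))      # [0, 1, 2, 3, 4], i.e. [A, B, C, D, E]

In the extension itself this order is not recomputed recursively: the cached jumps described in the docstring exist so the same order can be walked backward from the head directly.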
--- a/hgext3rd/evolve/state.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/evolve/state.py Tue Sep 24 12:42:27 2019 +0200 @@ -37,7 +37,7 @@ can populate the object data reading that file """ - def __init__(self, repo, path='evolvestate', opts={}): + def __init__(self, repo, path=b'evolvestate', opts={}): self._repo = repo self.path = path self.opts = opts @@ -65,7 +65,7 @@ op = self._read() if isinstance(op, dict): self.opts.update(op) - elif self.path == 'evolvestate': + elif self.path == b'evolvestate': # it is the old evolvestate file oldop = _oldevolvestateread(self._repo) self.opts.update(oldop) @@ -79,13 +79,13 @@ we use third-party library cbor to serialize data to write in the file. """ - with self._repo.vfs(self.path, 'wb', atomictemp=True) as fp: + with self._repo.vfs(self.path, b'wb', atomictemp=True) as fp: cbor.dump(self.opts, fp) def _read(self): """reads the evolvestate file and returns a dictionary which contain data in the same format as it was before storing""" - with self._repo.vfs(self.path, 'rb') as fp: + with self._repo.vfs(self.path, b'rb') as fp: return cbor.load(fp) def delete(self): @@ -101,20 +101,20 @@ This exists for BC reasons.""" try: - f = repo.vfs('evolvestate') + f = repo.vfs(b'evolvestate') except IOError as err: if err.errno != errno.ENOENT: raise try: versionblob = f.read(4) if len(versionblob) < 4: - repo.ui.debug('ignoring corrupted evolvestate (file contains %i bits)' + repo.ui.debug(b'ignoring corrupted evolvestate (file contains %i bits)' % len(versionblob)) return None - version = struct._unpack('>I', versionblob)[0] + version = struct._unpack(b'>I', versionblob)[0] if version != 0: - msg = _('unknown evolvestate version %i') % version - raise error.Abort(msg, hint=_('upgrade your evolve')) + msg = _(b'unknown evolvestate version %i') % version + raise error.Abort(msg, hint=_(b'upgrade your evolve')) records = [] data = f.read() off = 0 @@ -122,22 +122,22 @@ while off < end: rtype = data[off] off += 1 - length = struct._unpack('>I', data[off:(off + 4)])[0] + length = struct._unpack(b'>I', data[off:(off + 4)])[0] off += 4 record = data[off:(off + length)] off += length - if rtype == 't': + if rtype == b't': rtype, record = record[0], record[1:] records.append((rtype, record)) state = {} for rtype, rdata in records: - if rtype == 'C': - state['current'] = rdata + if rtype == b'C': + state[b'current'] = rdata elif rtype.lower(): - repo.ui.debug('ignore evolve state record type %s' % rtype) + repo.ui.debug(b'ignore evolve state record type %s' % rtype) else: - raise error.Abort(_("unknown evolvestate field type '%s'") - % rtype, hint=_('upgrade your evolve')) + raise error.Abort(_(b"unknown evolvestate field type '%s'") + % rtype, hint=_(b'upgrade your evolve')) return state finally: f.close()
--- a/hgext3rd/evolve/templatekw.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/evolve/templatekw.py Tue Sep 24 12:42:27 2019 +0200 @@ -28,31 +28,31 @@ def showinstabilities(context, mapping): """List of strings. Evolution instabilities affecting the changeset (zero or more of "orphan", "content-divergent" or "phase-divergent").""" - ctx = context.resource(mapping, 'ctx') - return templatekw.compatlist(context, mapping, 'instability', + ctx = context.resource(mapping, b'ctx') + return templatekw.compatlist(context, mapping, b'instability', ctx.instabilities(), - plural='instabilities') + plural=b'instabilities') @eh.templatekeyword(b'troubles', requires=set([b'ctx', b'templ'])) def showtroubles(context, mapping): # legacy name for instabilities - ctx = context.resource(mapping, 'ctx') - return templatekw.compatlist(context, mapping, 'trouble', - ctx.instabilities(), plural='troubles') + ctx = context.resource(mapping, b'ctx') + return templatekw.compatlist(context, mapping, b'trouble', + ctx.instabilities(), plural=b'troubles') else: # older template API in hg < 4.6 @eh.templatekeyword(b'instabilities') def showinstabilities(**args): """List of strings. Evolution instabilities affecting the changeset (zero or more of "orphan", "content-divergent" or "phase-divergent").""" - ctx = args['ctx'] - return templatekw.showlist('instability', ctx.instabilities(), args, - plural='instabilities') + ctx = args[b'ctx'] + return templatekw.showlist(b'instability', ctx.instabilities(), args, + plural=b'instabilities') @eh.templatekeyword(b'troubles') def showtroubles(**args): - ctx = args['ctx'] - return templatekw.showlist('trouble', ctx.instabilities(), args, - plural='troubles') + ctx = args[b'ctx'] + return templatekw.showlist(b'trouble', ctx.instabilities(), args, + plural=b'troubles') _sp = templatekw.showpredecessors if util.safehasattr(_sp, '_requires'): @@ -91,24 +91,24 @@ """ Returns a dict with the default templates for obs fate """ # Prepare templates - verbtempl = '{verb}' - usertempl = '{if(users, " by {join(users, ", ")}")}' - succtempl = '{if(successors, " as ")}{successors}' # Bypass if limitation - datetempleq = ' (at {min_date|isodate})' - datetemplnoteq = ' (between {min_date|isodate} and {max_date|isodate})' - datetempl = '{if(max_date, "{ifeq(min_date, max_date, "%s", "%s")}")}' % (datetempleq, datetemplnoteq) + verbtempl = b'{verb}' + usertempl = b'{if(users, " by {join(users, ", ")}")}' + succtempl = b'{if(successors, " as ")}{successors}' # Bypass if limitation + datetempleq = b' (at {min_date|isodate})' + datetemplnoteq = b' (between {min_date|isodate} and {max_date|isodate})' + datetempl = b'{if(max_date, "{ifeq(min_date, max_date, "%s", "%s")}")}' % (datetempleq, datetemplnoteq) optionalusertempl = usertempl username = _getusername(ui) if username is not None: - optionalusertempl = ('{ifeq(join(users, "\0"), "%s", "", "%s")}' + optionalusertempl = (b'{ifeq(join(users, "\0"), "%s", "", "%s")}' % (username, usertempl)) # Assemble them return { - 'obsfate_quiet': verbtempl + succtempl, - 'obsfate': verbtempl + succtempl + optionalusertempl, - 'obsfate_verbose': verbtempl + succtempl + usertempl + datetempl, + b'obsfate_quiet': verbtempl + succtempl, + b'obsfate': verbtempl + succtempl + optionalusertempl, + b'obsfate_verbose': verbtempl + succtempl + usertempl + datetempl, } def obsfatedata(repo, ctx): @@ -158,18 +158,18 @@ line = [] # Verb - line.append(obsfateline['verb']) + line.append(obsfateline[b'verb']) # Successors - successors = obsfateline["successors"] + successors 
= obsfateline[b"successors"] if successors: fmtsuccessors = map(lambda s: s[:12], successors) - line.append(" as %s" % ", ".join(fmtsuccessors)) + line.append(b" as %s" % b", ".join(fmtsuccessors)) # Users - if (verbose or normal) and 'users' in obsfateline: - users = obsfateline['users'] + if (verbose or normal) and b'users' in obsfateline: + users = obsfateline[b'users'] if not verbose: # If current user is the only user, do not show anything if not in @@ -179,24 +179,24 @@ users = None if users: - line.append(" by %s" % ", ".join(users)) + line.append(b" by %s" % b", ".join(users)) # Date if verbose: - min_date = obsfateline['min_date'] - max_date = obsfateline['max_date'] + min_date = obsfateline[b'min_date'] + max_date = obsfateline[b'max_date'] if min_date == max_date: - fmtmin_date = util.datestr(min_date, '%Y-%m-%d %H:%M %1%2') - line.append(" (at %s)" % fmtmin_date) + fmtmin_date = util.datestr(min_date, b'%Y-%m-%d %H:%M %1%2') + line.append(b" (at %s)" % fmtmin_date) else: - fmtmin_date = util.datestr(min_date, '%Y-%m-%d %H:%M %1%2') - fmtmax_date = util.datestr(max_date, '%Y-%m-%d %H:%M %1%2') - line.append(" (between %s and %s)" % (fmtmin_date, fmtmax_date)) + fmtmin_date = util.datestr(min_date, b'%Y-%m-%d %H:%M %1%2') + fmtmax_date = util.datestr(max_date, b'%Y-%m-%d %H:%M %1%2') + line.append(b" (between %s and %s)" % (fmtmin_date, fmtmax_date)) - return "".join(line) + return b"".join(line) -def obsfateprinter(obsfate, ui, prefix=""): +def obsfateprinter(obsfate, ui, prefix=b""): lines = [] for raw in obsfate: lines.append(obsfatelineprinter(raw, ui)) @@ -204,7 +204,7 @@ if prefix: lines = [prefix + line for line in lines] - return "\n".join(lines) + return b"\n".join(lines) if not util.safehasattr(templatekw, 'obsfateverb'): # <= hg-4.5 @eh.templatekeyword(b"obsfatedata") @@ -213,7 +213,7 @@ values = obsfatedata(repo, ctx) if values is None: - return templatekw.showlist("obsfatedata", [], args) + return templatekw.showlist(b"obsfatedata", [], args) return _showobsfatedata(repo, ctx, values, **args) @@ -224,36 +224,36 @@ # As we can't do something like # "{join(map(nodeshort, successors), ', '}" in template, manually # create a correct textual representation - gen = ', '.join(n[:12] for n in raw['successors']) + gen = b', '.join(n[:12] for n in raw[b'successors']) - makemap = lambda x: {'successor': x} - joinfmt = lambda d: "%s" % d['successor'] - raw['successors'] = templatekw._hybrid(gen, raw['successors'], makemap, - joinfmt) + makemap = lambda x: {b'successor': x} + joinfmt = lambda d: b"%s" % d[b'successor'] + raw[b'successors'] = templatekw._hybrid(gen, raw[b'successors'], makemap, + joinfmt) # And then format them # Insert default obsfate templates - args['templ'].cache.update(obsfatedefaulttempl(repo.ui)) + args[b'templ'].cache.update(obsfatedefaulttempl(repo.ui)) if repo.ui.quiet: - name = "obsfate_quiet" + name = b"obsfate_quiet" elif repo.ui.verbose: - name = "obsfate_verbose" + name = b"obsfate_verbose" elif repo.ui.debugflag: - name = "obsfate_debug" + name = b"obsfate_debug" else: - name = "obsfate" + name = b"obsfate" # Format a single value def fmt(d): nargs = args.copy() nargs.update(d[name]) - templ = args['templ'] + templ = args[b'templ'] # HG 4.6 if hasattr(templ, "generate"): return templ.generate(name, nargs) else: - return args['templ'](name, **nargs) + return args[b'templ'](name, **nargs) # Generate a good enough string representation using templater gen = [] @@ -268,8 +268,8 @@ except StopIteration: pass - gen.append("".join(chunkstr)) - gen = "; 
".join(gen) + gen.append(b"".join(chunkstr)) + gen = b"; ".join(gen) return templatekw._hybrid(gen, values, lambda x: {name: x}, fmt)
--- a/hgext3rd/evolve/thirdparty/cbor.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/evolve/thirdparty/cbor.py Tue Sep 24 12:42:27 2019 +0200 @@ -79,23 +79,23 @@ CBOR_TAG_MIME = 36 # following text is MIME message, headers, separators and all CBOR_TAG_CBOR_FILEHEADER = 55799 # can open a file with 0xd9d9f7 -_CBOR_TAG_BIGNUM_BYTES = struct.pack('B', CBOR_TAG | CBOR_TAG_BIGNUM) +_CBOR_TAG_BIGNUM_BYTES = struct.pack(b'B', CBOR_TAG | CBOR_TAG_BIGNUM) def dumps_int(val): - "return bytes representing int val in CBOR" + b"return bytes representing int val in CBOR" if val >= 0: # CBOR_UINT is 0, so I'm lazy/efficient about not OR-ing it in. if val <= 23: - return struct.pack('B', val) + return struct.pack(b'B', val) if val <= 0x0ff: - return struct.pack('BB', CBOR_UINT8_FOLLOWS, val) + return struct.pack(b'BB', CBOR_UINT8_FOLLOWS, val) if val <= 0x0ffff: - return struct.pack('!BH', CBOR_UINT16_FOLLOWS, val) + return struct.pack(b'!BH', CBOR_UINT16_FOLLOWS, val) if val <= 0x0ffffffff: - return struct.pack('!BI', CBOR_UINT32_FOLLOWS, val) + return struct.pack(b'!BI', CBOR_UINT32_FOLLOWS, val) if val <= 0x0ffffffffffffffff: - return struct.pack('!BQ', CBOR_UINT64_FOLLOWS, val) + return struct.pack(b'!BQ', CBOR_UINT64_FOLLOWS, val) outb = _dumps_bignum_to_bytearray(val) return _CBOR_TAG_BIGNUM_BYTES + _encode_type_num(CBOR_BYTES, len(outb)) + outb val = -1 - val @@ -119,28 +119,28 @@ def dumps_float(val): - return struct.pack("!Bd", CBOR_FLOAT64, val) + return struct.pack(b"!Bd", CBOR_FLOAT64, val) -_CBOR_TAG_NEGBIGNUM_BYTES = struct.pack('B', CBOR_TAG | CBOR_TAG_NEGBIGNUM) +_CBOR_TAG_NEGBIGNUM_BYTES = struct.pack(b'B', CBOR_TAG | CBOR_TAG_NEGBIGNUM) def _encode_type_num(cbor_type, val): """For some CBOR primary type [0..7] and an auxiliary unsigned number, return CBOR encoded bytes""" assert val >= 0 if val <= 23: - return struct.pack('B', cbor_type | val) + return struct.pack(b'B', cbor_type | val) if val <= 0x0ff: - return struct.pack('BB', cbor_type | CBOR_UINT8_FOLLOWS, val) + return struct.pack(b'BB', cbor_type | CBOR_UINT8_FOLLOWS, val) if val <= 0x0ffff: - return struct.pack('!BH', cbor_type | CBOR_UINT16_FOLLOWS, val) + return struct.pack(b'!BH', cbor_type | CBOR_UINT16_FOLLOWS, val) if val <= 0x0ffffffff: - return struct.pack('!BI', cbor_type | CBOR_UINT32_FOLLOWS, val) + return struct.pack(b'!BI', cbor_type | CBOR_UINT32_FOLLOWS, val) if (((cbor_type == CBOR_NEGINT) and (val <= 0x07fffffffffffffff)) or ((cbor_type != CBOR_NEGINT) and (val <= 0x0ffffffffffffffff))): - return struct.pack('!BQ', cbor_type | CBOR_UINT64_FOLLOWS, val) + return struct.pack(b'!BQ', cbor_type | CBOR_UINT64_FOLLOWS, val) if cbor_type != CBOR_NEGINT: - raise Exception("value too big for CBOR unsigned number: {0!r}".format(val)) + raise Exception(b"value too big for CBOR unsigned number: {0!r}".format(val)) outb = _dumps_bignum_to_bytearray(val) return _CBOR_TAG_NEGBIGNUM_BYTES + _encode_type_num(CBOR_BYTES, len(outb)) + outb @@ -201,8 +201,8 @@ def dumps_bool(b): if b: - return struct.pack('B', CBOR_TRUE) - return struct.pack('B', CBOR_FALSE) + return struct.pack(b'B', CBOR_TRUE) + return struct.pack(b'B', CBOR_FALSE) def dumps_tag(t, sort_keys=False): @@ -223,7 +223,7 @@ def dumps(ob, sort_keys=False): if ob is None: - return struct.pack('B', CBOR_NULL) + return struct.pack(b'B', CBOR_NULL) if isinstance(ob, bool): return dumps_bool(ob) if _is_stringish(ob): @@ -239,7 +239,7 @@ return dumps_int(ob) if isinstance(ob, Tag): return dumps_tag(ob, sort_keys=sort_keys) - raise Exception("don't know how to cbor serialize 
object of type %s", type(ob)) + raise Exception(b"don't know how to cbor serialize object of type %s", type(ob)) # same basic signature as json.dump, but with no options (yet) @@ -260,7 +260,7 @@ self.value = value def __repr__(self): - return "Tag({0!r}, {1!r})".format(self.tag, self.value) + return b"Tag({0!r}, {1!r})".format(self.tag, self.value) def __eq__(self, other): if not isinstance(other, Tag): @@ -273,7 +273,7 @@ Parse CBOR bytes and return Python objects. """ if data is None: - raise ValueError("got None for buffer to decode in loads") + raise ValueError(b"got None for buffer to decode in loads") fp = StringIO(data) return _loads(fp)[0] @@ -296,22 +296,22 @@ aux = tag_aux elif tag_aux == CBOR_UINT8_FOLLOWS: data = fp.read(1) - aux = struct.unpack_from("!B", data, 0)[0] + aux = struct.unpack_from(b"!B", data, 0)[0] bytes_read += 1 elif tag_aux == CBOR_UINT16_FOLLOWS: data = fp.read(2) - aux = struct.unpack_from("!H", data, 0)[0] + aux = struct.unpack_from(b"!H", data, 0)[0] bytes_read += 2 elif tag_aux == CBOR_UINT32_FOLLOWS: data = fp.read(4) - aux = struct.unpack_from("!I", data, 0)[0] + aux = struct.unpack_from(b"!I", data, 0)[0] bytes_read += 4 elif tag_aux == CBOR_UINT64_FOLLOWS: data = fp.read(8) - aux = struct.unpack_from("!Q", data, 0)[0] + aux = struct.unpack_from(b"!Q", data, 0)[0] bytes_read += 8 else: - assert tag_aux == CBOR_VAR_FOLLOWS, "bogus tag {0:02x}".format(tb) + assert tag_aux == CBOR_VAR_FOLLOWS, b"bogus tag {0:02x}".format(tb) aux = None return tag, tag_aux, aux, bytes_read @@ -385,9 +385,9 @@ return ob, bytes_read def _loads(fp, limit=None, depth=0, returntags=False): - "return (object, bytes read)" + b"return (object, bytes read)" if depth > _MAX_DEPTH: - raise Exception("hit CBOR loads recursion depth limit") + raise Exception(b"hit CBOR loads recursion depth limit") tb = _read_byte(fp) @@ -397,16 +397,16 @@ # Some special cases of CBOR_7 best handled by special struct.unpack logic here if tb == CBOR_FLOAT16: data = fp.read(2) - hibyte, lowbyte = struct.unpack_from("BB", data, 0) + hibyte, lowbyte = struct.unpack_from(b"BB", data, 0) exp = (hibyte >> 2) & 0x1F mant = ((hibyte & 0x03) << 8) | lowbyte if exp == 0: val = mant * (2.0 ** -24) elif exp == 31: if mant == 0: - val = float('Inf') + val = float(b'Inf') else: - val = float('NaN') + val = float(b'NaN') else: val = (mant + 1024.0) * (2 ** (exp - 25)) if hibyte & 0x80: @@ -414,11 +414,11 @@ return (val, 3) elif tb == CBOR_FLOAT32: data = fp.read(4) - pf = struct.unpack_from("!f", data, 0) + pf = struct.unpack_from(b"!f", data, 0) return (pf[0], 5) elif tb == CBOR_FLOAT64: data = fp.read(8) - pf = struct.unpack_from("!d", data, 0) + pf = struct.unpack_from(b"!d", data, 0) return (pf[0], 9) tag, tag_aux, aux, bytes_read = _tag_aux(fp, tb) @@ -461,7 +461,7 @@ return (None, bytes_read) if tb == CBOR_UNDEFINED: return (None, bytes_read) - raise ValueError("unknown cbor tag 7 byte: {:02x}".format(tb)) + raise ValueError(b"unknown cbor tag 7 byte: {:02x}".format(tb)) def loads_bytes(fp, aux, btag=CBOR_BYTES): @@ -481,7 +481,7 @@ total_bytes_read += 1 break tag, tag_aux, aux, bytes_read = _tag_aux(fp, tb) - assert tag == btag, 'variable length value contains unexpected component' + assert tag == btag, b'variable length value contains unexpected component' ob = fp.read(aux) chunklist.append(ob) total_bytes_read += bytes_read + aux
--- a/hgext3rd/evolve/utility.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/evolve/utility.py Tue Sep 24 12:42:27 2019 +0200 @@ -17,20 +17,20 @@ compat, ) -shorttemplate = "[{label('evolve.rev', rev)}] {desc|firstline}\n" -stacktemplate = """[{label('evolve.rev', if(topicidx, "s{topicidx}", rev))}] {desc|firstline}\n""" +shorttemplate = b"[{label('evolve.rev', rev)}] {desc|firstline}\n" +stacktemplate = b"""[{label('evolve.rev', if(topicidx, "s{topicidx}", rev))}] {desc|firstline}\n""" def obsexcmsg(ui, message, important=False): - verbose = ui.configbool('experimental', 'verbose-obsolescence-exchange') + verbose = ui.configbool(b'experimental', b'verbose-obsolescence-exchange') if verbose: - message = 'OBSEXC: ' + message + message = b'OBSEXC: ' + message if important or verbose: ui.status(message) def obsexcprg(ui, *args, **kwargs): - topic = 'obsmarkers exchange' - if ui.configbool('experimental', 'verbose-obsolescence-exchange'): - topic = 'OBSEXC' + topic = b'obsmarkers exchange' + if ui.configbool(b'experimental', b'verbose-obsolescence-exchange'): + topic = b'OBSEXC' compat.progress(ui, topic, *args, **kwargs) def filterparents(parents): @@ -50,28 +50,28 @@ def shouldwarmcache(repo, tr): configbool = repo.ui.configbool config = repo.ui.config - desc = getattr(tr, 'desc', '') + desc = getattr(tr, 'desc', b'') autocase = False if tr is None and not getattr(repo, '_destroying', False): autocase = True - elif desc.startswith('serve'): + elif desc.startswith(b'serve'): autocase = True - elif desc.startswith('push') and not desc.startswith('push-response'): + elif desc.startswith(b'push') and not desc.startswith(b'push-response'): autocase = True - autocache = config('experimental', 'obshashrange.warm-cache', - 'auto') == 'auto' + autocache = config(b'experimental', b'obshashrange.warm-cache', + b'auto') == b'auto' if autocache: warm = autocase else: # note: we should not get to the default case - warm = configbool('experimental', 'obshashrange.warm-cache') - if not configbool('experimental', 'obshashrange'): + warm = configbool(b'experimental', b'obshashrange.warm-cache') + if not configbool(b'experimental', b'obshashrange'): return False if not warm: return False - maxrevs = repo.ui.configint('experimental', 'obshashrange.max-revs') + maxrevs = repo.ui.configint(b'experimental', b'obshashrange.max-revs') if maxrevs is not None and maxrevs < len(repo.unfiltered()): return False return True @@ -123,8 +123,8 @@ newer = obsutil.successorssets(repo, obs.node()) # search of a parent which is not killed while not newer: - ui.debug("stabilize target %s is plain dead," - " trying to stabilize on its parent\n" % + ui.debug(b"stabilize target %s is plain dead," + b" trying to stabilize on its parent\n" % obs) obs = obs.parents()[0] newer = obsutil.successorssets(repo, obs.node()) @@ -141,7 +141,7 @@ for successorsset in exc.successorssets for node in successorsset} -def revselectionprompt(ui, repo, revs, customheader=""): +def revselectionprompt(ui, repo, revs, customheader=b""): """function to prompt user to choose a revision from all the revs and return that revision for further tasks @@ -161,29 +161,29 @@ if not ui.interactive(): return None - promptmsg = customheader + "\n" + promptmsg = customheader + b"\n" for idx, rev in enumerate(revs): curctx = repo[rev] - revmsg = "%d: [%s] %s\n" % (idx + 1, curctx, - curctx.description().split("\n")[0]) + revmsg = b"%d: [%s] %s\n" % (idx + 1, curctx, + curctx.description().split(b"\n")[0]) promptmsg += revmsg - promptmsg += _("q: quit the prompt\n") 
- promptmsg += _("enter the index of the revision you want to select:") + promptmsg += _(b"q: quit the prompt\n") + promptmsg += _(b"enter the index of the revision you want to select:") idxselected = ui.prompt(promptmsg) intidx = None try: intidx = int(idxselected) except ValueError: - if idxselected == 'q': + if idxselected == b'q': return None - ui.write_err(_("invalid value '%s' entered for index\n") % idxselected) + ui.write_err(_(b"invalid value '%s' entered for index\n") % idxselected) return None if intidx > len(revs) or intidx <= 0: # we can make this error message better - ui.write_err(_("invalid value '%d' entered for index\n") % intidx) + ui.write_err(_(b"invalid value '%d' entered for index\n") % intidx) return None return revs[intidx - 1] @@ -206,7 +206,7 @@ # all three are different, lets concatenate the two authors # XXX: should we let the user know about concatenation of authors # by printing some message (or maybe in verbose mode) - users = set(divuser.split(', ')) - users.update(othuser.split(', ')) - user = ', '.join(sorted(users)) + users = set(divuser.split(b', ')) + users.update(othuser.split(b', ')) + user = b', '.join(sorted(users)) return user
--- a/hgext3rd/pullbundle.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/pullbundle.py Tue Sep 24 12:42:27 2019 +0200 @@ -92,10 +92,10 @@ from mercurial.i18n import _ -__version__ = '0.1.1' -testedwith = '4.4 4.5 4.6 4.7.1' -minimumhgversion = '4.4' -buglink = 'https://bz.mercurial-scm.org/' +__version__ = b'0.1.1' +testedwith = b'4.4 4.5 4.6 4.7.1' +minimumhgversion = b'4.4' +buglink = b'https://bz.mercurial-scm.org/' cmdtable = {} command = registrar.command(cmdtable) @@ -103,14 +103,14 @@ configtable = {} configitem = registrar.configitem(configtable) -configitem('pullbundle', 'cache-directory', +configitem(b'pullbundle', b'cache-directory', default=None, ) # generic wrapping def uisetup(ui): - exchange.getbundle2partsmapping['changegroup'] = _getbundlechangegrouppart + exchange.getbundle2partsmapping[b'changegroup'] = _getbundlechangegrouppart def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, common=None, **kwargs): @@ -118,13 +118,13 @@ if not kwargs.get(r'cg', True): return - version = '01' - cgversions = b2caps.get('changegroup') + version = b'01' + cgversions = b2caps.get(b'changegroup') if cgversions: # 3.1 and 3.2 ship with an empty value cgversions = [v for v in cgversions if v in changegroup.supportedoutgoingversions(repo)] if not cgversions: - raise ValueError(_('no common changegroup version')) + raise ValueError(_(b'no common changegroup version')) version = max(cgversions) outgoing = exchange._computeoutgoing(repo, heads, common) @@ -145,20 +145,20 @@ # END OF ALTERED PART if kwargs.get(r'narrow', False) and (include or exclude): - narrowspecpart = bundler.newpart('narrow:spec') + narrowspecpart = bundler.newpart(b'narrow:spec') if include: narrowspecpart.addparam( - 'include', '\n'.join(include), mandatory=True) + b'include', b'\n'.join(include), mandatory=True) if exclude: narrowspecpart.addparam( - 'exclude', '\n'.join(exclude), mandatory=True) + b'exclude', b'\n'.join(exclude), mandatory=True) def makeallcgpart(newpart, repo, outgoing, version, source, bundlecaps, filematcher, cgversions): pullbundle = not filematcher if pullbundle and not util.safehasattr(repo, 'stablerange'): - repo.ui.warn('pullbundle: required extension "evolve" are missing, skipping pullbundle\n') + repo.ui.warn(b'pullbundle: required extension "evolve" are missing, skipping pullbundle\n') pullbundle = False if filematcher: makeonecgpart(newpart, repo, None, outgoing, version, source, bundlecaps, @@ -167,8 +167,8 @@ start = util.timer() slices = sliceoutgoing(repo, outgoing) end = util.timer() - msg = _('pullbundle-cache: "missing" set sliced into %d subranges ' - 'in %f seconds\n') + msg = _(b'pullbundle-cache: "missing" set sliced into %d subranges ' + b'in %f seconds\n') repo.ui.write(msg % (len(slices), end - start)) for sliceid, sliceout in slices: makeonecgpart(newpart, repo, sliceid, sliceout, version, source, bundlecaps, @@ -192,7 +192,7 @@ missingheads = [rev(n) for n in sorted(outgoing.missingheads, reverse=True)] for head in missingheads: localslices = [] - localmissing = set(repo.revs('%ld and ::%d', missingrevs, head)) + localmissing = set(repo.revs(b'%ld and ::%d', missingrevs, head)) thisrunmissing = localmissing.copy() while localmissing: slicerevs = [] @@ -207,11 +207,11 @@ missingrevs.difference_update(slicerevs) localmissing.difference_update(slicerevs) if localmissing: - heads = list(repo.revs('heads(%ld)', localmissing)) + heads = list(repo.revs(b'heads(%ld)', localmissing)) heads.sort(key=node) head = heads.pop() if heads: - 
thisrunmissing = repo.revs('%ld and only(%d, %ld)', + thisrunmissing = repo.revs(b'%ld and only(%d, %ld)', localmissing, head, heads) @@ -220,15 +220,15 @@ if DEBUG: for s in reversed(ss): ms -= set(s) - missingbase = repo.revs('parents(%ld) and %ld', s, ms) + missingbase = repo.revs(b'parents(%ld) and %ld', s, ms) if missingbase: - repo.ui.write_err('!!! rev bundled while parents missing\n') - repo.ui.write_err(' parent: %s\n' % list(missingbase)) - pb = repo.revs('%ld and children(%ld)', s, missingbase) - repo.ui.write_err(' children: %s\n' % list(pb)) - h = repo.revs('heads(%ld)', s) - repo.ui.write_err(' heads: %s\n' % list(h)) - raise error.ProgrammingError('issuing a range before its parents') + repo.ui.write_err(b'!!! rev bundled while parents missing\n') + repo.ui.write_err(b' parent: %s\n' % list(missingbase)) + pb = repo.revs(b'%ld and children(%ld)', s, missingbase) + repo.ui.write_err(b' children: %s\n' % list(pb)) + h = repo.revs(b'heads(%ld)', s) + repo.ui.write_err(b' heads: %s\n' % list(h)) + raise error.ProgrammingError(b'issuing a range before its parents') for s in reversed(localslices): allslices.extend(s) @@ -381,8 +381,8 @@ # changegroup part construction def _changegroupinfo(repo, nodes, source): - if repo.ui.verbose or source == 'bundle': - repo.ui.status(_("%d changesets found\n") % len(nodes)) + if repo.ui.verbose or source == b'bundle': + repo.ui.status(_(b"%d changesets found\n") % len(nodes)) def _makenewstream(newpart, repo, outgoing, version, source, bundlecaps, filematcher, cgversions): @@ -408,23 +408,23 @@ def _makepartfromstream(newpart, repo, cgstream, nbchanges, version): # same as upstream code - part = newpart('changegroup', data=cgstream) + part = newpart(b'changegroup', data=cgstream) if version: - part.addparam('version', version) + part.addparam(b'version', version) - part.addparam('nbchanges', '%d' % nbchanges, + part.addparam(b'nbchanges', b'%d' % nbchanges, mandatory=False) - if 'treemanifest' in repo.requirements: - part.addparam('treemanifest', '1') + if b'treemanifest' in repo.requirements: + part.addparam(b'treemanifest', b'1') # cache management def cachedir(repo): - cachedir = repo.ui.config('pullbundle', 'cache-directory') + cachedir = repo.ui.config(b'pullbundle', b'cache-directory') if cachedir is not None: return cachedir - return repo.cachevfs.join('pullbundles') + return repo.cachevfs.join(b'pullbundles') def getcache(repo, bundlename): cdir = cachedir(repo) @@ -436,7 +436,7 @@ # opening too many file will not work. 
def data(): - with open(bundlepath, 'rb') as fd: + with open(bundlepath, r'rb') as fd: for chunk in util.filechunkiter(fd): yield chunk return data() @@ -454,7 +454,7 @@ cachefile.write(chunk) yield chunk -BUNDLEMASK = "%s-%s-%010iskip-%010isize.hg" +BUNDLEMASK = b"%s-%s-%010iskip-%010isize.hg" def makeonecgpart(newpart, repo, rangeid, outgoing, version, source, bundlecaps, filematcher, cgversions): @@ -472,19 +472,19 @@ cgstream = cachewriter(repo, bundlename, partdata[0]) partdata = (cgstream,) + partdata[1:] else: - if repo.ui.verbose or source == 'bundle': - repo.ui.status(_("%d changesets found in caches\n") % nbchanges) + if repo.ui.verbose or source == b'bundle': + repo.ui.status(_(b"%d changesets found in caches\n") % nbchanges) pversion = None if cgversions: pversion = version partdata = (cachedata, nbchanges, pversion) return _makepartfromstream(newpart, repo, *partdata) -@command('debugpullbundlecacheoverlap', - [('', 'count', 100, _('of "client" pulling')), - ('', 'min-cache', 1, _('minimum size of cached bundle')), - ], - _('hg debugpullbundlecacheoverlap [--client 100] REVSET')) +@command(b'debugpullbundlecacheoverlap', + [(b'', b'count', 100, _(b'of "client" pulling')), + (b'', b'min-cache', 1, _(b'minimum size of cached bundle')), + ], + _(b'hg debugpullbundlecacheoverlap [--client 100] REVSET')) def debugpullbundlecacheoverlap(ui, repo, *revs, **opts): '''Display statistic on bundle cache hit @@ -494,7 +494,7 @@ ''' actionrevs = scmutil.revrange(repo, revs) if not revs: - raise error.Abort('No revision selected') + raise error.Abort(b'No revision selected') count = opts['count'] min_cache = opts['min_cache'] @@ -503,12 +503,12 @@ rlen = lambda rangeid: repo.stablerange.rangelength(repo, rangeid) - repo.ui.write("gathering %d sample pulls within %d revisions\n" + repo.ui.write(b"gathering %d sample pulls within %d revisions\n" % (count, len(actionrevs))) if 1 < min_cache: - repo.ui.write(" not caching ranges smaller than %d changesets\n" % min_cache) + repo.ui.write(b" not caching ranges smaller than %d changesets\n" % min_cache) for i in range(count): - repo.ui.progress('gathering data', i, total=count) + repo.ui.progress(b'gathering data', i, total=count) outgoing = takeonesample(repo, actionrevs) ranges = sliceoutgoing(repo, outgoing) hitranges = 0 @@ -532,7 +532,7 @@ hitranges, ) pullstats.append(stats) - repo.ui.progress('gathering data', None) + repo.ui.progress(b'gathering data', None) sizes = [] changesmissing = [] @@ -563,36 +563,36 @@ cachedhits.append(hits) sizesdist = distribution(sizes) - repo.ui.write(fmtdist('pull size', sizesdist)) + repo.ui.write(fmtdist(b'pull size', sizesdist)) changesmissingdist = distribution(changesmissing) - repo.ui.write(fmtdist('non-cached changesets', changesmissingdist)) + repo.ui.write(fmtdist(b'non-cached changesets', changesmissingdist)) changesratiodist = distribution(changesratio) - repo.ui.write(fmtdist('ratio of cached changesets', changesratiodist)) + repo.ui.write(fmtdist(b'ratio of cached changesets', changesratiodist)) bundlecountdist = distribution(bundlecount) - repo.ui.write(fmtdist('bundle count', bundlecountdist)) + repo.ui.write(fmtdist(b'bundle count', bundlecountdist)) rangesratiodist = distribution(rangesratio) - repo.ui.write(fmtdist('ratio of cached bundles', rangesratiodist)) + repo.ui.write(fmtdist(b'ratio of cached bundles', rangesratiodist)) - repo.ui.write('changesets served:\n') - repo.ui.write(' total: %7d\n' % totalchanges) - repo.ui.write(' from cache: %7d (%2d%%)\n' + repo.ui.write(b'changesets 
served:\n') + repo.ui.write(b' total: %7d\n' % totalchanges) + repo.ui.write(b' from cache: %7d (%2d%%)\n' % (totalcached, (totalcached * 100 // totalchanges))) - repo.ui.write(' bundle: %7d\n' % sum(bundlecount)) + repo.ui.write(b' bundle: %7d\n' % sum(bundlecount)) cachedsizesdist = distribution(cachedsizes) - repo.ui.write(fmtdist('size of cached bundles', cachedsizesdist)) + repo.ui.write(fmtdist(b'size of cached bundles', cachedsizesdist)) cachedhitsdist = distribution(cachedhits) - repo.ui.write(fmtdist('hit on cached bundles', cachedhitsdist)) + repo.ui.write(fmtdist(b'hit on cached bundles', cachedhitsdist)) def takeonesample(repo, revs): node = repo.changelog.node pulled = random.sample(revs, max(4, len(revs) // 1000)) - pulled = repo.revs('%ld::%ld', pulled, pulled) + pulled = repo.revs(b'%ld::%ld', pulled, pulled) nodes = [node(r) for r in pulled] return outgoingfromnodes(repo, nodes) @@ -600,17 +600,17 @@ data.sort() length = len(data) return { - 'min': data[0], - '10%': data[length // 10], - '25%': data[length // 4], - '50%': data[length // 2], - '75%': data[(length // 4) * 3], - '90%': data[(length // 10) * 9], - '95%': data[(length // 20) * 19], - 'max': data[-1], + b'min': data[0], + b'10%': data[length // 10], + b'25%': data[length // 4], + b'50%': data[length // 2], + b'75%': data[(length // 4) * 3], + b'90%': data[(length // 10) * 9], + b'95%': data[(length // 20) * 19], + b'max': data[-1], } -STATSFORMAT = """{name}: +STATSFORMAT = b"""{name}: min: {min} 10%: {10%} 25%: {25%}
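
One detail in the pullbundle hunks above: the open() mode becomes r'rb' rather than b'rb', because file modes must stay native str on Python 3 while the data read back in binary mode is bytes. A standalone illustration of that split (reading this very file is only for demonstration)::

    with open(__file__, r'rb') as fd:   # mode stays a native str; Python 3 rejects bytes modes
        header = fd.read(2)             # binary mode yields bytes
    assert isinstance(header, bytes)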
--- a/hgext3rd/serverminitopic.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/serverminitopic.py Tue Sep 24 12:42:27 2019 +0200 @@ -33,7 +33,7 @@ configtable = {} configitem = registrar.configitem(configtable) - configitem('experimental', 'server-mini-topic', + configitem(b'experimental', b'server-mini-topic', default=False, ) @@ -44,7 +44,7 @@ """ enabled = getattr(repo, '_hasminitopic', None) if enabled is None: - enabled = (repo.ui.configbool('experimental', 'server-mini-topic') + enabled = (repo.ui.configbool(b'experimental', b'server-mini-topic') and not repo.publishing()) repo._hasminitopic = enabled return enabled @@ -54,10 +54,10 @@ def topicbranch(orig, self): branch = orig(self) if hasminitopic(self._repo) and self.phase(): - topic = self._changeset.extra.get('topic') + topic = self._changeset.extra.get(b'topic') if topic is not None: topic = encoding.tolocal(topic) - branch = '%s:%s' % (branch, topic) + branch = b'%s:%s' % (branch, topic) return branch ### avoid caching topic data in rev-branch-cache @@ -67,7 +67,7 @@ def _init__(self, *args, **kwargs): super(revbranchcacheoverlay, self).__init__(*args, **kwargs) - if 'branchinfo' in vars(self): + if r'branchinfo' in vars(self): del self.branchinfo def branchinfo(self, rev, changelog=None): @@ -95,7 +95,7 @@ class topicawarerbc(revbranchcacheoverlay, cache.__class__): pass cache.__class__ = topicawarerbc - if 'branchinfo' in vars(cache): + if r'branchinfo' in vars(cache): del cache.branchinfo self._revbranchcache = cache return self._revbranchcache @@ -120,7 +120,7 @@ if revs: s = hashlib.sha1() for rev in revs: - s.update('%d;' % rev) + s.update(b'%d;' % rev) key = s.digest() return key @@ -138,8 +138,8 @@ branchmap.branchcache = previous _publiconly = set([ - 'base', - 'immutable', + b'base', + b'immutable', ]) def mighttopic(repo): @@ -216,7 +216,7 @@ def wireprotocaps(orig, repo, proto): caps = orig(repo, proto) if hasminitopic(repo): - caps.append('topics') + caps.append(b'topics') return caps # wrap the necessary bit
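
Similarly, the r'branchinfo' membership tests in serverminitopic.py stay native str because vars()/__dict__ keys are str on Python 3, so a bytes key would never match there. A minimal sketch of that behaviour (the class is hypothetical, not evolve code; the second assertion describes Python 3 only)::

    class Cache(object):
        def __init__(self):
            self.branchinfo = {}

    c = Cache()
    assert r'branchinfo' in vars(c)       # native-str key is found
    assert b'branchinfo' not in vars(c)   # a bytes key does not match on Python 3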
--- a/hgext3rd/topic/__init__.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/topic/__init__.py Tue Sep 24 12:42:27 2019 +0200 @@ -160,34 +160,34 @@ cmdtable = {} command = registrar.command(cmdtable) -colortable = {'topic.active': 'green', - 'topic.list.unstablecount': 'red', - 'topic.list.headcount.multiple': 'yellow', - 'topic.list.behindcount': 'cyan', - 'topic.list.behinderror': 'red', - 'stack.index': 'yellow', - 'stack.index.base': 'none dim', - 'stack.desc.base': 'none dim', - 'stack.shortnode.base': 'none dim', - 'stack.state.base': 'dim', - 'stack.state.clean': 'green', - 'stack.index.current': 'cyan', # random pick - 'stack.state.current': 'cyan bold', # random pick - 'stack.desc.current': 'cyan', # random pick - 'stack.shortnode.current': 'cyan', # random pick - 'stack.state.orphan': 'red', - 'stack.state.content-divergent': 'red', - 'stack.state.phase-divergent': 'red', - 'stack.summary.behindcount': 'cyan', - 'stack.summary.behinderror': 'red', - 'stack.summary.headcount.multiple': 'yellow', +colortable = {b'topic.active': b'green', + b'topic.list.unstablecount': b'red', + b'topic.list.headcount.multiple': b'yellow', + b'topic.list.behindcount': b'cyan', + b'topic.list.behinderror': b'red', + b'stack.index': b'yellow', + b'stack.index.base': b'none dim', + b'stack.desc.base': b'none dim', + b'stack.shortnode.base': b'none dim', + b'stack.state.base': b'dim', + b'stack.state.clean': b'green', + b'stack.index.current': b'cyan', # random pick + b'stack.state.current': b'cyan bold', # random pick + b'stack.desc.current': b'cyan', # random pick + b'stack.shortnode.current': b'cyan', # random pick + b'stack.state.orphan': b'red', + b'stack.state.content-divergent': b'red', + b'stack.state.phase-divergent': b'red', + b'stack.summary.behindcount': b'cyan', + b'stack.summary.behinderror': b'red', + b'stack.summary.headcount.multiple': b'yellow', # default color to help log output and thg # (first pick I could think off, update as needed - 'log.topic': 'green_background', - 'topic.active': 'green', - } + b'log.topic': b'green_background', + b'topic.active': b'green', + } -__version__ = b'0.16.1.dev' +__version__ = b'0.17.0.dev' testedwith = b'4.5.2 4.6.2 4.7 4.8 4.9 5.0 5.1' minimumhgversion = b'4.5' @@ -200,25 +200,25 @@ configtable = {} configitem = registrar.configitem(configtable) - configitem('experimental', 'enforce-topic', + configitem(b'experimental', b'enforce-topic', default=False, ) - configitem('experimental', 'enforce-single-head', + configitem(b'experimental', b'enforce-single-head', default=False, ) - configitem('experimental', 'topic-mode', + configitem(b'experimental', b'topic-mode', default=None, ) - configitem('experimental', 'topic.publish-bare-branch', + configitem(b'experimental', b'topic.publish-bare-branch', default=False, ) - configitem('experimental', 'topic.allow-publish', + configitem(b'experimental', b'topic.allow-publish', default=configitems.dynamicdefault, ) - configitem('_internal', 'keep-topic', + configitem(b'_internal', b'keep-topic', default=False, ) - configitem('experimental', 'topic-mode.server', + configitem(b'experimental', b'topic-mode.server', default=configitems.dynamicdefault, ) @@ -229,25 +229,25 @@ # nobody else did so far. 
from mercurial import configitems extraitem = functools.partial(configitems._register, ui._knownconfig) - if ('experimental' not in ui._knownconfig - or not ui._knownconfig['experimental'].get('thg.displaynames')): - extraitem('experimental', 'thg.displaynames', + if (b'experimental' not in ui._knownconfig + or not ui._knownconfig[b'experimental'].get(b'thg.displaynames')): + extraitem(b'experimental', b'thg.displaynames', default=None, ) - if ('devel' not in ui._knownconfig - or not ui._knownconfig['devel'].get('random')): - extraitem('devel', 'randomseed', + if (b'devel' not in ui._knownconfig + or not ui._knownconfig[b'devel'].get(b'random')): + extraitem(b'devel', b'randomseed', default=None, ) # we need to do old style declaration for <= 4.5 templatekeyword = registrar.templatekeyword() -post45template = 'requires=' in templatekeyword.__doc__ +post45template = r'requires=' in templatekeyword.__doc__ def _contexttopic(self, force=False): if not (force or self.mutable()): - return '' - return self.extra().get(constants.extrakey, '') + return b'' + return self.extra().get(constants.extrakey, b'') context.basectx.topic = _contexttopic def _contexttopicidx(self): @@ -264,8 +264,8 @@ return None context.basectx.topicidx = _contexttopicidx -stackrev = re.compile(r'^s\d+$') -topicrev = re.compile(r'^t\d+$') +stackrev = re.compile(br'^s\d+$') +topicrev = re.compile(br'^t\d+$') hastopicext = common.hastopicext @@ -275,38 +275,38 @@ idx = int(name[1:]) tname = topic = repo.currenttopic if topic: - ttype = 'topic' + ttype = b'topic' revs = list(stack.stack(repo, topic=topic)) else: - ttype = 'branch' + ttype = b'branch' tname = branch = repo[None].branch() revs = list(stack.stack(repo, branch=branch)) elif topicrev.match(name): idx = int(name[1:]) - ttype = 'topic' + ttype = b'topic' tname = topic = repo.currenttopic if not tname: - raise error.Abort(_('cannot resolve "%s": no active topic') % name) + raise error.Abort(_(b'cannot resolve "%s": no active topic') % name) revs = list(stack.stack(repo, topic=topic)) if revs is not None: try: r = revs[idx] except IndexError: - if ttype == 'topic': - msg = _('cannot resolve "%s": %s "%s" has only %d changesets') - elif ttype == 'branch': - msg = _('cannot resolve "%s": %s "%s" has only %d non-public changesets') + if ttype == b'topic': + msg = _(b'cannot resolve "%s": %s "%s" has only %d changesets') + elif ttype == b'branch': + msg = _(b'cannot resolve "%s": %s "%s" has only %d non-public changesets') raise error.Abort(msg % (name, ttype, tname, len(revs) - 1)) # t0 or s0 can be None if r == -1 and idx == 0: - msg = _('the %s "%s" has no %s') + msg = _(b'the %s "%s" has no %s') raise error.Abort(msg % (ttype, tname, name)) return [repo[r].node()] if name not in repo.topics: return [] node = repo.changelog.node - return [node(rev) for rev in repo.revs('topic(%s)', name)] + return [node(rev) for rev in repo.revs(b'topic(%s)', name)] def _nodemap(repo, node): ctx = repo[node] @@ -321,22 +321,22 @@ topicmap.modsetup(ui) setupimportexport(ui) - extensions.afterloaded('rebase', _fixrebase) + extensions.afterloaded(b'rebase', _fixrebase) flow.installpushflag(ui) - entry = extensions.wrapcommand(commands.table, 'commit', commitwrap) - entry[1].append(('t', 'topic', '', - _("use specified topic"), _('TOPIC'))) + entry = extensions.wrapcommand(commands.table, b'commit', commitwrap) + entry[1].append((b't', b'topic', b'', + _(b"use specified topic"), _(b'TOPIC'))) - entry = extensions.wrapcommand(commands.table, 'push', pushoutgoingwrap) - entry[1].append(('t', 
'topic', '', - _("topic to push"), _('TOPIC'))) + entry = extensions.wrapcommand(commands.table, b'push', pushoutgoingwrap) + entry[1].append((b't', b'topic', b'', + _(b"topic to push"), _(b'TOPIC'))) - entry = extensions.wrapcommand(commands.table, 'outgoing', + entry = extensions.wrapcommand(commands.table, b'outgoing', pushoutgoingwrap) - entry[1].append(('t', 'topic', '', - _("topic to push"), _('TOPIC'))) + entry[1].append((b't', b'topic', b'', + _(b"topic to push"), _(b'TOPIC'))) extensions.wrapfunction(cmdutil, 'buildcommittext', committextwrap) extensions.wrapfunction(merge, 'update', mergeupdatewrap) @@ -344,20 +344,20 @@ # behaviour of changing topic and I can't find a better way # to do that as scmutil.revsingle returns the rev number and hence we can't # plug into logic for this into mergemod.update(). - extensions.wrapcommand(commands.table, 'update', checkt0) + extensions.wrapcommand(commands.table, b'update', checkt0) try: - evolve = extensions.find('evolve') + evolve = extensions.find(b'evolve') extensions.wrapfunction(evolve.rewriteutil, "presplitupdate", presplitupdatetopic) except (KeyError, AttributeError): pass - cmdutil.summaryhooks.add('topic', summaryhook) + cmdutil.summaryhooks.add(b'topic', summaryhook) if not post45template: - templatekw.keywords['topic'] = topickw - templatekw.keywords['topicidx'] = topicidxkw + templatekw.keywords[b'topic'] = topickw + templatekw.keywords[b'topicidx'] = topicidxkw # Wrap workingctx extra to return the topic name extensions.wrapfunction(context.workingctx, '__init__', wrapinit) # Wrap changelog.add to drop empty topic @@ -369,9 +369,9 @@ repo = repo.unfiltered() - if repo.ui.config('experimental', 'thg.displaynames') is None: - repo.ui.setconfig('experimental', 'thg.displaynames', 'topics', - source='topic-extension') + if repo.ui.config(b'experimental', b'thg.displaynames') is None: + repo.ui.setconfig(b'experimental', b'thg.displaynames', b'topics', + source=b'topic-extension') class topicrepo(repo.__class__): @@ -380,15 +380,15 @@ def _restrictcapabilities(self, caps): caps = super(topicrepo, self)._restrictcapabilities(caps) - caps.add('topics') + caps.add(b'topics') return caps def commit(self, *args, **kwargs): - backup = self.ui.backupconfig('ui', 'allowemptycommit') + backup = self.ui.backupconfig(b'ui', b'allowemptycommit') try: - if self.currenttopic != self['.'].topic(): + if self.currenttopic != self[b'.'].topic(): # bypass the core "nothing changed" logic - self.ui.setconfig('ui', 'allowemptycommit', True) + self.ui.setconfig(b'ui', b'allowemptycommit', True) return super(topicrepo, self).commit(*args, **kwargs) finally: self.ui.restoreconfig(backup) @@ -404,7 +404,7 @@ if current: ctx.extra()[constants.extrakey] = current if (isinstance(ctx, context.memctx) - and ctx.extra().get('amend_source') + and ctx.extra().get(b'amend_source') and ctx.topic() and not self.currenttopic): # we are amending and need to remove a topic @@ -415,16 +415,16 @@ def topics(self): if self._topics is not None: return self._topics - topics = set(['', self.currenttopic]) - for c in self.set('not public()'): + topics = set([b'', self.currenttopic]) + for c in self.set(b'not public()'): topics.add(c.topic()) - topics.remove('') + topics.remove(b'') self._topics = topics return topics @property def currenttopic(self): - return self.vfs.tryread('topic') + return self.vfs.tryread(b'topic') # overwritten at the instance level by topicmap.py _autobranchmaptopic = True @@ -441,7 +441,7 @@ if branch is None: branch = self[None].branch() if 
self.currenttopic: - branch = "%s:%s" % (branch, self.currenttopic) + branch = b"%s:%s" % (branch, self.currenttopic) return super(topicrepo, self).branchheads(branch=branch, start=start, closed=closed) @@ -464,11 +464,11 @@ def transaction(self, desc, *a, **k): ctr = self.currenttransaction() tr = super(topicrepo, self).transaction(desc, *a, **k) - if desc in ('strip', 'repair') or ctr is not None: + if desc in (b'strip', b'repair') or ctr is not None: return tr reporef = weakref.ref(self) - if self.ui.configbool('experimental', 'enforce-single-head'): + if self.ui.configbool(b'experimental', b'enforce-single-head'): if util.safehasattr(tr, 'validator'): # hg <= 4.7 origvalidator = tr.validator else: @@ -484,10 +484,10 @@ else: tr._validator = validator - topicmodeserver = self.ui.config('experimental', - 'topic-mode.server', 'ignore') - ispush = (desc.startswith('push') or desc.startswith('serve')) - if (topicmodeserver != 'ignore' and ispush): + topicmodeserver = self.ui.config(b'experimental', + b'topic-mode.server', b'ignore') + ispush = (desc.startswith(b'push') or desc.startswith(b'serve')) + if (topicmodeserver != b'ignore' and ispush): if util.safehasattr(tr, 'validator'): # hg <= 4.7 origvalidator = tr.validator else: @@ -502,9 +502,9 @@ else: tr._validator = validator - elif (self.ui.configbool('experimental', 'topic.publish-bare-branch') - and (desc.startswith('push') - or desc.startswith('serve')) + elif (self.ui.configbool(b'experimental', b'topic.publish-bare-branch') + and (desc.startswith(b'push') + or desc.startswith(b'serve')) ): origclose = tr.close trref = weakref.ref(tr) @@ -515,8 +515,8 @@ flow.publishbarebranch(repo, tr2) origclose() tr.close = close - allow_publish = self.ui.configbool('experimental', - 'topic.allow-publish', + allow_publish = self.ui.configbool(b'experimental', + b'topic.allow-publish', True) if not allow_publish: if util.safehasattr(tr, 'validator'): # hg <= 4.7 @@ -547,62 +547,62 @@ csetcount = stack.stack(repo, topic=ct).changesetcount empty = csetcount == 0 if empty and not ctwasempty: - ui.status("active topic '%s' is now empty\n" % ct) + ui.status(b"active topic '%s' is now empty\n" % ct) trnames = getattr(tr, 'names', getattr(tr, '_names', ())) - if ('phase' in trnames - or any(n.startswith('push-response') + if (b'phase' in trnames + or any(n.startswith(b'push-response') for n in trnames)): - ui.status(_("(use 'hg topic --clear' to clear it if needed)\n")) - hint = _("(see 'hg help topics' for more information)\n") + ui.status(_(b"(use 'hg topic --clear' to clear it if needed)\n")) + hint = _(b"(see 'hg help topics' for more information)\n") if ctwasempty and not empty: if csetcount == 1: - msg = _("active topic '%s' grew its first changeset\n%s") + msg = _(b"active topic '%s' grew its first changeset\n%s") ui.status(msg % (ct, hint)) else: - msg = _("active topic '%s' grew its %s first changesets\n%s") + msg = _(b"active topic '%s' grew its %s first changesets\n%s") ui.status(msg % (ct, csetcount, hint)) - tr.addpostclose('signalcurrenttopicempty', currenttopicempty) + tr.addpostclose(b'signalcurrenttopicempty', currenttopicempty) return tr repo.__class__ = topicrepo repo._topics = None if util.safehasattr(repo, 'names'): repo.names.addnamespace(namespaces.namespace( - 'topics', 'topic', namemap=_namemap, nodemap=_nodemap, + b'topics', b'topic', namemap=_namemap, nodemap=_nodemap, listnames=lambda repo: repo.topics)) if post45template: @templatekeyword(b'topic', requires={b'ctx'}) def topickw(context, mapping): """:topic: String. 
The topic of the changeset""" - ctx = context.resource(mapping, 'ctx') + ctx = context.resource(mapping, b'ctx') return ctx.topic() @templatekeyword(b'topicidx', requires={b'ctx'}) def topicidxkw(context, mapping): """:topicidx: Integer. Index of the changeset as a stack alias""" - ctx = context.resource(mapping, 'ctx') + ctx = context.resource(mapping, b'ctx') return ctx.topicidx() else: def topickw(**args): """:topic: String. The topic of the changeset""" - return args['ctx'].topic() + return args[b'ctx'].topic() def topicidxkw(**args): """:topicidx: Integer. Index of the changeset as a stack alias""" - return args['ctx'].topicidx() + return args[b'ctx'].topicidx() def wrapinit(orig, self, repo, *args, **kwargs): orig(self, repo, *args, **kwargs) if not hastopicext(repo): return if constants.extrakey not in self._extra: - if getattr(repo, 'currenttopic', ''): + if getattr(repo, 'currenttopic', b''): self._extra[constants.extrakey] = repo.currenttopic else: # Empty key will be dropped from extra by another hack at the changegroup level - self._extra[constants.extrakey] = '' + self._extra[constants.extrakey] = b'' def wrapadd(orig, cl, manifest, files, desc, transaction, p1, p2, user, date=None, extra=None, p1copies=None, p2copies=None, @@ -679,13 +679,13 @@ age = opts.get('age') if current and topic: - raise error.Abort(_("cannot use --current when setting a topic")) + raise error.Abort(_(b"cannot use --current when setting a topic")) if current and clear: - raise error.Abort(_("cannot use --current and --clear")) + raise error.Abort(_(b"cannot use --current and --clear")) if clear and topic: - raise error.Abort(_("cannot use --clear when setting a topic")) + raise error.Abort(_(b"cannot use --clear when setting a topic")) if age and topic: - raise error.Abort(_("cannot use --age while setting a topic")) + raise error.Abort(_(b"cannot use --age while setting a topic")) touchedrevs = set() if rev: @@ -694,50 +694,50 @@ if topic: topic = topic.strip() if not topic: - raise error.Abort(_("topic name cannot consist entirely of whitespaces")) + raise error.Abort(_(b"topic name cannot consist entirely of whitespaces")) # Have some restrictions on the topic name just like bookmark name - scmutil.checknewlabel(repo, topic, 'topic') + scmutil.checknewlabel(repo, topic, b'topic') rmatch = re.match(br'[-_.\w]+', topic) if not rmatch or rmatch.group(0) != topic: - helptxt = _("topic names can only consist of alphanumeric, '-'" - " '_' and '.' characters") - raise error.Abort(_("invalid topic name: '%s'") % topic, hint=helptxt) + helptxt = _(b"topic names can only consist of alphanumeric, '-'" + b" '_' and '.' 
characters") + raise error.Abort(_(b"invalid topic name: '%s'") % topic, hint=helptxt) if list: - ui.pager('topics') + ui.pager(b'topics') if clear or rev: - raise error.Abort(_("cannot use --clear or --rev with --list")) + raise error.Abort(_(b"cannot use --clear or --rev with --list")) if not topic: topic = repo.currenttopic if not topic: - raise error.Abort(_('no active topic to list')) + raise error.Abort(_(b'no active topic to list')) return stack.showstack(ui, repo, topic=topic, opts=pycompat.byteskwargs(opts)) if touchedrevs: if not obsolete.isenabled(repo, obsolete.createmarkersopt): - raise error.Abort(_('must have obsolete enabled to change topics')) + raise error.Abort(_(b'must have obsolete enabled to change topics')) if clear: topic = None elif opts.get('current'): topic = repo.currenttopic elif not topic: - raise error.Abort('changing topic requires a topic name or --clear') - if repo.revs('%ld and public()', touchedrevs): - raise error.Abort("can't change topic of a public change") + raise error.Abort(b'changing topic requires a topic name or --clear') + if repo.revs(b'%ld and public()', touchedrevs): + raise error.Abort(b"can't change topic of a public change") wl = lock = txn = None try: wl = repo.wlock() lock = repo.lock() - txn = repo.transaction('rewrite-topics') + txn = repo.transaction(b'rewrite-topics') rewrote = _changetopics(ui, repo, touchedrevs, topic) txn.close() if topic is None: - ui.status('cleared topic on %d changesets\n' % rewrote) + ui.status(b'cleared topic on %d changesets\n' % rewrote) else: - ui.status('changed topic on %d changesets to "%s"\n' % (rewrote, - topic)) + ui.status(b'changed topic on %d changesets to "%s"\n' % (rewrote, + topic)) finally: lockmod.release(txn, lock, wl) repo.invalidate() @@ -748,37 +748,37 @@ if ct: st = stack.stack(repo, topic=ct) if not st: - ui.status(_('clearing empty topic "%s"\n') % ct) + ui.status(_(b'clearing empty topic "%s"\n') % ct) return _changecurrenttopic(repo, None) if topic: if not ct: - ui.status(_('marked working directory as topic: %s\n') % topic) + ui.status(_(b'marked working directory as topic: %s\n') % topic) return _changecurrenttopic(repo, topic) - ui.pager('topics') + ui.pager(b'topics') # `hg topic --current` ret = 0 if current and not ct: - ui.write_err(_('no active topic\n')) + ui.write_err(_(b'no active topic\n')) ret = 1 elif current: - fm = ui.formatter('topic', pycompat.byteskwargs(opts)) - namemask = '%s\n' - label = 'topic.active' + fm = ui.formatter(b'topic', pycompat.byteskwargs(opts)) + namemask = b'%s\n' + label = b'topic.active' fm.startitem() - fm.write('topic', namemask, ct, label=label) + fm.write(b'topic', namemask, ct, label=label) fm.end() else: _listtopics(ui, repo, opts) return ret -@command('stack', [ - ('c', 'children', None, - _('display data about children outside of the stack')) +@command(b'stack', [ + (b'c', b'children', None, + _(b'display data about children outside of the stack')) ] + commands.formatteropts, - _('hg stack [TOPIC]')) -def cmdstack(ui, repo, topic='', **opts): + _(b'hg stack [TOPIC]')) +def cmdstack(ui, repo, topic=b'', **opts): """list all changesets in a topic and other information List the current topic by default. 
@@ -792,15 +792,15 @@ topic = repo.currenttopic if topic is None: branch = repo[None].branch() - ui.pager('stack') + ui.pager(b'stack') return stack.showstack(ui, repo, branch=branch, topic=topic, opts=pycompat.byteskwargs(opts)) -@command('debugcb|debugconvertbookmark', [ - ('b', 'bookmark', '', _('bookmark to convert to topic')), - ('', 'all', None, _('convert all bookmarks to topics')), +@command(b'debugcb|debugconvertbookmark', [ + (b'b', b'bookmark', b'', _(b'bookmark to convert to topic')), + (b'', b'all', None, _(b'convert all bookmarks to topics')), ], - _('[-b BOOKMARK] [--all]')) + _(b'[-b BOOKMARK] [--all]')) def debugconvertbookmark(ui, repo, **opts): """Converts a bookmark to a topic with the same name. """ @@ -809,9 +809,9 @@ convertall = opts.get('all') if convertall and bookmark: - raise error.Abort(_("cannot use '--all' and '-b' together")) + raise error.Abort(_(b"cannot use '--all' and '-b' together")) if not (convertall or bookmark): - raise error.Abort(_("you must specify either '--all' or '-b'")) + raise error.Abort(_(b"you must specify either '--all' or '-b'")) bmstore = repo._bookmarks @@ -836,12 +836,12 @@ try: node = bmstore[bookmark] except KeyError: - raise error.Abort(_("no such bookmark exists: '%s'") % bookmark) + raise error.Abort(_(b"no such bookmark exists: '%s'") % bookmark) revnum = repo[node].rev() if len(nodetobook[node]) > 1: - ui.status(_("skipping revision '%d' as it has multiple bookmarks " - "on it\n") % revnum) + ui.status(_(b"skipping revision '%d' as it has multiple bookmarks " + b"on it\n") % revnum) return targetrevs = _findconvertbmarktopic(repo, bookmark) if targetrevs: @@ -853,11 +853,11 @@ if revnum in skipped: continue if len(nodetobook[revnode]) > 1: - ui.status(_("skipping '%d' as it has multiple bookmarks on" - " it\n") % revnum) + ui.status(_(b"skipping '%d' as it has multiple bookmarks on" + b" it\n") % revnum) skipped.append(revnum) continue - if bmark == '@': + if bmark == b'@': continue targetrevs = _findconvertbmarktopic(repo, bmark) if targetrevs: @@ -865,7 +865,7 @@ if actions: try: - tr = repo.transaction('debugconvertbookmark') + tr = repo.transaction(b'debugconvertbookmark') for ((bmark, revnum), targetrevs) in sorted(actions.items()): _applyconvertbmarktopic(ui, repo, targetrevs, revnum, bmark, tr) tr.close() @@ -875,7 +875,7 @@ lockmod.release(lock, wlock) # inspired from mercurial.repair.stripbmrevset -CONVERTBOOKREVSET = """ +CONVERTBOOKREVSET = b""" not public() and ( ancestors(bookmark(%s)) and not ancestors( @@ -913,9 +913,9 @@ # changeset if rewrote == 0: return - ui.status(_('changed topic to "%s" on %d revisions\n') % (bmark, + ui.status(_(b'changed topic to "%s" on %d revisions\n') % (bmark, rewrote)) - ui.debug('removing bookmark "%s" from "%d"' % (bmark, old)) + ui.debug(b'removing bookmark "%s" from "%d"' % (bmark, old)) bookmarks.delete(repo, tr, [bmark]) def _changecurrenttopic(repo, newtopic): @@ -923,11 +923,11 @@ if newtopic: with repo.wlock(): - with repo.vfs.open('topic', 'w') as f: + with repo.vfs.open(b'topic', b'w') as f: f.write(newtopic) else: - if repo.vfs.exists('topic'): - repo.vfs.unlink('topic') + if repo.vfs.exists(b'topic'): + repo.vfs.unlink(b'topic') def _changetopics(ui, repo, revs, newtopic): """ Changes topic to newtopic of all the revisions in the revset and return @@ -946,8 +946,8 @@ except error.ManifestLookupError: return None fixedextra = dict(c.extra()) - ui.debug('old node id is %s\n' % node.hex(c.node())) - ui.debug('origextra: %r\n' % fixedextra) + ui.debug(b'old node id is 
%s\n' % node.hex(c.node())) + ui.debug(b'origextra: %r\n' % fixedextra) oldtopic = fixedextra.get(constants.extrakey, None) if oldtopic == newtopic: continue @@ -956,16 +956,16 @@ else: fixedextra[constants.extrakey] = newtopic fixedextra[constants.changekey] = c.hex() - if 'amend_source' in fixedextra: + if b'amend_source' in fixedextra: # TODO: right now the commitctx wrapper in # topicrepo overwrites the topic in extra if # amend_source is set to support 'hg commit # --amend'. Support for amend should be adjusted # to not be so invasive. - del fixedextra['amend_source'] - ui.debug('changing topic of %s from %s to %s\n' % ( - c, oldtopic or '<none>', newtopic or '<none>')) - ui.debug('fixedextra: %r\n' % fixedextra) + del fixedextra[b'amend_source'] + ui.debug(b'changing topic of %s from %s to %s\n' % ( + c, oldtopic or b'<none>', newtopic or b'<none>')) + ui.debug(b'fixedextra: %r\n' % fixedextra) # While changing topic of set of linear commits, make sure that # we base our commits on new parent rather than old parent which # was obsoleted while changing the topic @@ -986,18 +986,18 @@ # phase handling commitphase = c.phase() - overrides = {('phases', 'new-commit'): commitphase} - with repo.ui.configoverride(overrides, 'changetopic'): + overrides = {(b'phases', b'new-commit'): commitphase} + with repo.ui.configoverride(overrides, b'changetopic'): newnode = repo.commitctx(mc) successors[c.node()] = (newnode,) - ui.debug('new node id is %s\n' % node.hex(newnode)) + ui.debug(b'new node id is %s\n' % node.hex(newnode)) rewrote += 1 # create obsmarkers and move bookmarks # XXX we should be creating marker as we go instead of only at the end, # this makes the operations more modulars - scmutil.cleanupnodes(repo, successors, 'changetopics') + scmutil.cleanupnodes(repo, successors, b'changetopics') # move the working copy too wctx = repo[None] @@ -1009,12 +1009,12 @@ return rewrote def _listtopics(ui, repo, opts): - fm = ui.formatter('topics', pycompat.byteskwargs(opts)) + fm = ui.formatter(b'topics', pycompat.byteskwargs(opts)) activetopic = repo.currenttopic - namemask = '%s' + namemask = b'%s' if repo.topics: maxwidth = max(len(t) for t in repo.topics) - namemask = '%%-%is' % maxwidth + namemask = b'%%-%is' % maxwidth if opts.get('age'): # here we sort by age and topic name topicsdata = sorted(_getlasttouched(repo, repo.topics)) @@ -1026,70 +1026,70 @@ ) for age, topic, date, user in topicsdata: fm.startitem() - marker = ' ' - label = 'topic' + marker = b' ' + label = b'topic' active = (topic == activetopic) if active: - marker = '*' - label = 'topic.active' + marker = b'*' + label = b'topic.active' if not ui.quiet: # registering the active data is made explicitly later - fm.plain(' %s ' % marker, label=label) - fm.write('topic', namemask, topic, label=label) + fm.plain(b' %s ' % marker, label=label) + fm.write(b'topic', namemask, topic, label=label) fm.data(active=active) if ui.quiet: - fm.plain('\n') + fm.plain(b'\n') continue - fm.plain(' (') + fm.plain(b' (') if date: if age == -1: - timestr = 'empty and active' + timestr = b'empty and active' else: timestr = templatefilters.age(date) - fm.write('lasttouched', '%s', timestr, label='topic.list.time') + fm.write(b'lasttouched', b'%s', timestr, label=b'topic.list.time') if user: - fm.write('usertouched', ' by %s', user, label='topic.list.user') + fm.write(b'usertouched', b' by %s', user, label=b'topic.list.user') if date: - fm.plain(', ') + fm.plain(b', ') data = stack.stack(repo, topic=topic) if ui.verbose: - fm.write('branches+', 'on 
branch: %s', - '+'.join(data.branches), # XXX use list directly after 4.0 is released - label='topic.list.branches') + fm.write(b'branches+', b'on branch: %s', + b'+'.join(data.branches), # XXX use list directly after 4.0 is released + label=b'topic.list.branches') - fm.plain(', ') - fm.write('changesetcount', '%d changesets', data.changesetcount, - label='topic.list.changesetcount') + fm.plain(b', ') + fm.write(b'changesetcount', b'%d changesets', data.changesetcount, + label=b'topic.list.changesetcount') if data.unstablecount: - fm.plain(', ') - fm.write('unstablecount', '%d unstable', + fm.plain(b', ') + fm.write(b'unstablecount', b'%d unstable', data.unstablecount, - label='topic.list.unstablecount') + label=b'topic.list.unstablecount') headcount = len(data.heads) if 1 < headcount: - fm.plain(', ') - fm.write('headcount', '%d heads', + fm.plain(b', ') + fm.write(b'headcount', b'%d heads', headcount, - label='topic.list.headcount.multiple') + label=b'topic.list.headcount.multiple') if ui.verbose: # XXX we should include the data even when not verbose behindcount = data.behindcount if 0 < behindcount: - fm.plain(', ') - fm.write('behindcount', '%d behind', + fm.plain(b', ') + fm.write(b'behindcount', b'%d behind', behindcount, - label='topic.list.behindcount') + label=b'topic.list.behindcount') elif -1 == behindcount: - fm.plain(', ') - fm.write('behinderror', '%s', - _('ambiguous destination: %s') % data.behinderror, - label='topic.list.behinderror') - fm.plain(')\n') + fm.plain(b', ') + fm.write(b'behinderror', b'%s', + _(b'ambiguous destination: %s') % data.behinderror, + label=b'topic.list.behinderror') + fm.plain(b')\n') fm.end() def _getlasttouched(repo, topics): @@ -1102,7 +1102,7 @@ age = -1 user = None maxtime = (0, 0) - trevs = repo.revs("topic(%s)", topic) + trevs = repo.revs(b"topic(%s)", topic) # Need to check for the time of all changesets in the topic, whether # they are obsolete of non-heads # XXX: can we just rely on the max rev number for this @@ -1119,7 +1119,7 @@ for marker in obsmarkers: rt = marker.date() if rt[0] > maxtime[0]: - user = marker.metadata().get('user', user) + user = marker.metadata().get(b'user', user) maxtime = rt username = stack.parseusername(user) @@ -1129,31 +1129,31 @@ yield (age, topic, maxtime, username) def summaryhook(ui, repo): - t = getattr(repo, 'currenttopic', '') + t = getattr(repo, 'currenttopic', b'') if not t: return # i18n: column positioning for "hg summary" - ui.write(_("topic: %s\n") % ui.label(t, 'topic.active')) + ui.write(_(b"topic: %s\n") % ui.label(t, b'topic.active')) _validmode = [ - 'ignore', - 'warning', - 'enforce', - 'enforce-all', - 'random', - 'random-all', + b'ignore', + b'warning', + b'enforce', + b'enforce-all', + b'random', + b'random-all', ] def _configtopicmode(ui): """ Parse the config to get the topicmode """ - topicmode = ui.config('experimental', 'topic-mode') + topicmode = ui.config(b'experimental', b'topic-mode') # Fallback to read enforce-topic if topicmode is None: - enforcetopic = ui.configbool('experimental', 'enforce-topic') + enforcetopic = ui.configbool(b'experimental', b'enforce-topic') if enforcetopic: - topicmode = "enforce" + topicmode = b"enforce" if topicmode not in _validmode: topicmode = _validmode[0] @@ -1167,37 +1167,37 @@ ismergecommit = len(repo[None].parents()) == 2 notopic = not repo.currenttopic - mayabort = (topicmode == "enforce" and not ismergecommit) - maywarn = (topicmode == "warning" - or (topicmode == "enforce" and ismergecommit)) + mayabort = (topicmode == b"enforce" and not 
ismergecommit) + maywarn = (topicmode == b"warning" + or (topicmode == b"enforce" and ismergecommit)) mayrandom = False - if topicmode == "random": + if topicmode == b"random": mayrandom = not ismergecommit - elif topicmode == "random-all": + elif topicmode == b"random-all": mayrandom = True - if topicmode == 'enforce-all': + if topicmode == b'enforce-all': ismergecommit = False mayabort = True maywarn = False - hint = _("see 'hg help -e topic.topic-mode' for details") + hint = _(b"see 'hg help -e topic.topic-mode' for details") if opts.get('topic'): t = opts['topic'] - with repo.vfs.open('topic', 'w') as f: + with repo.vfs.open(b'topic', b'w') as f: f.write(t) elif opts.get('amend'): pass elif notopic and mayabort: - msg = _("no active topic") + msg = _(b"no active topic") raise error.Abort(msg, hint=hint) elif notopic and maywarn: - ui.warn(_("warning: new draft commit without topic\n")) + ui.warn(_(b"warning: new draft commit without topic\n")) if not ui.quiet: - ui.warn(("(%s)\n") % hint) + ui.warn((b"(%s)\n") % hint) elif notopic and mayrandom: - with repo.vfs.open('topic', 'w') as f: + with repo.vfs.open(b'topic', b'w') as f: f.write(randomname.randomtopicname(ui)) return orig(ui, repo, *args, **opts) @@ -1206,13 +1206,13 @@ if hastopicext(repo): t = repo.currenttopic if t: - ret = ret.replace("\nHG: branch", - "\nHG: topic '%s'\nHG: branch" % t) + ret = ret.replace(b"\nHG: branch", + b"\nHG: topic '%s'\nHG: branch" % t) return ret def pushoutgoingwrap(orig, ui, repo, *args, **opts): if opts.get('topic'): - topicrevs = repo.revs('topic(%s) - obsolete()', opts['topic']) + topicrevs = repo.revs(b'topic(%s) - obsolete()', opts['topic']) opts.setdefault('rev', []).extend(topicrevs) return orig(ui, repo, *args, **opts) @@ -1232,37 +1232,37 @@ # rebased commit. We have explicitly stored in config if rebase is # running. 
ot = repo.currenttopic - if repo.ui.hasconfig('experimental', 'topicrebase'): + if repo.ui.hasconfig(b'experimental', b'topicrebase'): isrebase = True - if repo.ui.configbool('_internal', 'keep-topic'): + if repo.ui.configbool(b'_internal', b'keep-topic'): ist0 = True if ((not partial and not branchmerge) or isrebase) and not ist0: - t = '' + t = b'' pctx = repo[node] if pctx.phase() > phases.public: t = pctx.topic() - with repo.vfs.open('topic', 'w') as f: + with repo.vfs.open(b'topic', b'w') as f: f.write(t) if t and t != ot: - repo.ui.status(_("switching to topic %s\n") % t) + repo.ui.status(_(b"switching to topic %s\n") % t) if ot and not t: st = stack.stack(repo, topic=ot) if not st: - repo.ui.status(_('clearing empty topic "%s"\n') % ot) + repo.ui.status(_(b'clearing empty topic "%s"\n') % ot) elif ist0: - repo.ui.status(_("preserving the current topic '%s'\n") % ot) + repo.ui.status(_(b"preserving the current topic '%s'\n") % ot) return ret finally: wlock.release() def checkt0(orig, ui, repo, node=None, rev=None, *args, **kwargs): - thezeros = set(['t0', 'b0', 's0']) - backup = repo.ui.backupconfig('_internal', 'keep-topic') + thezeros = set([b't0', b'b0', b's0']) + backup = repo.ui.backupconfig(b'_internal', b'keep-topic') try: if node in thezeros or rev in thezeros: - repo.ui.setconfig('_internal', 'keep-topic', 'yes', - source='topic-extension') + repo.ui.setconfig(b'_internal', b'keep-topic', b'yes', + source=b'topic-extension') return orig(ui, repo, node=node, rev=rev, *args, **kwargs) finally: repo.ui.restoreconfig(backup) @@ -1276,8 +1276,8 @@ extra[constants.extrakey] = ctx.topic() def setrebaseconfig(orig, ui, repo, **opts): - repo.ui.setconfig('experimental', 'topicrebase', 'yes', - source='topic-extension') + repo.ui.setconfig(b'experimental', b'topicrebase', b'yes', + source=b'topic-extension') return orig(ui, repo, **opts) def new_init(orig, *args, **kwargs): @@ -1289,12 +1289,12 @@ return runtime try: - rebase = extensions.find("rebase") + rebase = extensions.find(b"rebase") extensions.wrapfunction(rebase.rebaseruntime, '__init__', new_init) # This exists to store in the config that rebase is running so that we can # update the topic according to rebase. This is a hack and should be removed # when we have better options. - extensions.wrapcommand(rebase.cmdtable, 'rebase', setrebaseconfig) + extensions.wrapcommand(rebase.cmdtable, b'rebase', setrebaseconfig) except KeyError: pass @@ -1303,20 +1303,20 @@ def _exporttopic(seq, ctx): topic = ctx.topic() if topic: - return 'EXP-Topic %s' % topic + return b'EXP-Topic %s' % topic return None def _importtopic(repo, patchdata, extra, opts): - if 'topic' in patchdata: - extra['topic'] = patchdata['topic'] + if b'topic' in patchdata: + extra[b'topic'] = patchdata[b'topic'] def setupimportexport(ui): """run at ui setup time to install import/export logic""" - cmdutil.extraexport.append('topic') - cmdutil.extraexportmap['topic'] = _exporttopic - cmdutil.extrapreimport.append('topic') - cmdutil.extrapreimportmap['topic'] = _importtopic - patch.patchheadermap.append(('EXP-Topic', 'topic')) + cmdutil.extraexport.append(b'topic') + cmdutil.extraexportmap[b'topic'] = _exporttopic + cmdutil.extrapreimport.append(b'topic') + cmdutil.extrapreimportmap[b'topic'] = _importtopic + patch.patchheadermap.append((b'EXP-Topic', b'topic')) ## preserve topic during split
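
The topic-mode hunks above follow a small, reusable pattern: read a multi-valued mode from the configuration, fall back to an older boolean knob, and clamp unknown values to a safe default. A minimal standalone sketch of that fallback, assuming a plain dict-like ``config`` with dotted keys instead of Mercurial's ``ui`` object (``read_topic_mode`` is an illustrative name, not a helper from the extension)::

    VALID_MODES = [b'ignore', b'warning', b'enforce', b'enforce-all',
                   b'random', b'random-all']

    def read_topic_mode(config):
        """Return the effective topic mode from a dict-like config.

        Mirrors the fallback order used above: prefer the newer
        'experimental.topic-mode' key, then the legacy boolean
        'experimental.enforce-topic', and finally default to 'ignore'.
        """
        mode = config.get(b'experimental.topic-mode')
        if mode is None and config.get(b'experimental.enforce-topic'):
            mode = b'enforce'
        if mode not in VALID_MODES:
            mode = VALID_MODES[0]  # unknown values degrade to 'ignore'
        return mode

    assert read_topic_mode({}) == b'ignore'
    assert read_topic_mode({b'experimental.enforce-topic': True}) == b'enforce'
    assert read_topic_mode({b'experimental.topic-mode': b'random'}) == b'random'
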
--- a/hgext3rd/topic/compat.py	Fri Jul 19 16:26:48 2019 +0200
+++ b/hgext3rd/topic/compat.py	Tue Sep 24 12:42:27 2019 +0200
@@ -30,5 +30,7 @@
     def branchmapitems(branchmap):
         return branchmap.items()
 else:
+    # py3-transform: off
     def branchmapitems(branchmap):
         return branchmap.iteritems()
+    # py3-transform: on
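
The ``# py3-transform: off`` / ``# py3-transform: on`` comments added here fence off the Python 2-only ``iteritems()`` branch so the automated string-rewriting pass leaves it untouched. A version-independent helper is another way to express the same compatibility shim; this is a standalone sketch, not code from the extension::

    import sys

    def dict_items(mapping):
        """Return an items iterator on both Python 2 and Python 3.

        dict.iteritems() is gone in Python 3, so callers either branch on
        the interpreter version (as compat.py does) or route every call
        through a helper like this one.
        """
        if sys.version_info[0] >= 3:
            return mapping.items()
        return mapping.iteritems()  # pragma: no cover - Python 2 only

    for name, heads in dict_items({b'default': [b'deadbeef']}):
        print(name, heads)
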
--- a/hgext3rd/topic/constants.py	Fri Jul 19 16:26:48 2019 +0200
+++ b/hgext3rd/topic/constants.py	Tue Sep 24 12:42:27 2019 +0200
@@ -1,2 +1,2 @@
-extrakey = 'topic'
-changekey = '_rewrite_noise'
+extrakey = b'topic'
+changekey = b'_rewrite_noise'
--- a/hgext3rd/topic/destination.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/topic/destination.py Tue Sep 24 12:42:27 2019 +0200 @@ -13,11 +13,11 @@ ) from .evolvebits import builddependencies -def _destmergebranch(orig, repo, action='merge', sourceset=None, +def _destmergebranch(orig, repo, action=b'merge', sourceset=None, onheadcheck=True, destspace=None): # XXX: take destspace into account if sourceset is None: - p1 = repo['.'] + p1 = repo[b'.'] else: # XXX: using only the max here is flacky. That code should eventually # be updated to take care of the whole sourceset. @@ -26,11 +26,11 @@ if common.hastopicext(repo): top = p1.topic() if top: - revs = repo.revs('topic(%s) - obsolete()', top) + revs = repo.revs(b'topic(%s) - obsolete()', top) deps, rdeps = builddependencies(repo, revs) heads = [r for r in revs if not rdeps[r]] if onheadcheck and p1.rev() not in heads: - raise error.Abort(_("not at topic head, update or explicit")) + raise error.Abort(_(b"not at topic head, update or explicit")) # prune heads above the source otherheads = set(heads) @@ -43,20 +43,20 @@ # nothing to do at the topic level bhead = ngtip(repo, p1.branch(), all=True) if not bhead: - raise error.NoMergeDestAbort(_("nothing to merge")) + raise error.NoMergeDestAbort(_(b"nothing to merge")) elif 1 == len(bhead): return bhead[0] else: - msg = _("branch '%s' has %d heads " - "- please merge with an explicit rev") - hint = _("run 'hg heads .' to see heads") + msg = _(b"branch '%s' has %d heads " + b"- please merge with an explicit rev") + hint = _(b"run 'hg heads .' to see heads") raise error.ManyMergeDestAbort(msg % (p1.branch(), len(bhead)), hint=hint) elif len(otherheads) == 1: return otherheads.pop() else: - msg = _("topic '%s' has %d heads " - "- please merge with an explicit rev") % (top, len(heads)) + msg = _(b"topic '%s' has %d heads " + b"- please merge with an explicit rev") % (top, len(heads)) raise error.ManyMergeDestAbort(msg) return orig(repo, action, sourceset, onheadcheck, destspace=destspace) @@ -67,23 +67,23 @@ movemark = node = None topic = repo.currenttopic if topic: - revs = repo.revs('.::topic(%s)', topic) + revs = repo.revs(b'.::topic(%s)', topic) else: revs = [] if not revs: return None, None, None node = revs.last() if bookmarks.isactivewdirparent(repo): - movemark = repo['.'].node() + movemark = repo[b'.'].node() return node, movemark, None def desthistedit(orig, ui, repo): if not common.hastopicext(repo): return None - if not (ui.config('histedit', 'defaultrev', None) is None + if not (ui.config(b'histedit', b'defaultrev', None) is None and repo.currenttopic): return orig(ui, repo) - revs = repo.revs('::. and stack()') + revs = repo.revs(b'::. and stack()') if revs: return revs.min() return None @@ -107,7 +107,7 @@ def modsetup(ui): """run a uisetup time to install all destinations wrapping""" extensions.wrapfunction(destutil, '_destmergebranch', _destmergebranch) - bridx = destutil.destupdatesteps.index('branch') - destutil.destupdatesteps.insert(bridx, 'topic') - destutil.destupdatestepmap['topic'] = _destupdatetopic + bridx = destutil.destupdatesteps.index(b'branch') + destutil.destupdatesteps.insert(bridx, b'topic') + destutil.destupdatestepmap[b'topic'] = _destupdatetopic extensions.wrapfunction(destutil, 'desthistedit', desthistedit)
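
The merge-destination hunk above picks "other heads of the current topic" by keeping only revisions that nothing else in the topic depends on. The selection itself is a one-liner over a dependency map; here is a self-contained sketch with a toy revision graph (``topic_heads`` and the ``children`` mapping are illustrative, not the extension's ``builddependencies`` output format)::

    def topic_heads(revs, children):
        """Return the revisions in `revs` with no descendant inside `revs`.

        Mirrors the `heads = [r for r in revs if not rdeps[r]]` step above:
        a revision nothing in the topic builds on is a head of that topic.
        """
        revset = set(revs)
        return [r for r in revs
                if not any(c in revset for c in children.get(r, ()))]

    # 10 -> 11 -> 12 and 10 -> 13 form one topic with two heads
    children = {10: [11, 13], 11: [12], 12: [], 13: []}
    assert topic_heads([10, 11, 12, 13], children) == [12, 13]
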
--- a/hgext3rd/topic/discovery.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/topic/discovery.py Tue Sep 24 12:42:27 2019 +0200 @@ -28,12 +28,12 @@ repo = pushop.repo.unfiltered() remote = pushop.remote - publishing = ('phases' not in remote.listkeys('namespaces') - or bool(remote.listkeys('phases').get('publishing', False))) + publishing = (b'phases' not in remote.listkeys(b'namespaces') + or bool(remote.listkeys(b'phases').get(b'publishing', False))) if not common.hastopicext(pushop.repo): return orig(pushop, *args, **kwargs) - elif ((publishing or not remote.capable('topics')) + elif ((publishing or not remote.capable(b'topics')) and not getattr(pushop, 'publish', False)): return orig(pushop, *args, **kwargs) @@ -41,7 +41,7 @@ remotebranchmap = None origremotebranchmap = remote.branchmap publishednode = [c.node() for c in pushop.outdatedphases] - publishedset = repo.revs('ancestors(%ln + %ln)', + publishedset = repo.revs(b'ancestors(%ln + %ln)', publishednode, pushop.remotephases.publicheads) @@ -51,17 +51,17 @@ # drop topic information from changeset about to be published result = collections.defaultdict(list) for branch, heads in compat.branchmapitems(origremotebranchmap()): - if ':' not in branch: + if b':' not in branch: result[branch].extend(heads) else: - namedbranch = branch.split(':', 1)[0] + namedbranch = branch.split(b':', 1)[0] for h in heads: r = rev(h) if r is not None and r in publishedset: result[namedbranch].append(h) else: result[branch].append(h) - for heads in result.itervalues(): + for heads in result.values(): heads.sort() return result @@ -78,7 +78,7 @@ return branch topic = ctx.topic() if topic: - branch = "%s:%s" % (branch, topic) + branch = b"%s:%s" % (branch, topic) return branch ctx.branch = branch @@ -96,7 +96,7 @@ return branch, close topic = repo[rev].topic() if topic: - branch = "%s:%s" % (branch, topic) + branch = b"%s:%s" % (branch, topic) return branch, close rbc.branchinfo = branchinfo @@ -107,17 +107,17 @@ repo.__class__ = repocls if remotebranchmap is not None: remote.branchmap = remotebranchmap - unxx = repo.filtered('unfiltered-topic') + unxx = repo.filtered(b'unfiltered-topic') repo.unfiltered = lambda: unxx pushop.repo = repo summary = orig(pushop) for key, value in summary.items(): - if ':' in key: # This is a topic + if b':' in key: # This is a topic if value[0] is None and value[1]: summary[key] = ([value[1][0]], ) + value[1:] return summary finally: - if 'unfiltered' in vars(repo): + if r'unfiltered' in vars(repo): del repo.unfiltered repo.__class__ = oldrepocls if remotebranchmap is not None: @@ -147,7 +147,7 @@ def _nbheads(repo): data = {} for b in repo.branchmap().iterbranches(): - if ':' in b[0]: + if b':' in b[0]: continue data[b[0]] = len(b[1]) return data @@ -158,7 +158,7 @@ if not common.hastopicext(op.repo) or op.repo.publishing(): return tr = op.gettransaction() - if tr.hookargs['source'] not in ('push', 'serve'): # not a push + if tr.hookargs[b'source'] not in (b'push', b'serve'): # not a push return tr._prepushheads = _nbheads(op.repo) reporef = weakref.ref(op.repo) @@ -175,11 +175,11 @@ for branch, oldnb in tr._prepushheads.items(): newnb = finalheads.pop(branch, 0) if oldnb < newnb: - msg = _('push create a new head on branch "%s"' % branch) + msg = _(b'push create a new head on branch "%s"' % branch) raise error.Abort(msg) for branch, newnb in finalheads.items(): if 1 < newnb: - msg = _('push create more than 1 head on new branch "%s"' + msg = _(b'push create more than 1 head on new branch "%s"' % branch) raise 
error.Abort(msg) return oldvalidator(tr) @@ -191,7 +191,7 @@ def _pushb2phases(orig, pushop, bundler): if common.hastopicext(pushop.repo): - checktypes = ('check:heads', 'check:updated-heads') + checktypes = (b'check:heads', b'check:updated-heads') hascheck = any(p.type in checktypes for p in bundler._parts) if not hascheck and pushop.outdatedphases: exchange._pushb2ctxcheckheads(pushop, bundler) @@ -199,8 +199,8 @@ def wireprotocaps(orig, repo, proto): caps = orig(repo, proto) - if common.hastopicext(repo) and repo.peer().capable('topics'): - caps.append('topics') + if common.hastopicext(repo) and repo.peer().capable(b'topics'): + caps.append(b'topics') return caps def modsetup(ui): @@ -211,11 +211,11 @@ # we need a proper wrap b2 part stuff extensions.wrapfunction(bundle2, 'handlecheckheads', handlecheckheads) bundle2.handlecheckheads.params = frozenset() - bundle2.parthandlermapping['check:heads'] = bundle2.handlecheckheads + bundle2.parthandlermapping[b'check:heads'] = bundle2.handlecheckheads if util.safehasattr(bundle2, 'handlecheckupdatedheads'): # we still need a proper wrap b2 part stuff extensions.wrapfunction(bundle2, 'handlecheckupdatedheads', handlecheckheads) bundle2.handlecheckupdatedheads.params = frozenset() - bundle2.parthandlermapping['check:updated-heads'] = bundle2.handlecheckupdatedheads + bundle2.parthandlermapping[b'check:updated-heads'] = bundle2.handlecheckupdatedheads extensions.wrapfunction(exchange, '_pushb2phases', _pushb2phases) - exchange.b2partsgenmapping['phase'] = exchange._pushb2phases + exchange.b2partsgenmapping[b'phase'] = exchange._pushb2phases
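
Topics travel in the branchmap as ``branch:topic`` pseudo-branches, and the discovery hunk above collapses those keys back onto the bare branch for heads that are about to be published. A minimal sketch of that regrouping over plain dictionaries (the function name and the byte-string node ids are made up for the example)::

    import collections

    def strip_published_topics(branchmap, published):
        """Move published heads from 'branch:topic' keys to the bare branch.

        `branchmap` maps names (possibly b'branch:topic') to lists of head
        nodes; `published` is the set of nodes about to become public.
        """
        result = collections.defaultdict(list)
        for branch, heads in branchmap.items():
            if b':' not in branch:
                result[branch].extend(heads)
                continue
            bare = branch.split(b':', 1)[0]
            for head in heads:
                result[bare if head in published else branch].append(head)
        for heads in result.values():
            heads.sort()
        return dict(result)

    bm = {b'default': [b'aaa'], b'default:feature': [b'bbb', b'ccc']}
    print(strip_published_topics(bm, {b'bbb'}))
    # {b'default': [b'aaa', b'bbb'], b'default:feature': [b'ccc']}
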
--- a/hgext3rd/topic/evolvebits.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/topic/evolvebits.py Tue Sep 24 12:42:27 2019 +0200 @@ -78,8 +78,8 @@ newer = obsutil.successorssets(repo, obs.node()) # search of a parent which is not killed while not newer: - ui.debug("stabilize target %s is plain dead," - " trying to stabilize on its parent\n" % + ui.debug(b"stabilize target %s is plain dead," + b" trying to stabilize on its parent\n" % obs) obs = obs.parents()[0] newer = obsutil.successorssets(repo, obs.node()) @@ -88,7 +88,7 @@ # we should pick as arbitrary one raise MultipleSuccessorsError(newer) elif 1 < len(newer[0]): - splitheads = list(repo.revs('heads(%ln::%ln)', newer[0], newer[0])) + splitheads = list(repo.revs(b'heads(%ln::%ln)', newer[0], newer[0])) if 1 < len(splitheads): # split case, See if we can make sense of it. raise MultipleSuccessorsError(newer)
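
The evolvebits hunk walks obsolescence markers until it finds a live successor to stabilize on, falling back to parents when a changeset was pruned. Ignoring splits and prunes, the core loop is just "follow the successor chain to its end"; a deliberately simplified sketch over a plain mapping (this is not how successorssets() is structured internally)::

    def latest_successor(node, successors):
        """Follow a successor mapping until reaching a node with no successor.

        `successors` maps an obsolete node to its single replacement; real
        obsolescence markers can also describe splits and prunes, which the
        code above has to handle explicitly.
        """
        seen = set()
        while node in successors:
            if node in seen:  # defensive: cycle in the markers
                raise ValueError('successor cycle detected')
            seen.add(node)
            node = successors[node]
        return node

    succ = {b'aaa': b'bbb', b'bbb': b'ccc'}
    assert latest_successor(b'aaa', succ) == b'ccc'
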
--- a/hgext3rd/topic/flow.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/topic/flow.py Tue Sep 24 12:42:27 2019 +0200 @@ -16,19 +16,19 @@ ) def enforcesinglehead(repo, tr): - branchmap = repo.filtered('visible').branchmap() + branchmap = repo.filtered(b'visible').branchmap() for name, heads in compat.branchmapitems(branchmap): if len(heads) > 1: hexs = [node.short(n) for n in heads] - raise error.Abort(_('%d heads on "%s"') % (len(heads), name), - hint=(', '.join(hexs))) + raise error.Abort(_(b'%d heads on "%s"') % (len(heads), name), + hint=(b', '.join(hexs))) def publishbarebranch(repo, tr): """Publish changeset without topic""" - if 'node' not in tr.hookargs: # no new node + if b'node' not in tr.hookargs: # no new node return - startnode = node.bin(tr.hookargs['node']) - topublish = repo.revs('not public() and (%n:) - hidden() - topic()', startnode) + startnode = node.bin(tr.hookargs[b'node']) + topublish = repo.revs(b'not public() and (%n:) - hidden() - topic()', startnode) if topublish: cl = repo.changelog nodes = [cl.node(r) for r in topublish] @@ -36,41 +36,41 @@ def rejectuntopicedchangeset(repo, tr): """Reject the push if there are changeset without topic""" - if 'node' not in tr.hookargs: # no new revs + if b'node' not in tr.hookargs: # no new revs return - startnode = node.bin(tr.hookargs['node']) + startnode = node.bin(tr.hookargs[b'node']) - mode = repo.ui.config('experimental', 'topic-mode.server', 'ignore') + mode = repo.ui.config(b'experimental', b'topic-mode.server', b'ignore') - untopiced = repo.revs('not public() and (%n:) - hidden() - topic()', startnode) + untopiced = repo.revs(b'not public() and (%n:) - hidden() - topic()', startnode) if untopiced: num = len(untopiced) fnode = repo[untopiced.first()].hex()[:10] if num == 1: - msg = _("%s") % fnode + msg = _(b"%s") % fnode else: - msg = _("%s and %d more") % (fnode, num - 1) - if mode == 'warning': - fullmsg = _("pushed draft changeset without topic: %s\n") + msg = _(b"%s and %d more") % (fnode, num - 1) + if mode == b'warning': + fullmsg = _(b"pushed draft changeset without topic: %s\n") repo.ui.warn(fullmsg % msg) - elif mode == 'enforce': - fullmsg = _("rejecting draft changesets: %s") + elif mode == b'enforce': + fullmsg = _(b"rejecting draft changesets: %s") raise error.Abort(fullmsg % msg) else: - repo.ui.warn(_("unknown 'topic-mode.server': %s\n" % mode)) + repo.ui.warn(_(b"unknown 'topic-mode.server': %s\n" % mode)) def reject_publish(repo, tr): """prevent a transaction to be publish anything""" published = set() - for r, (o, n) in tr.changes['phases'].items(): + for r, (o, n) in tr.changes[b'phases'].items(): if n == phases.public: published.add(r) if published: r = min(published) - msg = "rejecting publishing of changeset %s" % repo[r] + msg = b"rejecting publishing of changeset %s" % repo[r] if len(published) > 1: - msg += ' and %d others' % (len(published) - 1) + msg += b' and %d others' % (len(published) - 1) raise error.Abort(msg) def wrappush(orig, repo, remote, *args, **kwargs): @@ -80,8 +80,8 @@ opargs = kwargs.get('opargs') if opargs is None: opargs = {} - newargs['opargs'] = opargs.copy() - newargs['opargs']['publish'] = True + newargs[r'opargs'] = opargs.copy() + newargs[r'opargs'][b'publish'] = True return orig(repo, remote, *args, **newargs) def extendpushoperation(orig, self, *args, **kwargs): @@ -95,16 +95,16 @@ if not pushop.remotephases.publishing: unfi = pushop.repo.unfiltered() droots = pushop.remotephases.draftroots - revset = '%ln and (not public() or %ln::)' + revset = b'%ln and (not 
public() or %ln::)' future = list(unfi.set(revset, pushop.futureheads, droots)) pushop.outdatedphases = future def installpushflag(ui): - entry = extensions.wrapcommand(commands.table, 'push', wrappush) - if not any(opt for opt in entry[1] if opt[1] == 'publish'): # hg <= 4.9 - entry[1].append(('', 'publish', False, - _('push the changeset as public'))) + entry = extensions.wrapcommand(commands.table, b'push', wrappush) + if not any(opt for opt in entry[1] if opt[1] == b'publish'): # hg <= 4.9 + entry[1].append((b'', b'publish', False, + _(b'push the changeset as public'))) extensions.wrapfunction(exchange.pushoperation, '__init__', extendpushoperation) extensions.wrapfunction(exchange, '_pushdiscoveryphase', wrapphasediscovery) - exchange.pushdiscoverymapping['phase'] = exchange._pushdiscoveryphase + exchange.pushdiscoverymapping[b'phase'] = exchange._pushdiscoveryphase
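
On the server side, ``experimental.topic-mode.server`` decides whether draft changesets pushed without a topic are ignored, warned about, or rejected, and the message always names one short hash plus a count. A standalone sketch of that reporting logic (``report_untopiced`` is an illustrative name; the real hook works on repository revisions, not byte strings)::

    def report_untopiced(nodes, mode=b'ignore'):
        """Build the (message, reject) pair for untopiced draft changesets.

        Mirrors the formatting above: one short hash, plus 'and N more'
        when several changesets are affected; 'enforce' rejects, 'warning'
        only complains, anything else stays silent.
        """
        if not nodes:
            return None, False
        first = nodes[0][:10]
        if len(nodes) == 1:
            msg = first
        else:
            msg = b'%s and %d more' % (first, len(nodes) - 1)
        if mode == b'enforce':
            return b'rejecting draft changesets: ' + msg, True
        if mode == b'warning':
            return b'pushed draft changeset without topic: ' + msg, False
        return None, False

    print(report_untopiced([b'0123456789abcdef', b'fedcba9876543210'], b'warning'))
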
--- a/hgext3rd/topic/randomname.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/topic/randomname.py Tue Sep 24 12:42:27 2019 +0200 @@ -8,1007 +8,1007 @@ import random animals = [ - 'aardvark', - 'albatross', - 'alligator', - 'alpaca', - 'ant', - 'anteater', - 'antelope', - 'ape', - 'armadillo', - 'baboon', - 'badger', - 'barracuda', - 'bat', - 'bear', - 'beaver', - 'bee', - 'beetle', - 'bison', - 'boar', - 'buffalo', - 'bushbaby', - 'bustard', - 'butterfly', - 'camel', - 'capuchin', - 'carabao', - 'caribou', - 'cat', - 'caterpillar', - 'cattle', - 'chameleon', - 'chamois', - 'cheetah', - 'chicken', - 'chimpanzee', - 'chinchilla', - 'chipmunk', - 'chough', - 'cicada', - 'clam', - 'cobra', - 'cockroach', - 'cod', - 'cormorant', - 'coyote', - 'crab', - 'crane', - 'cricket', - 'crocodile', - 'crow', - 'curlew', - 'deer', - 'dinosaur', - 'dog', - 'dogfish', - 'dolphin', - 'donkey', - 'dotterel', - 'dove', - 'dragon', - 'dragonfly', - 'duck', - 'dugong', - 'dunlin', - 'eagle', - 'echidna', - 'eel', - 'eland', - 'elephant', - 'elk', - 'emu', - 'falcon', - 'ferret', - 'finch', - 'fish', - 'flamingo', - 'fly', - 'fox', - 'frog', - 'gaur', - 'gazelle', - 'gecko', - 'gerbil', - 'giraffe', - 'gnat', - 'gnu', - 'goat', - 'goldfish', - 'goose', - 'gorilla', - 'goshawk', - 'grasshopper', - 'grouse', - 'guanaco', - 'guinea', - 'gull', - 'hamster', - 'hare', - 'hawk', - 'hedgehog', - 'heron', - 'herring', - 'hippopotamus', - 'hornet', - 'horse', - 'horsecrab', - 'hound', - 'hummingbird', - 'hyena', - 'hyrax', - 'ibex', - 'ibis', - 'iguana', - 'impala', - 'insect', - 'jackal', - 'jaguar', - 'jay', - 'jellyfish', - 'kangaroo', - 'koala', - 'kouprey', - 'kudu', - 'lapwing', - 'lark', - 'lemming', - 'lemur', - 'leopard', - 'lion', - 'lizard', - 'llama', - 'lobster', - 'locust', - 'loris', - 'louse', - 'lynx', - 'lyrebird', - 'magpie', - 'mallard', - 'mammoth', - 'manatee', - 'marten', - 'meerkat', - 'mink', - 'minnow', - 'mole', - 'mongoose', - 'monkey', - 'moose', - 'mosquito', - 'mouse', - 'mule', - 'muskrat', - 'narwhal', - 'newt', - 'nightingale', - 'numbat', - 'octopus', - 'okapi', - 'opossum', - 'oryx', - 'ostrich', - 'otter', - 'owl', - 'ox', - 'oyster', - 'panda', - 'panther', - 'parrot', - 'partridge', - 'peacock', - 'peafowl', - 'pelican', - 'penguin', - 'pheasant', - 'pig', - 'pigeon', - 'platypus', - 'pony', - 'porcupine', - 'porpoise', - 'puffin', - 'pug', - 'quagga', - 'quail', - 'quelea', - 'rabbit', - 'raccoon', - 'ram', - 'rat', - 'raven', - 'reindeer', - 'rhea', - 'rhinoceros', - 'rook', - 'ruff', - 'salamander', - 'salmon', - 'sambar', - 'sandpiper', - 'sardine', - 'scorpion', - 'seahorse', - 'seal', - 'serval', - 'shark', - 'sheep', - 'shrew', - 'shrimp', - 'skink', - 'skunk', - 'snail', - 'snake', - 'spider', - 'squid', - 'squirrel', - 'starling', - 'stinkbug', - 'stork', - 'swan', - 'tapir', - 'tarsier', - 'termite', - 'tern', - 'tiger', - 'toad', - 'trout', - 'turkey', - 'turtle', - 'unicorn', - 'viper', - 'vulture', - 'wallaby', - 'walrus', - 'wasp', - 'weasel', - 'whale', - 'wolf', - 'wolverine', - 'wombat', - 'woodchuck', - 'woodcock', - 'woodpecker', - 'worm', - 'wren', - 'yak', - 'zebra', - 'zorilla' + b'aardvark', + b'albatross', + b'alligator', + b'alpaca', + b'ant', + b'anteater', + b'antelope', + b'ape', + b'armadillo', + b'baboon', + b'badger', + b'barracuda', + b'bat', + b'bear', + b'beaver', + b'bee', + b'beetle', + b'bison', + b'boar', + b'buffalo', + b'bushbaby', + b'bustard', + b'butterfly', + b'camel', + b'capuchin', + b'carabao', + b'caribou', + b'cat', + 
b'caterpillar', + b'cattle', + b'chameleon', + b'chamois', + b'cheetah', + b'chicken', + b'chimpanzee', + b'chinchilla', + b'chipmunk', + b'chough', + b'cicada', + b'clam', + b'cobra', + b'cockroach', + b'cod', + b'cormorant', + b'coyote', + b'crab', + b'crane', + b'cricket', + b'crocodile', + b'crow', + b'curlew', + b'deer', + b'dinosaur', + b'dog', + b'dogfish', + b'dolphin', + b'donkey', + b'dotterel', + b'dove', + b'dragon', + b'dragonfly', + b'duck', + b'dugong', + b'dunlin', + b'eagle', + b'echidna', + b'eel', + b'eland', + b'elephant', + b'elk', + b'emu', + b'falcon', + b'ferret', + b'finch', + b'fish', + b'flamingo', + b'fly', + b'fox', + b'frog', + b'gaur', + b'gazelle', + b'gecko', + b'gerbil', + b'giraffe', + b'gnat', + b'gnu', + b'goat', + b'goldfish', + b'goose', + b'gorilla', + b'goshawk', + b'grasshopper', + b'grouse', + b'guanaco', + b'guinea', + b'gull', + b'hamster', + b'hare', + b'hawk', + b'hedgehog', + b'heron', + b'herring', + b'hippopotamus', + b'hornet', + b'horse', + b'horsecrab', + b'hound', + b'hummingbird', + b'hyena', + b'hyrax', + b'ibex', + b'ibis', + b'iguana', + b'impala', + b'insect', + b'jackal', + b'jaguar', + b'jay', + b'jellyfish', + b'kangaroo', + b'koala', + b'kouprey', + b'kudu', + b'lapwing', + b'lark', + b'lemming', + b'lemur', + b'leopard', + b'lion', + b'lizard', + b'llama', + b'lobster', + b'locust', + b'loris', + b'louse', + b'lynx', + b'lyrebird', + b'magpie', + b'mallard', + b'mammoth', + b'manatee', + b'marten', + b'meerkat', + b'mink', + b'minnow', + b'mole', + b'mongoose', + b'monkey', + b'moose', + b'mosquito', + b'mouse', + b'mule', + b'muskrat', + b'narwhal', + b'newt', + b'nightingale', + b'numbat', + b'octopus', + b'okapi', + b'opossum', + b'oryx', + b'ostrich', + b'otter', + b'owl', + b'ox', + b'oyster', + b'panda', + b'panther', + b'parrot', + b'partridge', + b'peacock', + b'peafowl', + b'pelican', + b'penguin', + b'pheasant', + b'pig', + b'pigeon', + b'platypus', + b'pony', + b'porcupine', + b'porpoise', + b'puffin', + b'pug', + b'quagga', + b'quail', + b'quelea', + b'rabbit', + b'raccoon', + b'ram', + b'rat', + b'raven', + b'reindeer', + b'rhea', + b'rhinoceros', + b'rook', + b'ruff', + b'salamander', + b'salmon', + b'sambar', + b'sandpiper', + b'sardine', + b'scorpion', + b'seahorse', + b'seal', + b'serval', + b'shark', + b'sheep', + b'shrew', + b'shrimp', + b'skink', + b'skunk', + b'snail', + b'snake', + b'spider', + b'squid', + b'squirrel', + b'starling', + b'stinkbug', + b'stork', + b'swan', + b'tapir', + b'tarsier', + b'termite', + b'tern', + b'tiger', + b'toad', + b'trout', + b'turkey', + b'turtle', + b'unicorn', + b'viper', + b'vulture', + b'wallaby', + b'walrus', + b'wasp', + b'weasel', + b'whale', + b'wolf', + b'wolverine', + b'wombat', + b'woodchuck', + b'woodcock', + b'woodpecker', + b'worm', + b'wren', + b'yak', + b'zebra', + b'zorilla' ] adjectives = [ - 'abiding', - 'abject', - 'ablaze', - 'able', - 'aboard', - 'abounding', - 'absorbed', - 'absorbing', - 'abstracted', - 'abundant', - 'acceptable', - 'accessible', - 'accurate', - 'acoustic', - 'adamant', - 'adaptable', - 'adhesive', - 'adjoining', - 'adorable', - 'adventurous', - 'affable', - 'affectionate', - 'agreeable', - 'alert', - 'alive', - 'alluring', - 'amazing', - 'ambiguous', - 'ambitious', - 'amiable', - 'amicable', - 'amused', - 'amusing', - 'ancient', - 'animated', - 'apricot', - 'appropriate', - 'aquatic', - 'arctic', - 'arenaceous', - 'aromatic', - 'aspiring', - 'assiduous', - 'assorted', - 'astonishing', - 'attractive', - 'auspicious', - 'automatic', 
- 'available', - 'average', - 'awake', - 'aware', - 'awesome', - 'axiomatic', - 'bashful', - 'bawdy', - 'beautiful', - 'beefy', - 'befitting', - 'beneficial', - 'benevolent', - 'bent', - 'best', - 'better', - 'bewildered', - 'bewitching', - 'big', - 'billowy', - 'bizarre', - 'black', - 'blithe', - 'blue', - 'blushing', - 'bouncy', - 'boundless', - 'brainy', - 'brash', - 'brave', - 'brawny', - 'brazen', - 'breezy', - 'brief', - 'bright', - 'brilliant', - 'broad', - 'brown', - 'bucolic', - 'bulky', - 'bumpy', - 'burgundy', - 'burly', - 'bustling', - 'busy', - 'calm', - 'capable', - 'capricious', - 'captivating', - 'carefree', - 'careful', - 'caring', - 'carrot', - 'ceaseless', - 'cerise', - 'certain', - 'challenging', - 'changeable', - 'charming', - 'cheerful', - 'chief', - 'chilly', - 'chipper', - 'classy', - 'clean', - 'clear', - 'clever', - 'cloudy', - 'coherent', - 'colorful', - 'colossal', - 'comfortable', - 'common', - 'communicative', - 'compassionate', - 'complete', - 'complex', - 'compulsive', - 'confused', - 'conscientious', - 'conscious', - 'conservative', - 'considerate', - 'convivial', - 'cooing', - 'cool', - 'cooperative', - 'coordinated', - 'courageous', - 'courteous', - 'crazy', - 'creative', - 'crispy', - 'crooked', - 'crowded', - 'cuddly', - 'cultured', - 'cunning', - 'curious', - 'curly', - 'curved', - 'curvy', - 'cut', - 'cute', - 'daily', - 'damp', - 'dapper', - 'dashing', - 'dazzling', - 'dear', - 'debonair', - 'decisive', - 'decorous', - 'deep', - 'defiant', - 'delicate', - 'delicious', - 'delighted', - 'delightful', - 'delirious', - 'descriptive', - 'detached', - 'detailed', - 'determined', - 'different', - 'diligent', - 'diminutive', - 'diplomatic', - 'discreet', - 'distinct', - 'distinctive', - 'dramatic', - 'dry', - 'dynamic', - 'dynamite', - 'eager', - 'early', - 'earthy', - 'easy', - 'easygoing', - 'eatable', - 'economic', - 'ecstatic', - 'educated', - 'efficacious', - 'efficient', - 'effortless', - 'eight', - 'elastic', - 'elated', - 'electric', - 'elegant', - 'elfin', - 'elite', - 'eminent', - 'emotional', - 'enchanted', - 'enchanting', - 'encouraging', - 'endless', - 'energetic', - 'enormous', - 'entertaining', - 'enthusiastic', - 'envious', - 'epicurean', - 'equable', - 'equal', - 'eternal', - 'ethereal', - 'evanescent', - 'even', - 'excellent', - 'excited', - 'exciting', - 'exclusive', - 'exotic', - 'expensive', - 'exquisite', - 'extroverted', - 'exuberant', - 'exultant', - 'fabulous', - 'fair', - 'faithful', - 'familiar', - 'famous', - 'fancy', - 'fantastic', - 'far', - 'fascinated', - 'fast', - 'fearless', - 'female', - 'fertile', - 'festive', - 'few', - 'fine', - 'first', - 'five', - 'fixed', - 'flamboyant', - 'flashy', - 'flat', - 'flawless', - 'flirtatious', - 'florid', - 'flowery', - 'fluffy', - 'fluttering', - 'foamy', - 'foolish', - 'foregoing', - 'fortunate', - 'four', - 'frank', - 'free', - 'frequent', - 'fresh', - 'friendly', - 'full', - 'functional', - 'funny', - 'furry', - 'future', - 'futuristic', - 'fuzzy', - 'gabby', - 'gainful', - 'garrulous', - 'general', - 'generous', - 'gentle', - 'giant', - 'giddy', - 'gifted', - 'gigantic', - 'gilded', - 'glamorous', - 'gleaming', - 'glorious', - 'glossy', - 'glowing', - 'godly', - 'good', - 'goofy', - 'gorgeous', - 'graceful', - 'grandiose', - 'grateful', - 'gratis', - 'gray', - 'great', - 'green', - 'gregarious', - 'grey', - 'groovy', - 'guiltless', - 'gusty', - 'guttural', - 'habitual', - 'half', - 'hallowed', - 'halting', - 'handsome', - 'happy', - 'hard', - 'hardworking', - 'harmonious', - 'heady', 
- 'healthy', - 'heavenly', - 'helpful', - 'hilarious', - 'historical', - 'holistic', - 'hollow', - 'honest', - 'honorable', - 'hopeful', - 'hospitable', - 'hot', - 'huge', - 'humorous', - 'hungry', - 'hushed', - 'hypnotic', - 'illustrious', - 'imaginary', - 'imaginative', - 'immense', - 'imminent', - 'impartial', - 'important', - 'imported', - 'impossible', - 'incandescent', - 'inconclusive', - 'incredible', - 'independent', - 'industrious', - 'inexpensive', - 'innate', - 'innocent', - 'inquisitive', - 'instinctive', - 'intellectual', - 'intelligent', - 'intense', - 'interesting', - 'internal', - 'intuitive', - 'inventive', - 'invincible', - 'jazzy', - 'jolly', - 'joyful', - 'joyous', - 'judicious', - 'juicy', - 'jumpy', - 'keen', - 'kind', - 'kindhearted', - 'kindly', - 'knotty', - 'knowing', - 'knowledgeable', - 'known', - 'laconic', - 'large', - 'lavish', - 'lean', - 'learned', - 'left', - 'legal', - 'level', - 'light', - 'likeable', - 'literate', - 'little', - 'lively', - 'living', - 'long', - 'longing', - 'loud', - 'lovely', - 'loving', - 'loyal', - 'lucky', - 'luminous', - 'lush', - 'luxuriant', - 'luxurious', - 'lyrical', - 'magenta', - 'magical', - 'magnificent', - 'majestic', - 'male', - 'mammoth', - 'many', - 'marvelous', - 'massive', - 'material', - 'mature', - 'meandering', - 'meaty', - 'medical', - 'mellow', - 'melodic', - 'melted', - 'merciful', - 'mighty', - 'miniature', - 'miniscule', - 'minor', - 'minute', - 'misty', - 'modern', - 'modest', - 'momentous', - 'motionless', - 'mountainous', - 'mute', - 'mysterious', - 'narrow', - 'natural', - 'near', - 'neat', - 'nebulous', - 'necessary', - 'neighborly', - 'new', - 'next', - 'nice', - 'nifty', - 'nimble', - 'nine', - 'nippy', - 'noiseless', - 'noisy', - 'nonchalant', - 'normal', - 'numberless', - 'numerous', - 'nutritious', - 'obedient', - 'observant', - 'obtainable', - 'oceanic', - 'omniscient', - 'one', - 'open', - 'opposite', - 'optimal', - 'optimistic', - 'opulent', - 'orange', - 'ordinary', - 'organic', - 'outgoing', - 'outrageous', - 'outstanding', - 'oval', - 'overjoyed', - 'overt', - 'palatial', - 'panoramic', - 'parallel', - 'passionate', - 'past', - 'pastoral', - 'patient', - 'peaceful', - 'perfect', - 'periodic', - 'permissible', - 'perpetual', - 'persistent', - 'petite', - 'philosophical', - 'physical', - 'picturesque', - 'pink', - 'pioneering', - 'piquant', - 'plausible', - 'pleasant', - 'plucky', - 'poised', - 'polite', - 'possible', - 'powerful', - 'practical', - 'precious', - 'premium', - 'present', - 'pretty', - 'previous', - 'private', - 'probable', - 'productive', - 'profound', - 'profuse', - 'protective', - 'proud', - 'psychedelic', - 'public', - 'pumped', - 'purple', - 'purring', - 'puzzled', - 'puzzling', - 'quaint', - 'quick', - 'quicker', - 'quickest', - 'quiet', - 'quirky', - 'quixotic', - 'quizzical', - 'rainy', - 'rapid', - 'rare', - 'rational', - 'ready', - 'real', - 'rebel', - 'receptive', - 'red', - 'reflective', - 'regular', - 'relaxed', - 'reliable', - 'relieved', - 'remarkable', - 'reminiscent', - 'reserved', - 'resolute', - 'resonant', - 'resourceful', - 'responsible', - 'rich', - 'ridiculous', - 'right', - 'rightful', - 'ripe', - 'ritzy', - 'roasted', - 'robust', - 'romantic', - 'roomy', - 'round', - 'royal', - 'ruddy', - 'rural', - 'rustic', - 'sable', - 'safe', - 'salty', - 'same', - 'satisfying', - 'savory', - 'scientific', - 'scintillating', - 'scrumptious', - 'second', - 'secret', - 'secretive', - 'seemly', - 'selective', - 'sensible', - 'separate', - 'shaggy', - 'shaky', - 'shining', - 
'shiny', - 'short', - 'shy', - 'silent', - 'silky', - 'silly', - 'simple', - 'simplistic', - 'sincere', - 'six', - 'sizzling', - 'skillful', - 'sleepy', - 'slick', - 'slim', - 'smart', - 'smiling', - 'smooth', - 'soaring', - 'sociable', - 'soft', - 'solid', - 'sophisticated', - 'sparkling', - 'special', - 'spectacular', - 'speedy', - 'spicy', - 'spiffy', - 'spiritual', - 'splendid', - 'spooky', - 'spotless', - 'spotted', - 'square', - 'standing', - 'statuesque', - 'steadfast', - 'steady', - 'steep', - 'stimulating', - 'straight', - 'straightforward', - 'striking', - 'striped', - 'strong', - 'stunning', - 'stupendous', - 'sturdy', - 'subsequent', - 'substantial', - 'subtle', - 'successful', - 'succinct', - 'sudden', - 'super', - 'superb', - 'supreme', - 'swanky', - 'sweet', - 'swift', - 'sympathetic', - 'synonymous', - 'talented', - 'tall', - 'tame', - 'tan', - 'tangible', - 'tangy', - 'tasteful', - 'tasty', - 'telling', - 'temporary', - 'tempting', - 'ten', - 'tender', - 'terrific', - 'tested', - 'thankful', - 'therapeutic', - 'thin', - 'thinkable', - 'third', - 'thoughtful', - 'three', - 'thrifty', - 'tidy', - 'tiny', - 'toothsome', - 'towering', - 'tranquil', - 'tremendous', - 'tricky', - 'true', - 'truthful', - 'two', - 'typical', - 'ubiquitous', - 'ultra', - 'unassuming', - 'unbiased', - 'uncovered', - 'understanding', - 'understood', - 'unequaled', - 'unique', - 'unusual', - 'unwritten', - 'upbeat', - 'useful', - 'utopian', - 'utter', - 'uttermost', - 'valuable', - 'various', - 'vast', - 'verdant', - 'vermilion', - 'versatile', - 'versed', - 'victorious', - 'vigorous', - 'violet', - 'vivacious', - 'voiceless', - 'voluptuous', - 'wacky', - 'waiting', - 'wakeful', - 'wandering', - 'warm', - 'warmhearted', - 'wealthy', - 'whimsical', - 'whispering', - 'white', - 'whole', - 'wholesale', - 'whopping', - 'wide', - 'wiggly', - 'wild', - 'willing', - 'windy', - 'winsome', - 'wiry', - 'wise', - 'wistful', - 'witty', - 'womanly', - 'wonderful', - 'workable', - 'young', - 'youthful', - 'yummy', - 'zany', - 'zealous', - 'zesty', - 'zippy' + b'abiding', + b'abject', + b'ablaze', + b'able', + b'aboard', + b'abounding', + b'absorbed', + b'absorbing', + b'abstracted', + b'abundant', + b'acceptable', + b'accessible', + b'accurate', + b'acoustic', + b'adamant', + b'adaptable', + b'adhesive', + b'adjoining', + b'adorable', + b'adventurous', + b'affable', + b'affectionate', + b'agreeable', + b'alert', + b'alive', + b'alluring', + b'amazing', + b'ambiguous', + b'ambitious', + b'amiable', + b'amicable', + b'amused', + b'amusing', + b'ancient', + b'animated', + b'apricot', + b'appropriate', + b'aquatic', + b'arctic', + b'arenaceous', + b'aromatic', + b'aspiring', + b'assiduous', + b'assorted', + b'astonishing', + b'attractive', + b'auspicious', + b'automatic', + b'available', + b'average', + b'awake', + b'aware', + b'awesome', + b'axiomatic', + b'bashful', + b'bawdy', + b'beautiful', + b'beefy', + b'befitting', + b'beneficial', + b'benevolent', + b'bent', + b'best', + b'better', + b'bewildered', + b'bewitching', + b'big', + b'billowy', + b'bizarre', + b'black', + b'blithe', + b'blue', + b'blushing', + b'bouncy', + b'boundless', + b'brainy', + b'brash', + b'brave', + b'brawny', + b'brazen', + b'breezy', + b'brief', + b'bright', + b'brilliant', + b'broad', + b'brown', + b'bucolic', + b'bulky', + b'bumpy', + b'burgundy', + b'burly', + b'bustling', + b'busy', + b'calm', + b'capable', + b'capricious', + b'captivating', + b'carefree', + b'careful', + b'caring', + b'carrot', + b'ceaseless', + b'cerise', + 
b'certain', + b'challenging', + b'changeable', + b'charming', + b'cheerful', + b'chief', + b'chilly', + b'chipper', + b'classy', + b'clean', + b'clear', + b'clever', + b'cloudy', + b'coherent', + b'colorful', + b'colossal', + b'comfortable', + b'common', + b'communicative', + b'compassionate', + b'complete', + b'complex', + b'compulsive', + b'confused', + b'conscientious', + b'conscious', + b'conservative', + b'considerate', + b'convivial', + b'cooing', + b'cool', + b'cooperative', + b'coordinated', + b'courageous', + b'courteous', + b'crazy', + b'creative', + b'crispy', + b'crooked', + b'crowded', + b'cuddly', + b'cultured', + b'cunning', + b'curious', + b'curly', + b'curved', + b'curvy', + b'cut', + b'cute', + b'daily', + b'damp', + b'dapper', + b'dashing', + b'dazzling', + b'dear', + b'debonair', + b'decisive', + b'decorous', + b'deep', + b'defiant', + b'delicate', + b'delicious', + b'delighted', + b'delightful', + b'delirious', + b'descriptive', + b'detached', + b'detailed', + b'determined', + b'different', + b'diligent', + b'diminutive', + b'diplomatic', + b'discreet', + b'distinct', + b'distinctive', + b'dramatic', + b'dry', + b'dynamic', + b'dynamite', + b'eager', + b'early', + b'earthy', + b'easy', + b'easygoing', + b'eatable', + b'economic', + b'ecstatic', + b'educated', + b'efficacious', + b'efficient', + b'effortless', + b'eight', + b'elastic', + b'elated', + b'electric', + b'elegant', + b'elfin', + b'elite', + b'eminent', + b'emotional', + b'enchanted', + b'enchanting', + b'encouraging', + b'endless', + b'energetic', + b'enormous', + b'entertaining', + b'enthusiastic', + b'envious', + b'epicurean', + b'equable', + b'equal', + b'eternal', + b'ethereal', + b'evanescent', + b'even', + b'excellent', + b'excited', + b'exciting', + b'exclusive', + b'exotic', + b'expensive', + b'exquisite', + b'extroverted', + b'exuberant', + b'exultant', + b'fabulous', + b'fair', + b'faithful', + b'familiar', + b'famous', + b'fancy', + b'fantastic', + b'far', + b'fascinated', + b'fast', + b'fearless', + b'female', + b'fertile', + b'festive', + b'few', + b'fine', + b'first', + b'five', + b'fixed', + b'flamboyant', + b'flashy', + b'flat', + b'flawless', + b'flirtatious', + b'florid', + b'flowery', + b'fluffy', + b'fluttering', + b'foamy', + b'foolish', + b'foregoing', + b'fortunate', + b'four', + b'frank', + b'free', + b'frequent', + b'fresh', + b'friendly', + b'full', + b'functional', + b'funny', + b'furry', + b'future', + b'futuristic', + b'fuzzy', + b'gabby', + b'gainful', + b'garrulous', + b'general', + b'generous', + b'gentle', + b'giant', + b'giddy', + b'gifted', + b'gigantic', + b'gilded', + b'glamorous', + b'gleaming', + b'glorious', + b'glossy', + b'glowing', + b'godly', + b'good', + b'goofy', + b'gorgeous', + b'graceful', + b'grandiose', + b'grateful', + b'gratis', + b'gray', + b'great', + b'green', + b'gregarious', + b'grey', + b'groovy', + b'guiltless', + b'gusty', + b'guttural', + b'habitual', + b'half', + b'hallowed', + b'halting', + b'handsome', + b'happy', + b'hard', + b'hardworking', + b'harmonious', + b'heady', + b'healthy', + b'heavenly', + b'helpful', + b'hilarious', + b'historical', + b'holistic', + b'hollow', + b'honest', + b'honorable', + b'hopeful', + b'hospitable', + b'hot', + b'huge', + b'humorous', + b'hungry', + b'hushed', + b'hypnotic', + b'illustrious', + b'imaginary', + b'imaginative', + b'immense', + b'imminent', + b'impartial', + b'important', + b'imported', + b'impossible', + b'incandescent', + b'inconclusive', + b'incredible', + b'independent', + b'industrious', + 
b'inexpensive', + b'innate', + b'innocent', + b'inquisitive', + b'instinctive', + b'intellectual', + b'intelligent', + b'intense', + b'interesting', + b'internal', + b'intuitive', + b'inventive', + b'invincible', + b'jazzy', + b'jolly', + b'joyful', + b'joyous', + b'judicious', + b'juicy', + b'jumpy', + b'keen', + b'kind', + b'kindhearted', + b'kindly', + b'knotty', + b'knowing', + b'knowledgeable', + b'known', + b'laconic', + b'large', + b'lavish', + b'lean', + b'learned', + b'left', + b'legal', + b'level', + b'light', + b'likeable', + b'literate', + b'little', + b'lively', + b'living', + b'long', + b'longing', + b'loud', + b'lovely', + b'loving', + b'loyal', + b'lucky', + b'luminous', + b'lush', + b'luxuriant', + b'luxurious', + b'lyrical', + b'magenta', + b'magical', + b'magnificent', + b'majestic', + b'male', + b'mammoth', + b'many', + b'marvelous', + b'massive', + b'material', + b'mature', + b'meandering', + b'meaty', + b'medical', + b'mellow', + b'melodic', + b'melted', + b'merciful', + b'mighty', + b'miniature', + b'miniscule', + b'minor', + b'minute', + b'misty', + b'modern', + b'modest', + b'momentous', + b'motionless', + b'mountainous', + b'mute', + b'mysterious', + b'narrow', + b'natural', + b'near', + b'neat', + b'nebulous', + b'necessary', + b'neighborly', + b'new', + b'next', + b'nice', + b'nifty', + b'nimble', + b'nine', + b'nippy', + b'noiseless', + b'noisy', + b'nonchalant', + b'normal', + b'numberless', + b'numerous', + b'nutritious', + b'obedient', + b'observant', + b'obtainable', + b'oceanic', + b'omniscient', + b'one', + b'open', + b'opposite', + b'optimal', + b'optimistic', + b'opulent', + b'orange', + b'ordinary', + b'organic', + b'outgoing', + b'outrageous', + b'outstanding', + b'oval', + b'overjoyed', + b'overt', + b'palatial', + b'panoramic', + b'parallel', + b'passionate', + b'past', + b'pastoral', + b'patient', + b'peaceful', + b'perfect', + b'periodic', + b'permissible', + b'perpetual', + b'persistent', + b'petite', + b'philosophical', + b'physical', + b'picturesque', + b'pink', + b'pioneering', + b'piquant', + b'plausible', + b'pleasant', + b'plucky', + b'poised', + b'polite', + b'possible', + b'powerful', + b'practical', + b'precious', + b'premium', + b'present', + b'pretty', + b'previous', + b'private', + b'probable', + b'productive', + b'profound', + b'profuse', + b'protective', + b'proud', + b'psychedelic', + b'public', + b'pumped', + b'purple', + b'purring', + b'puzzled', + b'puzzling', + b'quaint', + b'quick', + b'quicker', + b'quickest', + b'quiet', + b'quirky', + b'quixotic', + b'quizzical', + b'rainy', + b'rapid', + b'rare', + b'rational', + b'ready', + b'real', + b'rebel', + b'receptive', + b'red', + b'reflective', + b'regular', + b'relaxed', + b'reliable', + b'relieved', + b'remarkable', + b'reminiscent', + b'reserved', + b'resolute', + b'resonant', + b'resourceful', + b'responsible', + b'rich', + b'ridiculous', + b'right', + b'rightful', + b'ripe', + b'ritzy', + b'roasted', + b'robust', + b'romantic', + b'roomy', + b'round', + b'royal', + b'ruddy', + b'rural', + b'rustic', + b'sable', + b'safe', + b'salty', + b'same', + b'satisfying', + b'savory', + b'scientific', + b'scintillating', + b'scrumptious', + b'second', + b'secret', + b'secretive', + b'seemly', + b'selective', + b'sensible', + b'separate', + b'shaggy', + b'shaky', + b'shining', + b'shiny', + b'short', + b'shy', + b'silent', + b'silky', + b'silly', + b'simple', + b'simplistic', + b'sincere', + b'six', + b'sizzling', + b'skillful', + b'sleepy', + b'slick', + b'slim', + b'smart', + 
b'smiling', + b'smooth', + b'soaring', + b'sociable', + b'soft', + b'solid', + b'sophisticated', + b'sparkling', + b'special', + b'spectacular', + b'speedy', + b'spicy', + b'spiffy', + b'spiritual', + b'splendid', + b'spooky', + b'spotless', + b'spotted', + b'square', + b'standing', + b'statuesque', + b'steadfast', + b'steady', + b'steep', + b'stimulating', + b'straight', + b'straightforward', + b'striking', + b'striped', + b'strong', + b'stunning', + b'stupendous', + b'sturdy', + b'subsequent', + b'substantial', + b'subtle', + b'successful', + b'succinct', + b'sudden', + b'super', + b'superb', + b'supreme', + b'swanky', + b'sweet', + b'swift', + b'sympathetic', + b'synonymous', + b'talented', + b'tall', + b'tame', + b'tan', + b'tangible', + b'tangy', + b'tasteful', + b'tasty', + b'telling', + b'temporary', + b'tempting', + b'ten', + b'tender', + b'terrific', + b'tested', + b'thankful', + b'therapeutic', + b'thin', + b'thinkable', + b'third', + b'thoughtful', + b'three', + b'thrifty', + b'tidy', + b'tiny', + b'toothsome', + b'towering', + b'tranquil', + b'tremendous', + b'tricky', + b'true', + b'truthful', + b'two', + b'typical', + b'ubiquitous', + b'ultra', + b'unassuming', + b'unbiased', + b'uncovered', + b'understanding', + b'understood', + b'unequaled', + b'unique', + b'unusual', + b'unwritten', + b'upbeat', + b'useful', + b'utopian', + b'utter', + b'uttermost', + b'valuable', + b'various', + b'vast', + b'verdant', + b'vermilion', + b'versatile', + b'versed', + b'victorious', + b'vigorous', + b'violet', + b'vivacious', + b'voiceless', + b'voluptuous', + b'wacky', + b'waiting', + b'wakeful', + b'wandering', + b'warm', + b'warmhearted', + b'wealthy', + b'whimsical', + b'whispering', + b'white', + b'whole', + b'wholesale', + b'whopping', + b'wide', + b'wiggly', + b'wild', + b'willing', + b'windy', + b'winsome', + b'wiry', + b'wise', + b'wistful', + b'witty', + b'womanly', + b'wonderful', + b'workable', + b'young', + b'youthful', + b'yummy', + b'zany', + b'zealous', + b'zesty', + b'zippy' ] def randomtopicname(ui): # Re-implement random.choice() in the way it was written in Python 2. def choice(things): return things[int(len(things) * random.random())] - if ui.configint("devel", "randomseed"): - random.seed(ui.configint("devel", "randomseed")) - return choice(adjectives) + "-" + choice(animals) + if ui.configint(b"devel", b"randomseed"): + random.seed(ui.configint(b"devel", b"randomseed")) + return choice(adjectives) + b"-" + choice(animals)
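
Beyond byteifying the two word lists, randomname.py keeps its own ``choice()`` because a seeded ``random.choice()`` returns different elements on Python 2 and Python 3 (Python 3 draws through ``Random._randbelow()``), which would make test output differ between interpreters. A standalone sketch of the same idea (function names are illustrative)::

    import random

    def py2_choice(seq):
        """Pick an element the way Python 2's random.choice() did.

        Indexing with int(len(seq) * random.random()) keeps a seeded run
        reproducible across Python 2 and Python 3.
        """
        return seq[int(len(seq) * random.random())]

    def random_topic(adjectives, animals, seed=None):
        if seed is not None:
            random.seed(seed)
        return py2_choice(adjectives) + b'-' + py2_choice(animals)

    print(random_topic([b'wise', b'zippy'], [b'wren', b'yak'], seed=42))
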
--- a/hgext3rd/topic/revset.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/topic/revset.py Tue Sep 24 12:42:27 2019 +0200 @@ -24,7 +24,7 @@ revsetpredicate = registrar.revsetpredicate() def getstringstrict(x, err): - if x and x[0] == 'string': + if x and x[0] == b'string': return x[1] raise error.ParseError(err) @@ -36,7 +36,7 @@ If `string` starts with `re:` the remainder of the name is treated as a regular expression. """ - args = revset.getargs(x, 0, 1, 'topic takes one or no arguments') + args = revset.getargs(x, 0, 1, b'topic takes one or no arguments') mutable = revset._notpublic(repo, revset.fullreposet(repo), ()) @@ -44,15 +44,15 @@ return (subset & mutable).filter(lambda r: bool(repo[r].topic())) try: - topic = getstringstrict(args[0], '') + topic = getstringstrict(args[0], b'') except error.ParseError: # not a string, but another revset pass else: kind, pattern, matcher = mkmatcher(topic) - if topic.startswith('literal:') and pattern not in repo.topics: - raise error.RepoLookupError("topic '%s' does not exist" % pattern) + if topic.startswith(b'literal:') and pattern not in repo.topics: + raise error.RepoLookupError(b"topic '%s' does not exist" % pattern) def matches(r): topic = repo[r].topic() @@ -64,7 +64,7 @@ s = revset.getset(repo, revset.fullreposet(repo), x) topics = {repo[r].topic() for r in s} - topics.discard('') + topics.discard(b'') def matches(r): if r in s: @@ -82,11 +82,11 @@ Name is horrible so that people change it. """ - args = revset.getargs(x, 1, 1, 'ngtip takes one argument') + args = revset.getargs(x, 1, 1, b'ngtip takes one argument') # match a specific topic - branch = revset.getstring(args[0], 'ngtip requires a string') - if branch == '.': - branch = repo['.'].branch() + branch = revset.getstring(args[0], b'ngtip requires a string') + if branch == b'.': + branch = repo[b'.'].branch() return subset & revset.baseset(destination.ngtip(repo, branch)) @revsetpredicate(b'stack()') @@ -97,7 +97,7 @@ unstable changeset after there future parent (as if evolve where already run). """ - err = 'stack takes no arguments, it works on current topic' + err = b'stack takes no arguments, it works on current topic' revset.getargs(x, 0, 0, err) topic = None branch = None @@ -120,8 +120,8 @@ if isinstance(z, tuple): a, b = revset.getintrange( z, - 'relation subscript must be an integer or a range', - 'relation subscript bounds must be integers', + b'relation subscript must be an integer or a range', + b'relation subscript bounds must be integers', None, None) else: a = b = z @@ -159,12 +159,12 @@ return subset & revset.baseset(revs) - revset.subscriptrelations['stack'] = stackrel - revset.subscriptrelations['s'] = stackrel + revset.subscriptrelations[b'stack'] = stackrel + revset.subscriptrelations[b's'] = stackrel def topicrel(repo, subset, x, *args): subset &= topicset(repo, subset, x) return revset.generationsrel(repo, subset, x, *args) - revset.subscriptrelations['topic'] = topicrel - revset.subscriptrelations['t'] = topicrel + revset.subscriptrelations[b'topic'] = topicrel + revset.subscriptrelations[b't'] = topicrel
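
The ``topic()`` revset accepts either a literal topic name or an ``re:`` pattern, and the hunk above only byteifies the strings involved. The matching convention itself is easy to show in isolation; a sketch that skips Mercurial's ``stringmatcher`` and the "unknown literal topic" validation::

    import re

    def make_matcher(pattern):
        """Tiny stand-in for the matcher behind topic(<string>).

        Patterns prefixed with b're:' are regular expressions; anything
        else must equal a topic name exactly.
        """
        if pattern.startswith(b're:'):
            rx = re.compile(pattern[3:])
            return lambda topic: rx.search(topic) is not None
        return lambda topic: topic == pattern

    match = make_matcher(b're:^feature-')
    assert match(b'feature-login') and not match(b'bugfix-42')
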
--- a/hgext3rd/topic/stack.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/topic/stack.py Tue Sep 24 12:42:27 2019 +0200 @@ -26,7 +26,7 @@ username = None if user: # user is of form "abc <abc@xyz.com>" - username = user.split('<')[0] + username = user.split(b'<')[0] if not username: # assuming user is of form "<abc@xyz.com>" if len(user) > 1: @@ -45,10 +45,10 @@ """ phasesets = repo._phasecache._phasesets if not phasesets or None in phasesets[phases.draft:]: - return repo.revs('(not public()) - obsolete()') + return repo.revs(b'(not public()) - obsolete()') result = set.union(*phasesets[phases.draft:]) - result -= obsolete.getrevs(repo, 'obsolete') + result -= obsolete.getrevs(repo, b'obsolete') return result class stack(object): @@ -63,13 +63,13 @@ subset = _stackcandidates(repo) if topic is not None and branch is not None: - raise error.ProgrammingError('both branch and topic specified (not defined yet)') + raise error.ProgrammingError(b'both branch and topic specified (not defined yet)') elif topic is not None: - trevs = repo.revs("%ld and topic(%s)", subset, topic) + trevs = repo.revs(b"%ld and topic(%s)", subset, topic) elif branch is not None: - trevs = repo.revs("%ld and branch(%s) - topic()", subset, branch) + trevs = repo.revs(b"%ld and branch(%s) - topic()", subset, branch) else: - raise error.ProgrammingError('neither branch and topic specified (not defined yet)') + raise error.ProgrammingError(b'neither branch and topic specified (not defined yet)') self._revs = trevs def __iter__(self): @@ -178,7 +178,7 @@ if revs: pt1 = self._repo[revs[0]].p1() else: - pt1 = self._repo['.'] + pt1 = self._repo[b'.'] if pt1.obsolete(): pt1 = self._repo[_singlesuccessor(self._repo, pt1)] @@ -206,15 +206,15 @@ if revs: minroot = [min(r for r in revs if not deps[r])] try: - dest = destutil.destmerge(self._repo, action='rebase', + dest = destutil.destmerge(self._repo, action=b'rebase', sourceset=minroot, onheadcheck=False) - return len(self._repo.revs("only(%d, %ld)", dest, minroot)) + return len(self._repo.revs(b"only(%d, %ld)", dest, minroot)) except error.NoMergeDestAbort: return 0 except error.ManyMergeDestAbort as exc: # XXX we should make it easier for upstream to provide the information - self.behinderror = pycompat.bytestr(exc).split('-', 1)[0].rstrip() + self.behinderror = pycompat.bytestr(exc).split(b'-', 1)[0].rstrip() return -1 return 0 @@ -226,68 +226,68 @@ return branches def labelsgen(prefix, parts): - fmt = prefix + '.%s' - return prefix + ' ' + ' '.join(fmt % p.replace(' ', '-') for p in parts) + fmt = prefix + b'.%s' + return prefix + b' ' + b' '.join(fmt % p.replace(b' ', b'-') for p in parts) def showstack(ui, repo, branch=None, topic=None, opts=None): if opts is None: opts = {} if topic is not None and branch is not None: - msg = 'both branch and topic specified [%s]{%s}(not defined yet)' + msg = b'both branch and topic specified [%s]{%s}(not defined yet)' msg %= (branch, topic) raise error.ProgrammingError(msg) elif topic is not None: - prefix = 's' + prefix = b's' if topic not in repo.topics: - raise error.Abort(_('cannot resolve "%s": no such topic found') % topic) + raise error.Abort(_(b'cannot resolve "%s": no such topic found') % topic) elif branch is not None: - prefix = 's' + prefix = b's' else: - raise error.ProgrammingError('neither branch and topic specified (not defined yet)') + raise error.ProgrammingError(b'neither branch and topic specified (not defined yet)') - fm = ui.formatter('topicstack', opts) + fm = ui.formatter(b'topicstack', opts) prev = None entries = 
[] idxmap = {} - label = 'topic' + label = b'topic' if topic == repo.currenttopic: - label = 'topic.active' + label = b'topic.active' st = stack(repo, branch, topic) if topic is not None: - fm.plain(_('### topic: %s') + fm.plain(_(b'### topic: %s') % ui.label(topic, label), - label='stack.summary.topic') + label=b'stack.summary.topic') if 1 < len(st.heads): - fm.plain(' (') - fm.plain('%d heads' % len(st.heads), - label='stack.summary.headcount.multiple') - fm.plain(')') - fm.plain('\n') - fm.plain(_('### target: %s (branch)') - % '+'.join(st.branches), # XXX handle multi branches - label='stack.summary.branches') + fm.plain(b' (') + fm.plain(b'%d heads' % len(st.heads), + label=b'stack.summary.headcount.multiple') + fm.plain(b')') + fm.plain(b'\n') + fm.plain(_(b'### target: %s (branch)') + % b'+'.join(st.branches), # XXX handle multi branches + label=b'stack.summary.branches') if topic is None: if 1 < len(st.heads): - fm.plain(' (') - fm.plain('%d heads' % len(st.heads), - label='stack.summary.headcount.multiple') - fm.plain(')') + fm.plain(b' (') + fm.plain(b'%d heads' % len(st.heads), + label=b'stack.summary.headcount.multiple') + fm.plain(b')') else: if st.behindcount == -1: - fm.plain(', ') - fm.plain('ambiguous rebase destination - %s' % st.behinderror, - label='stack.summary.behinderror') + fm.plain(b', ') + fm.plain(b'ambiguous rebase destination - %s' % st.behinderror, + label=b'stack.summary.behinderror') elif st.behindcount: - fm.plain(', ') - fm.plain('%d behind' % st.behindcount, label='stack.summary.behindcount') - fm.plain('\n') + fm.plain(b', ') + fm.plain(b'%d behind' % st.behindcount, label=b'stack.summary.behindcount') + fm.plain(b'\n') if not st: - fm.plain(_("(stack is empty)\n")) + fm.plain(_(b"(stack is empty)\n")) st = stack(repo, branch=branch, topic=topic) for idx, r in enumerate(st, 0): @@ -326,40 +326,40 @@ symbol = None states = [] - if opts.get('children'): - expr = 'children(%d) and merge() - %ld' + if opts.get(b'children'): + expr = b'children(%d) and merge() - %ld' revisions = repo.revs(expr, ctx.rev(), st._revs) if len(revisions) > 0: - states.append('external-children') + states.append(b'external-children') if ctx.orphan(): - symbol = '$' - states.append('orphan') + symbol = b'$' + states.append(b'orphan') if ctx.contentdivergent(): - symbol = '$' - states.append('content divergent') + symbol = b'$' + states.append(b'content divergent') if ctx.phasedivergent(): - symbol = '$' - states.append('phase divergent') + symbol = b'$' + states.append(b'phase divergent') - iscurrentrevision = repo.revs('%d and parents()', ctx.rev()) + iscurrentrevision = repo.revs(b'%d and parents()', ctx.rev()) if iscurrentrevision: - symbol = '@' - states.append('current') + symbol = b'@' + states.append(b'current') if not isentry: - symbol = '^' + symbol = b'^' # "base" is kind of a "ghost" entry - states.append('base') + states.append(b'base') # none of the above if statments get executed if not symbol: - symbol = ':' + symbol = b':' if not states: - states.append('clean') + states.append(b'clean') states.sort() @@ -377,22 +377,22 @@ spacewidth = 2 + 40 # s# alias width spacewidth += 2 - fm.plain(' ' * spacewidth) + fm.plain(b' ' * spacewidth) else: - fm.write('stack_index', '%s%%d' % prefix, idx, - label=labelsgen('stack.index', states)) + fm.write(b'stack_index', b'%s%%d' % prefix, idx, + label=labelsgen(b'stack.index', states)) if ui.verbose: - fm.write('node', '(%s)', fm.hexfunc(ctx.node()), - label=labelsgen('stack.shortnode', states)) + fm.write(b'node', b'(%s)', 
fm.hexfunc(ctx.node()), + label=labelsgen(b'stack.shortnode', states)) else: fm.data(node=fm.hexfunc(ctx.node())) - fm.write('symbol', '%s', symbol, - label=labelsgen('stack.state', states)) - fm.plain(' ') - fm.write('desc', '%s', ctx.description().splitlines()[0], - label=labelsgen('stack.desc', states)) - fm.condwrite(states != ['clean'] and idx is not None, 'state', - ' (%s)', fm.formatlist(states, 'stack.state'), - label=labelsgen('stack.state', states)) - fm.plain('\n') + fm.write(b'symbol', b'%s', symbol, + label=labelsgen(b'stack.state', states)) + fm.plain(b' ') + fm.write(b'desc', b'%s', ctx.description().splitlines()[0], + label=labelsgen(b'stack.desc', states)) + fm.condwrite(states != [b'clean'] and idx is not None, b'state', + b' (%s)', fm.formatlist(states, b'stack.state'), + label=labelsgen(b'stack.state', states)) + fm.plain(b'\n') fm.end()
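
Each ``hg stack`` entry gets a one-character marker before its description, chosen by the sequence of checks visible above, where later checks win. A standalone sketch of that precedence, using plain booleans instead of a changectx (``entry_symbol`` is an illustrative name)::

    def entry_symbol(states, is_current=False, is_entry=True):
        """Pick the marker shown in front of a stack entry.

        Troubled changesets get '$', the working-copy parent overrides
        that with '@', the 'base' ghost entry always shows '^', and
        everything else falls back to ':'.
        """
        symbol = None
        if {b'orphan', b'content divergent', b'phase divergent'} & set(states):
            symbol = b'$'
        if is_current:
            symbol = b'@'
        if not is_entry:
            symbol = b'^'
        return symbol or b':'

    assert entry_symbol([b'clean']) == b':'
    assert entry_symbol([b'orphan'], is_current=True) == b'@'
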
--- a/hgext3rd/topic/topicmap.py Fri Jul 19 16:26:48 2019 +0200 +++ b/hgext3rd/topic/topicmap.py Tue Sep 24 12:42:27 2019 +0200 @@ -16,22 +16,22 @@ common, ) -basefilter = set(['base', 'immutable']) +basefilter = set([b'base', b'immutable']) def topicfilter(name): """return a "topic" version of a filter level""" if name in basefilter: return name elif name is None: return None - elif name.endswith('-topic'): + elif name.endswith(b'-topic'): return name else: - return name + '-topic' + return name + b'-topic' def istopicfilter(filtername): if filtername is None: return False - return filtername.endswith('-topic') + return filtername.endswith(b'-topic') def gettopicrepo(repo): if not common.hastopicext(repo): @@ -61,8 +61,8 @@ if newfilter not in funcmap: funcmap[newfilter] = revsfunc partialmap[newfilter] = base - funcmap['unfiltered-topic'] = lambda repo: frozenset() - partialmap['unfiltered-topic'] = 'visible-topic' + funcmap[b'unfiltered-topic'] = lambda repo: frozenset() + partialmap[b'unfiltered-topic'] = b'visible-topic' def _phaseshash(repo, maxrev): """uniq ID for a phase matching a set of rev""" @@ -80,7 +80,7 @@ if revs: s = hashlib.sha1() for rev in revs: - s.update('%d;' % rev) + s.update(b'%d;' % rev) key = s.digest() return key @@ -100,7 +100,7 @@ # wrap commit status use the topic branch heads ctx = repo[node] if ctx.topic() and ctx.branch() == branch: - bheads = repo.branchheads("%s:%s" % (branch, ctx.topic())) + bheads = repo.branchheads(b"%s:%s" % (branch, ctx.topic())) ret = orig(repo, node, branch, bheads=bheads, opts=opts) @@ -111,10 +111,10 @@ return ret parents = ctx.parents() - if (not opts.get('amend') and bheads and node not in bheads and not + if (not opts.get(b'amend') and bheads and node not in bheads and not [x for x in parents if x.node() in bheads and x.branch() == branch]): - repo.ui.status(_("(consider using topic for lightweight branches." - " See 'hg help topic')\n")) + repo.ui.status(_(b"(consider using topic for lightweight branches." + b" See 'hg help topic')\n")) return ret @@ -179,17 +179,17 @@ new.phaseshash = self.phaseshash return new - def branchtip(self, branch, topic=''): + def branchtip(self, branch, topic=b''): '''Return the tipmost open head on branch head, otherwise return the tipmost closed head on branch. Raise KeyError for unknown branch.''' if topic: - branch = '%s:%s' % (branch, topic) + branch = b'%s:%s' % (branch, topic) return super(_topiccache, self).branchtip(branch) - def branchheads(self, branch, closed=False, topic=''): + def branchheads(self, branch, closed=False, topic=b''): if topic: - branch = '%s:%s' % (branch, topic) + branch = b'%s:%s' % (branch, topic) return super(_topiccache, self).branchheads(branch, closed=closed) def validfor(self, repo): @@ -229,13 +229,13 @@ def branchinfo(r, changelog=None): info = oldgetbranchinfo(r) - topic = '' + topic = b'' ctx = unfi[r] if ctx.mutable(): topic = ctx.topic() branch = info[0] if topic: - branch = '%s:%s' % (branch, topic) + branch = b'%s:%s' % (branch, topic) return (branch, info[1]) try: unfi.revbranchcache().branchinfo = branchinfo
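
The topicmap cache keys a branchmap variant not only on the usual tip and filter data but also on a hash of the non-public revisions, so two repository views with the same draft set can share it. The hashing step is small enough to show on its own; a sketch over a plain set of revision numbers (``phases_key`` is an illustrative name)::

    import hashlib

    def phases_key(revs):
        """Build a compact cache key from a set of revision numbers.

        Same idea as _phaseshash() above: feed each revision into sha1 so
        equal draft/secret sets produce equal keys.
        """
        if not revs:
            return None
        digest = hashlib.sha1()
        for rev in sorted(revs):
            digest.update(b'%d;' % rev)
        return digest.digest()

    print(phases_key({3, 1, 2}).hex())
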
--- a/setup.py Fri Jul 19 16:26:48 2019 +0200 +++ b/setup.py Tue Sep 24 12:42:27 2019 +0200 @@ -7,7 +7,8 @@ def get_metadata(): meta = {} fullpath = join(dirname(__file__), META_PATH) - execfile(fullpath, meta) + with open(fullpath, 'r') as fp: + exec(fp.read(), meta) return meta def get_version(): @@ -28,6 +29,8 @@ 'hgext3rd.topic', ] +py_versions = '>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4' + if os.environ.get('INCLUDE_INHIBIT'): py_modules.append('hgext3rd.evolve.hack.inhibit') py_modules.append('hgext3rd.evolve.hack.directaccess') @@ -45,5 +48,6 @@ keywords='hg mercurial', license='GPLv2+', py_modules=py_modules, - packages=py_packages + packages=py_packages, + python_requires=py_versions )
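
The ``setup.py`` change above replaces ``execfile()``, which no longer exists on Python 3, with an explicit read-and-``exec``. Out of context it amounts to roughly the following; the value of ``META_PATH`` is assumed here for illustration, in the real file it points at the evolve metadata module that defines ``__version__``::

    from os.path import dirname, join

    META_PATH = 'hgext3rd/evolve/metadata.py'  # assumed value, for illustration

    def get_metadata():
        """Load the metadata module into a plain dict, on py2 and py3 alike."""
        meta = {}
        fullpath = join(dirname(__file__), META_PATH)
        with open(fullpath, 'r') as fp:
            exec(fp.read(), meta)
        return meta

The new ``python_requires`` line advertises the same support matrix to pip: any 2.7 interpreter, or 3.6 and later, below 4.0.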
--- a/tests/test-discovery-obshashrange-cache.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-discovery-obshashrange-cache.t Tue Sep 24 12:42:27 2019 +0200 @@ -35,13 +35,21 @@ > hg -R main debugobsolete $anc $node > done marking 000011223334456677789aaaaabbbbcccddddeef as predecessors of 4de32a90b66cd083ebf3c00b41277aa7abca51dd + 1 new obsolescence markers marking 012234455555666699aaaaabbbccccccefffffff as predecessors of f69452c5b1af6cbaaa56ef50cf94fff5bcc6ca23 + 1 new obsolescence markers marking 00001122233445555777778889999abbcccddeef as predecessors of c8d03c1b5e94af74b772900c58259d2e08917735 + 1 new obsolescence markers marking 0011222445667777889999aabbbbcddddeeeeeee as predecessors of bebd167eb94d257ace0e814aeb98e6972ed2970d + 1 new obsolescence markers marking 000011222223344555566778899aaaabccddefff as predecessors of 2dc09a01254db841290af0538aa52f6f52c776e3 + 1 new obsolescence markers marking 01111222223333444455555566999abbbbcceeef as predecessors of 01241442b3c2bf3211e593b549c655ea65b295e3 + 1 new obsolescence markers marking 01122444445555566677888aabbcccddddefffff as predecessors of 66f7d451a68b85ed82ff5fcc254daf50c74144bd + 1 new obsolescence markers marking 000111111234444467777889999aaaabcdeeeeff as predecessors of 1ea73414a91b0920940797d8fc6a11e447f8ea1e + 1 new obsolescence markers $ hg debugobsolete -R main 000011223334456677789aaaaabbbbcccddddeef 4de32a90b66cd083ebf3c00b41277aa7abca51dd 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
--- a/tests/test-discovery-obshashrange.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-discovery-obshashrange.t Tue Sep 24 12:42:27 2019 +0200 @@ -66,10 +66,15 @@ $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(r1)'` + 1 new obsolescence markers $ hg debugobsolete bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb `getid 'desc(r2)'` --config experimental.obshashrange.max-revs=1 + 1 new obsolescence markers $ hg debugobsolete cccccccccccccccccccccccccccccccccccccccc `getid 'desc(r4)'` + 1 new obsolescence markers $ hg debugobsolete dddddddddddddddddddddddddddddddddddddddd `getid 'desc(r5)'` --config experimental.obshashrange.warm-cache=0 + 1 new obsolescence markers $ hg debugobsolete eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee `getid 'desc(r7)'` + 1 new obsolescence markers $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 66f7d451a68b85ed82ff5fcc254daf50c74144bd 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'} bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 01241442b3c2bf3211e593b549c655ea65b295e3 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'} @@ -183,6 +188,7 @@ $ hg add foo $ hg commit -m foo $ hg debugobsolete ffffffffffffffffffffffffffffffffffffffff `getid '.'` + 1 new obsolescence markers $ hg push -f --debug could not import hgext.hgext3rd.evolve (No module named hgext3rd.evolve): trying hgext3rd.hgext3rd.evolve (?) could not import hgext3rd.hgext3rd.evolve (No module named hgext3rd.evolve): trying hgext3rd.evolve (?) @@ -231,6 +237,7 @@ remote: adding manifests remote: adding file changes remote: added 1 changesets with 1 changes to 1 files (+1 heads) + remote: 1 new obsolescence markers bundle2-input-bundle: no-transaction bundle2-input-part: "reply:changegroup" (advisory) (params: 0 advisory) supported bundle2-input-part: "reply:obsmarkers" (params: 0 advisory) supported @@ -238,7 +245,6 @@ preparing listkeys for "phases" sending listkeys command received listkey for "phases": 58 bytes - remote: 1 new obsolescence markers $ hg -R ../server blackbox * @0000000000000000000000000000000000000000 (*)> -R server serve --stdio (glob) 1970/01/01 00:00:00 * @0000000000000000000000000000000000000000 (*)> updated evo-ext-firstmerge in *.???? 
seconds (1r) (glob) @@ -272,7 +278,9 @@ o 0 1ea73414a91b r0 $ hg debugobsolete 111111111111111aaaaaaaaa1111111111111111 `getid 'desc(r1)'` + 1 new obsolescence markers $ hg debugobsolete 22222222222222222bbbbbbbbbbbbb2222222222 `getid 'desc(r3)'` + 1 new obsolescence markers $ hg push pushing to ssh://user@dummy/server searching for changes @@ -413,7 +421,9 @@ o 0 1ea73414a91b r0 $ hg -R ../server debugobsolete aaaaaaa11111111aaaaaaaaa1111111111111111 `getid 'desc(r1)'` + 1 new obsolescence markers $ hg -R ../server debugobsolete bbbbbbb2222222222bbbbbbbbbbbbb2222222222 `getid 'desc(r4)'` + 1 new obsolescence markers $ hg pull -r 6 pulling from ssh://user@dummy/server searching for changes @@ -507,7 +517,9 @@ 6 c8d03c1b5e94 5 1 6 446c2dc3bce5 7 f69452c5b1af 6 1 7 000000000000 $ hg -R ../server debugobsolete aaaa333333333aaaaa333a3a3a3a3a3a3a3a3a3a `getid 'desc(r1)'` + 1 new obsolescence markers $ hg -R ../server debugobsolete bb4b4b4b4b4b4b4b44b4b4b4b4b4b4b4b4b4b4b4 `getid 'desc(r3)'` + 1 new obsolescence markers $ hg pull -r `getid 'desc(r6)'` pulling from ssh://user@dummy/server no changes found @@ -588,6 +600,7 @@ 5 c8d03c1b5e94 5 1 6 446c2dc3bce5 6 f69452c5b1af 6 1 7 000000000000 $ hg -R ../server debugobsolete --record-parents `getid 'desc(foo)'` + 1 new obsolescence markers $ hg debugobshashrange -R ../server --subranges --rev 'heads(all())' rev node index size depth obshash 7 4de32a90b66c 0 8 8 c7f1f7e9925b @@ -672,6 +685,7 @@ 5 c8d03c1b5e94 5 1 6 446c2dc3bce5 6 f69452c5b1af 6 1 7 000000000000 $ hg -R ../server debugobsolete --record-parents `(cd ../server/; getid 'desc("chain_prune")')` + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobshashrange -R ../server --subranges --rev 'heads(all())' rev node index size depth obshash @@ -756,6 +770,7 @@ New marker prefixed to another one $ hg -R ../server debugobsolete aaaa4444444444444444aaaaaaaaaaaaaaaaaaaa `(cd ../server/; getid 'desc("chain_prune")')` + 1 new obsolescence markers $ hg debugobshashrange -R ../server --subranges --rev 'heads(all())' rev node index size depth obshash 7 4de32a90b66c 0 8 8 0ffc0013bda0 @@ -818,6 +833,7 @@ New prune marker prefixed to another one $ hg -R ../server debugobsolete aaaa4444444444444444aaaaaaaaaaaaaaaaaaaa + 1 new obsolescence markers $ hg debugobshashrange -R ../server --subranges --rev 'heads(all())' rev node index size depth obshash 7 4de32a90b66c 0 8 8 87b2a11bd884 @@ -1142,7 +1158,6 @@ [1] $ hg debugupdatecache --debug updating the branch cache - invalid branch cache (served): tip differs $ f -s .hg/cache/evoext* .hg/cache/evoext-depthcache-00: size=96 .hg/cache/evoext-firstmerge-00: size=96
--- a/tests/test-evolve-abort-orphan.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-evolve-abort-orphan.t Tue Sep 24 12:42:27 2019 +0200 @@ -10,6 +10,7 @@ Setup ===== +#testcases abortcommand abortflag $ cat >> $HGRCPATH <<EOF > [phases] > publish = False @@ -19,6 +20,13 @@ > EOF $ echo "evolve=$(echo $(dirname $TESTDIR))/hgext3rd/evolve/" >> $HGRCPATH +#if abortflag + $ cat >> $HGRCPATH <<EOF + > [alias] + > abort = evolve --abort + > EOF +#endif + $ hg init abortrepo $ cd abortrepo $ echo ".*\.orig" > .hgignore @@ -41,9 +49,15 @@ Testing --abort when no evolve is interrupted ============================================= +#if abortflag $ hg evolve --abort abort: no interrupted evolve to abort [255] +#else + $ hg abort + abort: no operation in progress + [255] +#endif Testing with wrong combination of flags ======================================= @@ -103,8 +117,13 @@ instability: orphan summary: added d +#if abortcommand +when in dry-run mode + $ hg abort --dry-run + evolve in progress, will be aborted +#endif - $ hg evolve --abort + $ hg abort evolve aborted working directory is now at e93a9161a274 @@ -199,7 +218,7 @@ o 0:8fa14d15e168 added hgignore () draft - $ hg evolve --abort + $ hg abort 1 new orphan changesets evolve aborted working directory is now at 125af0ed8cae @@ -299,7 +318,7 @@ o 0:8fa14d15e168 added hgignore () draft - $ hg evolve --abort + $ hg abort 2 new orphan changesets evolve aborted working directory is now at 807e8e2ca559 @@ -395,7 +414,7 @@ adding file changes added 1 changesets with 1 changes to 1 files $ cd ../repotwo - $ hg evolve --abort + $ hg abort warning: new changesets detected on destination branch abort: unable to abort interrupted evolve, use 'hg evolve --stop' to stop evolve [255] @@ -442,7 +461,7 @@ $ hg phase -r 1c476940790a --public - $ hg evolve --abort + $ hg abort cannot clean up public changesets: 1c476940790a abort: unable to abort interrupted evolve, use 'hg evolve --stop' to stop evolve [255] @@ -510,7 +529,7 @@ o 0:8fa14d15e168 added hgignore () draft - $ hg evolve --abort + $ hg abort 1 new orphan changesets evolve aborted working directory is now at a0086c17bfc7 @@ -543,7 +562,7 @@ (see 'hg help evolve.interrupted') [1] - $ hg evolve --abort + $ hg abort evolve aborted working directory is now at c1f4718020e3
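
The test file above now runs every scenario twice: the ``abortcommand`` case exercises the generic ``hg abort`` command (hg-5.1+), while the ``abortflag`` case bridges the same spelling to ``hg evolve --abort`` through an alias. A user on an older Mercurial could get the same ``hg abort`` spelling with roughly this configuration (illustrative only, taken from the alias defined in the test)::

    [alias]
    abort = evolve --abort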
--- a/tests/test-evolve-abort-phasediv.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-evolve-abort-phasediv.t Tue Sep 24 12:42:27 2019 +0200 @@ -10,6 +10,7 @@ Setup ===== +#testcases abortcommand abortflag $ cat >> $HGRCPATH <<EOF > [phases] > publish = False @@ -19,6 +20,13 @@ > EOF $ echo "evolve=$(echo $(dirname $TESTDIR))/hgext3rd/evolve/" >> $HGRCPATH +#if abortflag + $ cat >> $HGRCPATH <<EOF + > [alias] + > abort = evolve --abort + > EOF +#endif + $ hg init abortrepo $ cd abortrepo $ echo ".*\.orig" > .hgignore @@ -124,7 +132,7 @@ summary: added d - $ hg evolve --abort + $ hg abort evolve aborted working directory is now at ddba58020bc0 @@ -218,7 +226,7 @@ (see 'hg help evolve.interrupted') [1] - $ hg evolve --abort + $ hg abort 1 new phase-divergent changesets evolve aborted working directory is now at 28cd06b3f801 @@ -304,7 +312,7 @@ (see 'hg help evolve.interrupted') [1] - $ hg evolve --abort + $ hg abort 1 new phase-divergent changesets evolve aborted working directory is now at ef9b72b9b42c
--- a/tests/test-evolve-templates.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-evolve-templates.t Tue Sep 24 12:42:27 2019 +0200 @@ -1105,11 +1105,14 @@ Create the cycle $ hg debugobsolete `getid "desc(A0)"` `getid "desc(B0)"` + 1 new obsolescence markers obsoleted 1 changesets 1 new orphan changesets $ hg debugobsolete `getid "desc(B0)"` `getid "desc(C0)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete `getid "desc(B0)"` `getid "desc(A0)"` + 1 new obsolescence markers Check templates --------------- @@ -1288,6 +1291,7 @@ summary: ROOT $ hg debugobsolete `getid "4"` `getid "5"` `getid "6"` `getid "7"` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G @ changeset: 7:ba2ed02b0c9a
--- a/tests/test-exchange-obsmarkers-case-A1.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-exchange-obsmarkers-case-A1.t Tue Sep 24 12:42:27 2019 +0200 @@ -53,6 +53,7 @@ $ cd main $ mkcommit A $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(A)'` + 1 new obsolescence markers $ hg log -G @ f5bc6836db60 (draft): A | @@ -211,6 +212,7 @@ o a9bdc8b26820 (public): O $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(A)'` + 1 new obsolescence markers $ inspect_obsmarkers obsstore content ================
--- a/tests/test-exchange-obsmarkers-case-A2.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-exchange-obsmarkers-case-A2.t Tue Sep 24 12:42:27 2019 +0200 @@ -56,11 +56,13 @@ $ cd main $ mkcommit A $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(A)'` + 1 new obsolescence markers $ hg up '.~1' 0 files updated, 0 files merged, 1 files removed, 0 files unresolved $ mkcommit B created new head $ hg debugobsolete bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb `getid 'desc(B)'` + 1 new obsolescence markers $ hg log -G @ 35b183996678 (draft): B |
--- a/tests/test-exchange-obsmarkers-case-A3.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-exchange-obsmarkers-case-A3.t Tue Sep 24 12:42:27 2019 +0200 @@ -73,9 +73,11 @@ $ mkcommit B1 created new head $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'` + 1 new obsolescence markers obsoleted 1 changesets 1 new orphan changesets $ hg debugobsolete `getid 'desc(B0)'` `getid 'desc(B1)'` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ f6298a8ac3a4 (draft): B1 @@ -178,9 +180,11 @@ $ mkcommit B1 created new head $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'` + 1 new obsolescence markers obsoleted 1 changesets 1 new orphan changesets $ hg debugobsolete `getid 'desc(B0)'` `getid 'desc(B1)'` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ f6298a8ac3a4 (draft): B1
--- a/tests/test-exchange-obsmarkers-case-A4.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-exchange-obsmarkers-case-A4.t Tue Sep 24 12:42:27 2019 +0200 @@ -63,7 +63,9 @@ $ mkcommit A1 created new head $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(A0)'` + 1 new obsolescence markers $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'` + 1 new obsolescence markers obsoleted 1 changesets 1 new orphan changesets $ hg log -G --hidden
--- a/tests/test-exchange-obsmarkers-case-A5.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-exchange-obsmarkers-case-A5.t Tue Sep 24 12:42:27 2019 +0200 @@ -65,9 +65,12 @@ created new head $ mkcommit A1 $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(A0)'` + 1 new obsolescence markers $ hg debugobsolete `getid 'desc(B0)'` `getid 'desc(B1)'` + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ 8c0a98c83722 (draft): A1
--- a/tests/test-exchange-obsmarkers-case-A6.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-exchange-obsmarkers-case-A6.t Tue Sep 24 12:42:27 2019 +0200 @@ -64,6 +64,7 @@ create a marker after this $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ e5ea8f9c7314 (draft): A1
--- a/tests/test-exchange-obsmarkers-case-A7.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-exchange-obsmarkers-case-A7.t Tue Sep 24 12:42:27 2019 +0200 @@ -51,6 +51,7 @@ $ hg push -q ../pushdest $ hg push -q ../pulldest $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(A)'` + 1 new obsolescence markers $ hg log -G --hidden @ f5bc6836db60 (draft): A |
--- a/tests/test-exchange-obsmarkers-case-B5.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-exchange-obsmarkers-case-B5.t Tue Sep 24 12:42:27 2019 +0200 @@ -70,10 +70,13 @@ created new head $ mkcommit B1 $ hg debugobsolete --hidden `getid 'desc(A0)'` `getid 'desc(A1)'` + 1 new obsolescence markers obsoleted 1 changesets 2 new orphan changesets $ hg debugobsolete --hidden aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(B0)'` + 1 new obsolescence markers $ hg debugobsolete --hidden `getid 'desc(B0)'` `getid 'desc(B1)'` + 1 new obsolescence markers obsoleted 1 changesets $ hg prune -qd '0 0' 'desc(B1)' $ hg log -G --hidden
--- a/tests/test-exchange-obsmarkers-case-B6.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-exchange-obsmarkers-case-B6.t Tue Sep 24 12:42:27 2019 +0200 @@ -57,6 +57,7 @@ $ mkcommit B1 created new head $ hg debugobsolete `getid 'desc(B0)'` `getid 'desc(B1)'` + 1 new obsolescence markers obsoleted 1 changesets $ hg prune -qd '0 0' . $ hg log -G --hidden
--- a/tests/test-exchange-obsmarkers-case-C2.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-exchange-obsmarkers-case-C2.t Tue Sep 24 12:42:27 2019 +0200 @@ -62,6 +62,7 @@ $ mkcommit A1 created new head $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ e5ea8f9c7314 (draft): A1
--- a/tests/test-exchange-obsmarkers-case-C3.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-exchange-obsmarkers-case-C3.t Tue Sep 24 12:42:27 2019 +0200 @@ -64,6 +64,7 @@ $ mkcommit A1 created new head $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'` + 1 new obsolescence markers obsoleted 1 changesets $ hg prune -qd '0 0' . $ hg log -G --hidden
--- a/tests/test-exchange-obsmarkers-case-C4.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-exchange-obsmarkers-case-C4.t Tue Sep 24 12:42:27 2019 +0200 @@ -65,8 +65,10 @@ $ mkcommit C created new head $ hg debugobsolete --hidden `getid 'desc(A)'` `getid 'desc(B)'` + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete --hidden `getid 'desc(A)'` `getid 'desc(C)'` + 1 new obsolescence markers 2 new content-divergent changesets $ hg prune -qd '0 0' . $ hg log -G --hidden
--- a/tests/test-exchange-obsmarkers-case-D1.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-exchange-obsmarkers-case-D1.t Tue Sep 24 12:42:27 2019 +0200 @@ -61,6 +61,7 @@ $ mkcommit A1 created new head $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'` + 1 new obsolescence markers obsoleted 1 changesets 1 new orphan changesets $ hg prune -d '0 0' 'desc(B)'
--- a/tests/test-exchange-obsmarkers-case-D2.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-exchange-obsmarkers-case-D2.t Tue Sep 24 12:42:27 2019 +0200 @@ -54,6 +54,7 @@ $ mkcommit A1 created new head $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'` + 1 new obsolescence markers obsoleted 1 changesets $ hg prune --date '0 0' . 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
--- a/tests/test-exchange-obsmarkers-case-D3.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-exchange-obsmarkers-case-D3.t Tue Sep 24 12:42:27 2019 +0200 @@ -57,6 +57,7 @@ created new head $ mkcommit A1 $ hg debugobsolete `getid 'desc(A0)'` `getid 'desc(A1)'` + 1 new obsolescence markers obsoleted 1 changesets $ hg prune -d '0 0' . 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
--- a/tests/test-exchange-obsmarkers-case-D4.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-exchange-obsmarkers-case-D4.t Tue Sep 24 12:42:27 2019 +0200 @@ -59,12 +59,16 @@ created new head $ mkcommit B1 $ hg debugobsolete `getid 'desc(A0)'` aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa + 1 new obsolescence markers obsoleted 1 changesets 1 new orphan changesets $ hg debugobsolete aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa `getid 'desc(A1)'` + 1 new obsolescence markers $ hg debugobsolete `getid 'desc(B0)'` bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb `getid 'desc(B1)'` + 1 new obsolescence markers $ hg log -G --hidden @ 069b05c3876d (draft): B1 |
--- a/tests/test-obsolete.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-obsolete.t Tue Sep 24 12:42:27 2019 +0200 @@ -33,6 +33,7 @@ $ getid 3 0d3f46688ccc6e756c7e96cf64c391c411309597 $ hg debugobsolete 4538525df7e2b9f09423636c61ef63a4cb872a2d 0d3f46688ccc6e756c7e96cf64c391c411309597 + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete 4538525df7e2b9f09423636c61ef63a4cb872a2d 0d3f46688ccc6e756c7e96cf64c391c411309597 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'} @@ -97,6 +98,7 @@ $ mkcommit "obsol_c'" # 4 (on 1) created new head $ hg debugobsolete `getid 3` `getid 4` + 1 new obsolescence markers obsoleted 1 changesets $ qlog 4 @@ -215,6 +217,7 @@ 1 new orphan changesets created new head $ hg debugobsolete `getid 5` `getid 6` + 1 new obsolescence markers obsoleted 1 changesets $ qlog 6 @@ -274,6 +277,7 @@ 1 new orphan changesets created new head $ hg debugobsolete `getid 6` `getid 7` + 1 new obsolescence markers obsoleted 1 changesets $ hg pull -R ../other-new . pulling from . @@ -365,6 +369,7 @@ 1 new orphan changesets created new head $ hg debugobsolete `getid 7` `getid 8` + 1 new obsolescence markers obsoleted 1 changesets $ cd ../other-new $ hg up -q 3 @@ -428,6 +433,7 @@ $ hg id -n 9 $ hg debugobsolete `getid 0` `getid 9` + 1 new obsolescence markers 1 new phase-divergent changesets 83b5778897ad try to obsolete immutable changeset 1f0dee641bb7 # at core level the warning is not issued @@ -452,6 +458,7 @@ - 1f0dee641bb7 $ hg debugobsolete `getid 9` #kill + 1 new obsolescence markers obsoleted 1 changesets $ hg up null -q # to be not based on 9 anymore $ qlog @@ -562,6 +569,7 @@ $ mkcommit "obsol_d'''" created new head $ hg debugobsolete `getid 11` `getid 12` + 1 new obsolescence markers obsoleted 1 changesets $ hg push ../other-new --traceback pushing to ../other-new @@ -688,6 +696,7 @@ phases: 3 draft phase-divergent: 1 changesets $ hg debugobsolete `getid a7a6f2b5d8a5` `getid 50f11e5e3a63` + 1 new obsolescence markers 2 new content-divergent changesets $ hg log -r 'contentdivergent()' changeset: 12:6db5e282cb91
--- a/tests/test-pick.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-pick.t Tue Sep 24 12:42:27 2019 +0200 @@ -1,3 +1,4 @@ +#testcases abortcommand abortflag Test for the pick command $ cat >> $HGRCPATH <<EOF @@ -8,6 +9,13 @@ > EOF $ echo "evolve=$(echo $(dirname $TESTDIR))/hgext3rd/evolve/" >> $HGRCPATH +#if abortflag + $ cat >> $HGRCPATH <<EOF + > [alias] + > abort = pick --abort + > EOF +#endif + $ mkcommit() { > echo "$1" > "$1" > hg add "$1" @@ -79,6 +87,11 @@ $ hg pick --abort abort: no interrupted pick state exists [255] +#if abortcommand + $ hg abort + abort: no operation in progress + [255] +#endif Specifying both continue and revs @@ -235,7 +248,11 @@ unresolved merge conflicts (see hg help resolve) [1] - $ hg pick --abort +#if abortcommand + $ hg abort --dry-run + pick in progress, will be aborted +#endif + $ hg abort aborting pick, updating to c437988de89f $ hg glog
--- a/tests/test-prune.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-prune.t Tue Sep 24 12:42:27 2019 +0200 @@ -39,7 +39,7 @@ abort: can only specify one of pair, fold [255] $ hg prune --fold --biject - abort: nothing to prune + abort: no revisions specified to prune [255] $ hg prune --split --fold abort: can only specify one of fold, split @@ -307,7 +307,7 @@ (activating bookmark todelete) $ hg prune -B nostrip bookmark 'nostrip' deleted - abort: nothing to prune + abort: no revisions specified to prune [255] $ hg tag --remove --local a $ hg prune -B todelete
--- a/tests/test-pullbundle.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-pullbundle.t Tue Sep 24 12:42:27 2019 +0200 @@ -118,7 +118,6 @@ bundle2-input: payload chunk size: 0 adding manifests adding file changes - added 1024 changesets with 0 changes to 0 files bundle2-input-part: total payload size 186208 bundle2-input: part header size: 43 bundle2-input: part type: "CHANGEGROUP" @@ -131,7 +130,6 @@ bundle2-input: payload chunk size: 0 adding manifests adding file changes - added 128 changesets with 0 changes to 0 files bundle2-input-part: total payload size 23564 bundle2-input: part header size: 42 bundle2-input: part type: "CHANGEGROUP" @@ -144,7 +142,6 @@ bundle2-input: payload chunk size: 0 adding manifests adding file changes - added 64 changesets with 0 changes to 0 files bundle2-input-part: total payload size 11788 bundle2-input: part header size: 42 bundle2-input: part type: "CHANGEGROUP" @@ -157,7 +154,6 @@ bundle2-input: payload chunk size: 0 adding manifests adding file changes - added 16 changesets with 0 changes to 0 files bundle2-input-part: total payload size 2956 bundle2-input: part header size: 41 bundle2-input: part type: "CHANGEGROUP" @@ -170,7 +166,6 @@ bundle2-input: payload chunk size: 0 adding manifests adding file changes - added 2 changesets with 0 changes to 0 files bundle2-input-part: total payload size 380 bundle2-input: part header size: 41 bundle2-input: part type: "CHANGEGROUP" @@ -183,7 +178,6 @@ bundle2-input: payload chunk size: 0 adding manifests adding file changes - added 1 changesets with 0 changes to 0 files bundle2-input-part: total payload size 196 bundle2-input: part header size: 18 bundle2-input: part type: "PHASE-HEADS" @@ -208,6 +202,7 @@ bundle2-input-bundle: 7 parts total checking for updated bookmarks updating the branch cache + added 1235 changesets with 0 changes to 0 files new changesets 1ea73414a91b:f864bc82f6a2 (run 'hg update' to get a working copy) @@ -244,7 +239,7 @@ $ hg -R client pull server --verbose pulling from server searching for changes - all local heads known remotely + all local changesets known remotely pullbundle-cache: "missing" set sliced into 18 subranges in *.* seconds (glob) 1 changesets found 4 changesets found @@ -321,75 +316,58 @@ adding changesets adding manifests adding file changes - added 1 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 4 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 8 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 32 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 128 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 64 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 32 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 8 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 4 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 2 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 1 changesets with 0 changes to 0 files (+1 heads) adding changesets adding manifests adding file changes - added 4 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 8 
changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 16 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 256 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 256 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 64 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 8 changesets with 0 changes to 0 files + added 896 changesets with 0 changes to 0 files (+1 heads) new changesets 17185c1c22f1:0f376356904f (run 'hg heads' to see heads, 'hg merge' to merge) @@ -447,27 +425,22 @@ adding changesets adding manifests adding file changes - added 1024 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 128 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 64 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 16 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 2 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 1 changesets with 0 changes to 0 files + added 1235 changesets with 0 changes to 0 files new changesets 1ea73414a91b:f864bc82f6a2 (run 'hg update' to get a working copy) @@ -492,7 +465,7 @@ $ hg -R client2 pull server --verbose pulling from server searching for changes - all local heads known remotely + all local changesets known remotely pullbundle-cache: "missing" set sliced into 18 subranges in *.* seconds (glob) 1 changesets found in caches 4 changesets found in caches @@ -515,75 +488,58 @@ adding changesets adding manifests adding file changes - added 1 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 4 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 8 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 32 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 128 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 64 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 32 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 8 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 4 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 2 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 1 changesets with 0 changes to 0 files (+1 heads) adding changesets adding manifests adding file changes - added 4 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 8 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 16 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 256 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 256 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 64 changesets with 0 changes to 
0 files adding changesets adding manifests adding file changes - added 8 changesets with 0 changes to 0 files + added 896 changesets with 0 changes to 0 files (+1 heads) new changesets 17185c1c22f1:0f376356904f (run 'hg heads' to see heads, 'hg merge' to merge) @@ -635,19 +591,15 @@ adding changesets adding manifests adding file changes - added 1024 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 227 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 29 changesets with 0 changes to 0 files (+1 heads) adding changesets adding manifests adding file changes - added 128 changesets with 0 changes to 0 files (-1 heads) adding changesets uncompressed size of bundle content: 5892 (changelog) @@ -663,23 +615,19 @@ 4 (manifests) adding manifests adding file changes - added 64 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 32 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 16 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 2 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 1 changesets with 0 changes to 0 files + added 1523 changesets with 0 changes to 0 files new changesets 1ea73414a91b:44e80141ad53 (run 'hg update' to get a working copy) @@ -733,7 +681,7 @@ $ hg -R client3 pull server --verbose pulling from server searching for changes - all local heads known remotely + all local changesets known remotely pullbundle-cache: "missing" set sliced into 16 subranges in *.* seconds (glob) 1 changesets found 4 changesets found @@ -775,67 +723,52 @@ adding changesets adding manifests adding file changes - added 1 changesets with 0 changes to 0 files (+1 heads) adding changesets adding manifests adding file changes - added 4 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 8 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 16 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 128 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 64 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 32 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 8 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 4 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 2 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 1 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 4 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 8 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 256 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 64 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 8 changesets with 0 changes to 0 files + added 608 changesets with 0 changes to 0 files (+1 heads) new changesets d1807e351389:0f376356904f (run 'hg heads' to see heads, 'hg 
merge' to merge) @@ -912,67 +845,52 @@ adding changesets adding manifests adding file changes - added 1024 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 256 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 128 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 64 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 32 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 8 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 4 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 2 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 1 changesets with 0 changes to 0 files (+1 heads) adding changesets adding manifests adding file changes - added 4 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 8 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 16 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 256 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 256 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 64 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 8 changesets with 0 changes to 0 files + added 2131 changesets with 0 changes to 0 files (+1 heads) new changesets 1ea73414a91b:0f376356904f (run 'hg heads' to see heads, 'hg merge' to merge) @@ -1040,29 +958,25 @@ adding changesets adding manifests adding file changes - added 512 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 256 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 128 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 4 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 1 changesets with 0 changes to 0 files (+1 heads) + added 901 changesets with 0 changes to 0 files (+1 heads) new changesets 1ea73414a91b:c31a4e0cc28d (run 'hg heads' to see heads, 'hg merge' to merge) $ hg -R test-local-missing pull server --verbose pulling from server searching for changes - all local heads known remotely + all local changesets known remotely pullbundle-cache: "missing" set sliced into 19 subranges in *.* seconds (glob) 4 changesets found 8 changesets found @@ -1101,79 +1015,61 @@ adding changesets adding manifests adding file changes - added 4 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 8 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 16 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 32 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 64 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 256 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 128 changesets with 
0 changes to 0 files adding changesets adding manifests adding file changes - added 64 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 32 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 8 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 4 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 2 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 4 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 8 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 16 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 256 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 256 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 64 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 8 changesets with 0 changes to 0 files + added 1230 changesets with 0 changes to 0 files new changesets e600b80a2fc8:0f376356904f (run 'hg update' to get a working copy) @@ -1195,67 +1091,52 @@ adding changesets adding manifests adding file changes - added 1024 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 256 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 128 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 64 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 32 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 8 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 4 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 2 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 1 changesets with 0 changes to 0 files (+1 heads) adding changesets adding manifests adding file changes - added 4 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 8 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 16 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 256 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 256 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 64 changesets with 0 changes to 0 files adding changesets adding manifests adding file changes - added 8 changesets with 0 changes to 0 files + added 2131 changesets with 0 changes to 0 files (+1 heads) new changesets 1ea73414a91b:0f376356904f updating to branch default 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
--- a/tests/test-push-checkheads-partial-C1.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-push-checkheads-partial-C1.t Tue Sep 24 12:42:27 2019 +0200 @@ -58,6 +58,7 @@ $ mkcommit B1 created new head $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ 25c56d33e4c4 (draft): B1
--- a/tests/test-push-checkheads-partial-C2.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-push-checkheads-partial-C2.t Tue Sep 24 12:42:27 2019 +0200 @@ -60,6 +60,7 @@ $ mkcommit A1 created new head $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"` + 1 new obsolescence markers obsoleted 1 changesets 1 new orphan changesets $ hg log -G --hidden
--- a/tests/test-push-checkheads-partial-C3.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-push-checkheads-partial-C3.t Tue Sep 24 12:42:27 2019 +0200 @@ -60,6 +60,7 @@ $ mkcommit C0 created new head $ hg debugobsolete --record-parents `getid "desc(B0)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ 0f88766e02d6 (draft): C0
--- a/tests/test-push-checkheads-partial-C4.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-push-checkheads-partial-C4.t Tue Sep 24 12:42:27 2019 +0200 @@ -60,6 +60,7 @@ $ mkcommit C0 created new head $ hg debugobsolete --record-parents `getid "desc(A0)"` + 1 new obsolescence markers obsoleted 1 changesets 1 new orphan changesets $ hg log -G --hidden
--- a/tests/test-push-checkheads-pruned-B1.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-push-checkheads-pruned-B1.t Tue Sep 24 12:42:27 2019 +0200 @@ -49,6 +49,7 @@ $ mkcommit B0 created new head $ hg debugobsolete --record-parents `getid "desc(A0)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ 74ff5441d343 (draft): B0
--- a/tests/test-push-checkheads-pruned-B2.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-push-checkheads-pruned-B2.t Tue Sep 24 12:42:27 2019 +0200 @@ -60,9 +60,11 @@ $ mkcommit A1 created new head $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"` + 1 new obsolescence markers obsoleted 1 changesets 1 new orphan changesets $ hg debugobsolete --record-parents `getid "desc(B0)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ f6082bc4ffef (draft): A1
--- a/tests/test-push-checkheads-pruned-B3.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-push-checkheads-pruned-B3.t Tue Sep 24 12:42:27 2019 +0200 @@ -60,9 +60,11 @@ $ mkcommit B1 created new head $ hg debugobsolete --record-parents `getid "desc(A0)"` + 1 new obsolescence markers obsoleted 1 changesets 1 new orphan changesets $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ 25c56d33e4c4 (draft): B1
--- a/tests/test-push-checkheads-pruned-B4.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-push-checkheads-pruned-B4.t Tue Sep 24 12:42:27 2019 +0200 @@ -61,9 +61,11 @@ $ mkcommit C0 created new head $ hg debugobsolete --record-parents `getid "desc(A0)"` + 1 new obsolescence markers obsoleted 1 changesets 1 new orphan changesets $ hg debugobsolete --record-parents `getid "desc(B0)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ 0f88766e02d6 (draft): C0
--- a/tests/test-push-checkheads-pruned-B5.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-push-checkheads-pruned-B5.t Tue Sep 24 12:42:27 2019 +0200 @@ -64,11 +64,14 @@ $ mkcommit B1 created new head $ hg debugobsolete --record-parents `getid "desc(A0)"` + 1 new obsolescence markers obsoleted 1 changesets 2 new orphan changesets $ hg debugobsolete `getid "desc(B0)"` `getid "desc(B1)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete --record-parents `getid "desc(C0)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ 25c56d33e4c4 (draft): B1
--- a/tests/test-push-checkheads-pruned-B6.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-push-checkheads-pruned-B6.t Tue Sep 24 12:42:27 2019 +0200 @@ -52,8 +52,10 @@ $ hg up 'desc(B0)' 0 files updated, 0 files merged, 1 files removed, 0 files unresolved $ hg debugobsolete `getid "desc(A0)"` `getid "desc(A1)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete --record-parents `getid "desc(A1)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden x ba93660aff8d (draft): A1
--- a/tests/test-push-checkheads-pruned-B7.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-push-checkheads-pruned-B7.t Tue Sep 24 12:42:27 2019 +0200 @@ -51,8 +51,10 @@ $ hg up 'desc(B0)' 0 files updated, 0 files merged, 1 files removed, 0 files unresolved $ hg debugobsolete `getid "desc(A0)"` `getid "desc(A1)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete --record-parents `getid "desc(A1)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden x ba93660aff8d (draft): A1
--- a/tests/test-push-checkheads-pruned-B8.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-push-checkheads-pruned-B8.t Tue Sep 24 12:42:27 2019 +0200 @@ -67,13 +67,17 @@ $ mkcommit A2 created new head $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"` + 1 new obsolescence markers obsoleted 1 changesets 1 new orphan changesets $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete --record-parents `getid "desc(B1)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete `getid "desc(A1)" ` `getid "desc(A2)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ c1f8d089020f (draft): A2
--- a/tests/test-push-checkheads-superceed-A1.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-push-checkheads-superceed-A1.t Tue Sep 24 12:42:27 2019 +0200 @@ -46,6 +46,7 @@ $ mkcommit A1 created new head $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ f6082bc4ffef (draft): A1
--- a/tests/test-push-checkheads-superceed-A2.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-push-checkheads-superceed-A2.t Tue Sep 24 12:42:27 2019 +0200 @@ -60,9 +60,11 @@ created new head $ mkcommit B1 $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"` + 1 new obsolescence markers obsoleted 1 changesets 1 new orphan changesets $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ 262c8c798096 (draft): B1
--- a/tests/test-push-checkheads-superceed-A3.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-push-checkheads-superceed-A3.t Tue Sep 24 12:42:27 2019 +0200 @@ -63,9 +63,11 @@ created new head $ mkcommit A1 $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"` + 1 new obsolescence markers obsoleted 1 changesets 1 new orphan changesets $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ c1c7524e9488 (draft): A1
--- a/tests/test-push-checkheads-superceed-A4.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-push-checkheads-superceed-A4.t Tue Sep 24 12:42:27 2019 +0200 @@ -48,6 +48,7 @@ $ mkcommit A1 created new head $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"` + 1 new obsolescence markers obsoleted 1 changesets $ mkcommit B0 $ hg log -G --hidden
--- a/tests/test-push-checkheads-superceed-A5.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-push-checkheads-superceed-A5.t Tue Sep 24 12:42:27 2019 +0200 @@ -49,6 +49,7 @@ created new head $ mkcommit A1 $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ ba93660aff8d (draft): A1
--- a/tests/test-push-checkheads-superceed-A6.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-push-checkheads-superceed-A6.t Tue Sep 24 12:42:27 2019 +0200 @@ -69,9 +69,11 @@ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved $ mkcommit B1 $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"` + 1 new obsolescence markers obsoleted 1 changesets 1 new orphan changesets $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ d70a1f75a020 (draft): B1
--- a/tests/test-push-checkheads-superceed-A7.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-push-checkheads-superceed-A7.t Tue Sep 24 12:42:27 2019 +0200 @@ -69,9 +69,11 @@ $ mkcommit B1 created new head $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"` + 1 new obsolescence markers obsoleted 1 changesets 1 new orphan changesets $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ 25c56d33e4c4 (draft): B1
--- a/tests/test-push-checkheads-superceed-A8.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-push-checkheads-superceed-A8.t Tue Sep 24 12:42:27 2019 +0200 @@ -53,8 +53,10 @@ $ mkcommit A2 created new head $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete `getid "desc(A1)" ` `getid "desc(A2)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ c1f8d089020f (draft): A2
--- a/tests/test-push-checkheads-unpushed-D1.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-push-checkheads-unpushed-D1.t Tue Sep 24 12:42:27 2019 +0200 @@ -49,6 +49,7 @@ $ mkcommit A1 created new head $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg up 0 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
--- a/tests/test-push-checkheads-unpushed-D2.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-push-checkheads-unpushed-D2.t Tue Sep 24 12:42:27 2019 +0200 @@ -64,9 +64,11 @@ $ mkcommit A1 created new head $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"` + 1 new obsolescence markers obsoleted 1 changesets 1 new orphan changesets $ hg debugobsolete --record-parents `getid "desc(B0)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg up 0 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
--- a/tests/test-push-checkheads-unpushed-D3.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-push-checkheads-unpushed-D3.t Tue Sep 24 12:42:27 2019 +0200 @@ -67,9 +67,11 @@ $ mkcommit B1 created new head $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"` + 1 new obsolescence markers obsoleted 1 changesets 1 new orphan changesets $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ 25c56d33e4c4 (draft): B1
--- a/tests/test-push-checkheads-unpushed-D4.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-push-checkheads-unpushed-D4.t Tue Sep 24 12:42:27 2019 +0200 @@ -83,9 +83,11 @@ 1 files updated, 0 files merged, 1 files removed, 0 files unresolved $ mkcommit B1 $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"` + 1 new obsolescence markers obsoleted 1 changesets 1 new orphan changesets $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ d70a1f75a020 (draft): B1
--- a/tests/test-push-checkheads-unpushed-D5.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-push-checkheads-unpushed-D5.t Tue Sep 24 12:42:27 2019 +0200 @@ -72,9 +72,11 @@ $ mkcommit B1 created new head $ hg debugobsolete `getid "desc(A0)" ` `getid "desc(A1)"` + 1 new obsolescence markers obsoleted 1 changesets 1 new orphan changesets $ hg debugobsolete `getid "desc(B0)" ` `getid "desc(B1)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ 25c56d33e4c4 (draft): B1
--- a/tests/test-push-checkheads-unpushed-D6.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-push-checkheads-unpushed-D6.t Tue Sep 24 12:42:27 2019 +0200 @@ -56,8 +56,10 @@ $ mkcommit C0 created new head $ hg debugobsolete `getid "desc(A0)"` `getid "desc(A1)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete --record-parents `getid "desc(A1)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ 0f88766e02d6 (draft): C0
--- a/tests/test-push-checkheads-unpushed-D7.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-push-checkheads-unpushed-D7.t Tue Sep 24 12:42:27 2019 +0200 @@ -65,10 +65,13 @@ $ mkcommit C0 created new head $ hg debugobsolete `getid "desc(A0)"` `getid "desc(A1)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete `getid "desc(A1)"` `getid "desc(A2)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg debugobsolete --record-parents `getid "desc(A2)"` + 1 new obsolescence markers obsoleted 1 changesets $ hg log -G --hidden @ 0f88766e02d6 (draft): C0
--- a/tests/test-rewind.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-rewind.t Tue Sep 24 12:42:27 2019 +0200 @@ -924,25 +924,11 @@ $ cd .. -Check error cases -================= - - $ hg clone rewind-testing-base rewind-testing-error - updating to branch default - 3 files updated, 0 files merged, 0 files removed, 0 files unresolved - $ cd rewind-testing-error +Merge commits +============= -Uncommited changes ------------------- - - $ echo C > C - $ hg add C - $ hg rewind - abort: uncommitted changes - [255] - -Merge commits -------------- + $ hg clone -q rewind-testing-base rewind-merge + $ cd rewind-merge $ hg up --clean .^ 0 files updated, 0 files merged, 1 files removed, 0 files unresolved @@ -965,7 +951,7 @@ $ hg rewind --from . rewinded to 1 changesets (1 changesets obsoleted) - working directory is now at 006fd8c2fed9 + working directory is now at 9d325190bd87 $ hg st --change . A B @@ -976,9 +962,91 @@ | |/ +---x 4: merge () | |/ - | o 3: foo (C foo) + | o 3: foo (foo) | | | ~ o 2: c_B0 (B) | ~ + + $ cd .. + +Rewind --keep +============= + + $ hg init rewind-keep + $ cd rewind-keep + $ echo root > root + $ hg ci -qAm 'root' + + $ echo apple > a + $ echo banana > b + $ hg ci -qAm initial + + $ hg rm b + $ echo apricot > a + $ echo coconut > c + $ hg add c + $ hg status + M a + A c + R b + $ hg amend -m amended + $ hg glf --hidden + @ 2: amended (a c) + | + | x 1: initial (a b) + |/ + o 0: root (root) + + +Clean wdir + + $ hg rewind --keep --to 'desc("initial")' --hidden + rewinded to 1 changesets + (1 changesets obsoleted) + $ hg obslog + @ b4c97fddc16a (3) initial + |\ + x | 2ea5be2f8751 (2) amended + |/ rewritten(description, meta, content) as b4c97fddc16a using rewind by test (Thu Jan 01 00:00:06 1970 +0000) + | + x 30704102d912 (1) initial + rewritten(description, content) as 2ea5be2f8751 using amend by test (Thu Jan 01 00:00:06 1970 +0000) + rewritten(meta) as b4c97fddc16a using rewind by test (Thu Jan 01 00:00:06 1970 +0000) + + $ hg glf --hidden + @ 3: initial (a b) + | + | x 2: amended (a c) + |/ + | x 1: initial (a b) + |/ + o 0: root (root) + + $ hg st + M a + A c + R b + +Making wdir even more dirty + + $ echo avocado > a + $ echo durian > d + $ hg st + M a + A c + R b + ? d + +No rewinding without --keep + + $ hg rewind --to 'desc("amended")' --hidden + abort: uncommitted changes + [255] + +XXX: Unfortunately, even with --keep it's not allowed + + $ hg rewind --keep --to 'desc("amended")' --hidden + abort: uncommitted changes + [255]
--- a/tests/test-topic-flow-reject-untopiced.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-topic-flow-reject-untopiced.t Tue Sep 24 12:42:27 2019 +0200 @@ -73,7 +73,6 @@ adding changesets adding manifests adding file changes - added 1 changesets with 1 changes to 1 files transaction abort! rollback completed abort: rejecting draft changesets: 4e8b0e0237 @@ -85,7 +84,6 @@ adding changesets adding manifests adding file changes - added 1 changesets with 1 changes to 1 files transaction abort! rollback completed abort: rejecting draft changesets: 4e8b0e0237 @@ -121,7 +119,6 @@ adding changesets adding manifests adding file changes - added 4 changesets with 4 changes to 4 files transaction abort! rollback completed abort: rejecting draft changesets: 4e8b0e0237 @@ -139,7 +136,6 @@ adding changesets adding manifests adding file changes - added 4 changesets with 4 changes to 4 files transaction abort! rollback completed abort: rejecting draft changesets: 4e8b0e0237
--- a/tests/test-topic-flow-single-head.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-topic-flow-single-head.t Tue Sep 24 12:42:27 2019 +0200 @@ -70,7 +70,6 @@ adding changesets adding manifests adding file changes - added 1 changesets with 1 changes to 1 files (+1 heads) transaction abort! rollback completed abort: 2 heads on "default" @@ -145,7 +144,6 @@ adding changesets adding manifests adding file changes - added 1 changesets with 1 changes to 1 files (+1 heads) transaction abort! rollback completed abort: 2 heads on "default:bar"
--- a/tests/test-wireproto.t Fri Jul 19 16:26:48 2019 +0200 +++ b/tests/test-wireproto.t Tue Sep 24 12:42:27 2019 +0200 @@ -72,8 +72,8 @@ remote: adding changesets remote: adding manifests remote: adding file changes + remote: obsmarker-exchange: 92 bytes received remote: added 1 changesets with 1 changes to 1 files (+1 heads) - remote: obsmarker-exchange: 92 bytes received remote: 1 new obsolescence markers remote: obsoleted 1 changesets $ hg push @@ -91,8 +91,8 @@ adding changesets adding manifests adding file changes + obsmarker-exchange: 92 bytes received added 1 changesets with 1 changes to [12] files \(\+1 heads\) (re) - obsmarker-exchange: 92 bytes received 1 new obsolescence markers obsoleted 1 changesets new changesets 9d1c114e7797 (1 drafts) @@ -116,8 +116,8 @@ remote: adding changesets remote: adding manifests remote: adding file changes + remote: obsmarker-exchange: 92 bytes received remote: added 1 changesets with 1 changes to 1 files - remote: obsmarker-exchange: 92 bytes received remote: 1 new obsolescence markers $ hg -R ../other pull pulling from ssh://user@dummy/server @@ -125,8 +125,8 @@ adding changesets adding manifests adding file changes + obsmarker-exchange: 92 bytes received added 1 changesets with 1 changes to 1 files - obsmarker-exchange: 92 bytes received 1 new obsolescence markers new changesets a5687ec59dd4 (1 drafts) (run 'hg update' to get a working copy) @@ -140,8 +140,8 @@ remote: adding changesets remote: adding manifests remote: adding file changes + remote: obsmarker-exchange: 183 bytes received remote: added 1 changesets with 0 changes to 1 files (+1 heads) - remote: obsmarker-exchange: 183 bytes received remote: 1 new obsolescence markers remote: obsoleted 1 changesets $ hg -R ../other pull @@ -150,8 +150,8 @@ adding changesets adding manifests adding file changes + obsmarker-exchange: 183 bytes received added 1 changesets with 0 changes to 1 files (+1 heads) - obsmarker-exchange: 183 bytes received 1 new obsolescence markers obsoleted 1 changesets new changesets * (glob)