changeset 39197:d859b48730b8
merge with stable
| author | Martin von Zweigbergk <martinvonz@google.com> |
| --- | --- |
| date | Mon, 20 Aug 2018 09:48:08 -0700 |
| parents | 1e7a462cb946 (diff) 8c6775e812d8 (current diff) |
| children | 45e05d39d9ce |
| files | mercurial/context.py |
| diffstat | 198 files changed, 10340 insertions(+), 3377 deletions(-) |
--- a/Makefile	Sun Aug 19 13:27:02 2018 +0900
+++ b/Makefile	Mon Aug 20 09:48:08 2018 -0700
@@ -9,7 +9,8 @@
 $(eval HGROOT := $(shell pwd))
 HGPYTHONS ?= $(HGROOT)/build/pythons
 PURE=
-PYFILES:=$(shell find mercurial hgext doc -name '*.py')
+PYFILESCMD=find mercurial hgext doc -name '*.py'
+PYFILES:=$(shell $(PYFILESCMD))
 DOCFILES=mercurial/help/*.txt
 export LANGUAGE=C
 export LC_ALL=C
@@ -145,7 +146,7 @@
 	# parse them even though they are not marked for translation.
 	# Extracting with an explicit encoding of ISO-8859-1 will make
 	# xgettext "parse" and ignore them.
-	echo $(PYFILES) | xargs \
+	$(PYFILESCMD) | xargs \
 	  xgettext --package-name "Mercurial" \
 	    --msgid-bugs-address "<mercurial-devel@mercurial-scm.org>" \
 	    --copyright-holder "Matt Mackall <mpm@selenic.com> and others" \
--- a/contrib/byteify-strings.py	Sun Aug 19 13:27:02 2018 +0900
+++ b/contrib/byteify-strings.py	Mon Aug 20 09:48:08 2018 -0700
@@ -169,6 +169,11 @@
                 yield adjusttokenpos(t._replace(string=fn[4:]), coloffset)
                 continue
 
+        # Looks like "if __name__ == '__main__'".
+        if (t.type == token.NAME and t.string == '__name__'
+            and _isop(i + 1, '==')):
+            _ensuresysstr(i + 2)
+
         # Emit unmodified token.
         yield adjusttokenpos(t, coloffset)
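The hunk above keys off a two-token pattern in the token stream: a `__name__` NAME followed by an `==` operator. A minimal standalone sketch of the same detection, using only the stdlib `tokenize` module rather than the script's own `_isop`/`_ensuresysstr` helpers (the function name `finddunder` is made up for illustration):

```python
import io
import token
import tokenize

def finddunder(source):
    """Yield (row, col) of each `__name__ ==` comparison in source text."""
    toks = list(tokenize.generate_tokens(io.StringIO(source).readline))
    for i, t in enumerate(toks):
        if (t.type == token.NAME and t.string == '__name__'
                and i + 1 < len(toks) and toks[i + 1].string == '=='):
            yield t.start

print(list(finddunder("if __name__ == '__main__':\n    pass\n")))
```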
--- a/contrib/check-code.py	Sun Aug 19 13:27:02 2018 +0900
+++ b/contrib/check-code.py	Mon Aug 20 09:48:08 2018 -0700
@@ -30,7 +30,7 @@
     opentext = open
 else:
     def opentext(f):
-        return open(f, encoding='ascii')
+        return open(f, encoding='latin1')
 try:
     xrange
 except NameError:
@@ -511,6 +511,7 @@
     (r'getopt\.getopt', "use pycompat.getoptb instead (py3)"),
     (r'os\.getenv', "use encoding.environ.get instead"),
     (r'os\.setenv', "modifying the environ dict is not preferred"),
+    (r'(?<!pycompat\.)xrange', "use pycompat.xrange instead (py3)"),
   ],
   # warnings
   [],
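The new rule relies on a negative lookbehind so that only bare `xrange` is flagged, never the sanctioned `pycompat.xrange`. A quick self-contained check of that regex:

```python
import re

# Same pattern as the new check-code rule: match xrange unless it is
# immediately preceded by the literal "pycompat." prefix.
pat = re.compile(r'(?<!pycompat\.)xrange')

assert pat.search("for i in xrange(10):")
assert not pat.search("for i in pycompat.xrange(10):")
```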
--- a/contrib/import-checker.py	Sun Aug 19 13:27:02 2018 +0900
+++ b/contrib/import-checker.py	Mon Aug 20 09:48:08 2018 -0700
@@ -36,6 +36,7 @@
     'mercurial.pure.parsers',
     # third-party imports should be directly imported
     'mercurial.thirdparty',
+    'mercurial.thirdparty.attr',
     'mercurial.thirdparty.cbor',
     'mercurial.thirdparty.cbor.cbor2',
     'mercurial.thirdparty.zope',
--- a/contrib/perf.py	Sun Aug 19 13:27:02 2018 +0900
+++ b/contrib/perf.py	Mon Aug 20 09:48:08 2018 -0700
@@ -663,21 +663,20 @@
     By default, all revisions are added to the changegroup.
     """
     cl = repo.changelog
-    revs = [cl.lookup(r) for r in repo.revs(rev or 'all()')]
+    nodes = [cl.lookup(r) for r in repo.revs(rev or 'all()')]
     bundler = changegroup.getbundler(version, repo)
-    def lookup(node):
-        # The real bundler reads the revision in order to access the
-        # manifest node and files list. Do that here.
-        cl.read(node)
-        return node
-
     def d():
-        for chunk in bundler.group(revs, cl, lookup):
+        state, chunks = bundler._generatechangelog(cl, nodes)
+        for chunk in chunks:
             pass
 
     timer, fm = gettimer(ui, opts)
-    timer(d)
+
+    # Terminal printing can interfere with timing. So disable it.
+    with ui.configoverride({('progress', 'disable'): True}):
+        timer(d)
+
     fm.end()
 
 @command('perfdirs', formatteropts)
@@ -848,17 +847,23 @@
     timer(d)
     fm.end()
 
-@command('perfmanifest', [], 'REV')
-def perfmanifest(ui, repo, rev, **opts):
+@command('perfmanifest',[
+            ('m', 'manifest-rev', False, 'Look up a manifest node revision'),
+            ('', 'clear-disk', False, 'clear on-disk caches too'),
+        ], 'REV|NODE')
+def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
     """benchmark the time to read a manifest from disk and return a usable
     dict-like object
 
     Manifest caches are cleared before retrieval."""
     timer, fm = gettimer(ui, opts)
-    ctx = scmutil.revsingle(repo, rev, rev)
-    t = ctx.manifestnode()
+    if not manifest_rev:
+        ctx = scmutil.revsingle(repo, rev, rev)
+        t = ctx.manifestnode()
+    else:
+        t = repo.manifestlog._revlog.lookup(rev)
     def d():
-        repo.manifestlog.clearcaches()
+        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
         repo.manifestlog[t].read()
     timer(d)
     fm.end()
@@ -940,6 +945,38 @@
     timer(lambda: len(repo.lookup(rev)))
     fm.end()
 
+@command('perflinelogedits',
+         [('n', 'edits', 10000, 'number of edits'),
+          ('', 'max-hunk-lines', 10, 'max lines in a hunk'),
+         ], norepo=True)
+def perflinelogedits(ui, **opts):
+    from mercurial import linelog
+
+    edits = opts['edits']
+    maxhunklines = opts['max_hunk_lines']
+
+    maxb1 = 100000
+    random.seed(0)
+    randint = random.randint
+    currentlines = 0
+    arglist = []
+    for rev in xrange(edits):
+        a1 = randint(0, currentlines)
+        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
+        b1 = randint(0, maxb1)
+        b2 = randint(b1, b1 + maxhunklines)
+        currentlines += (b2 - b1) - (a2 - a1)
+        arglist.append((rev, a1, a2, b1, b2))
+
+    def d():
+        ll = linelog.linelog()
+        for args in arglist:
+            ll.replacelines(*args)
+
+    timer, fm = gettimer(ui, opts)
+    timer(d)
+    fm.end()
+
 @command('perfrevrange', formatteropts)
 def perfrevrange(ui, repo, *specs, **opts):
     timer, fm = gettimer(ui, opts)
@@ -980,7 +1017,7 @@
     """
     timer, fm = gettimer(ui, opts)
     def moonwalk():
-        for i in xrange(len(repo), -1, -1):
+        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
             ctx = repo[i]
             ctx.branch() # read changelog data (in addition to the index)
     timer(moonwalk)
@@ -1771,6 +1808,31 @@
         branchcachewrite.restore()
     fm.end()
 
+@command('perfbranchmapload', [
+        ('f', 'filter', '', 'Specify repoview filter'),
+        ('', 'list', False, 'List brachmap filter caches'),
+    ] + formatteropts)
+def perfbranchmapread(ui, repo, filter='', list=False, **opts):
+    """benchmark reading the branchmap"""
+    if list:
+        for name, kind, st in repo.cachevfs.readdir(stat=True):
+            if name.startswith('branch2'):
+                filtername = name.partition('-')[2] or 'unfiltered'
+                ui.status('%s - %s\n'
+                          % (filtername, util.bytecount(st.st_size)))
+        return
+    if filter:
+        repo = repoview.repoview(repo, filter)
+    else:
+        repo = repo.unfiltered()
+    # try once without timer, the filter may not be cached
+    if branchmap.read(repo) is None:
+        raise error.Abort('No brachmap cached for %s repo'
+                          % (filter or 'unfiltered'))
+    timer, fm = gettimer(ui, opts)
+    timer(lambda: branchmap.read(repo) and None)
+    fm.end()
+
 @command('perfloadmarkers')
 def perfloadmarkers(ui, repo):
     """benchmark the time to parse the on-disk markers for a repo
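The perfchangegroupchangelog hunk disables progress output while the timed body runs, because printing to the terminal skews measurements. The same idea in plain stdlib Python, independent of Mercurial's ui object (the `quiet` context manager and `body` function here are illustrative, not part of perf.py):

```python
import contextlib
import io
import sys
import timeit

@contextlib.contextmanager
def quiet():
    """Swallow stdout for the duration of the block."""
    saved, sys.stdout = sys.stdout, io.StringIO()
    try:
        yield
    finally:
        sys.stdout = saved

def body():
    print('progress...')  # output like this would pollute the timing run

with quiet():
    elapsed = timeit.timeit(body, number=1000)
print('%.6fs for 1000 runs' % elapsed)
```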
--- a/contrib/phabricator.py	Sun Aug 19 13:27:02 2018 +0900
+++ b/contrib/phabricator.py	Mon Aug 20 09:48:08 2018 -0700
@@ -570,6 +570,7 @@
         drevid = drevids[i]
         drev = [d for d in drevs if int(d[r'id']) == drevid][0]
         newdesc = getdescfromdrev(drev)
+        newdesc = encoding.unitolocal(newdesc)
         # Make sure commit message contain "Differential Revision"
         if old.description() != newdesc:
             parents = [
--- a/contrib/python3-whitelist	Sun Aug 19 13:27:02 2018 +0900
+++ b/contrib/python3-whitelist	Mon Aug 20 09:48:08 2018 -0700
@@ -1,4 +1,7 @@
 test-abort-checkin.t
+test-absorb-filefixupstate.py
+test-absorb-phase.t
+test-absorb-strip.t
 test-add.t
 test-addremove-similar.t
 test-addremove.t
@@ -48,6 +51,7 @@
 test-cbor.py
 test-censor.t
 test-changelog-exec.t
+test-check-code.t
 test-check-commit.t
 test-check-execute.t
 test-check-interfaces.py
@@ -179,9 +183,12 @@
 test-generaldelta.t
 test-getbundle.t
 test-git-export.t
+test-glog-beautifygraph.t
 test-glog-topological.t
+test-glog.t
 test-gpg.t
 test-graft.t
+test-grep.t
 test-hg-parseurl.py
 test-hghave.t
 test-hgignore.t
@@ -254,6 +261,7 @@
 test-largefiles.t
 test-lfs-largefiles.t
 test-lfs-pointer.py
+test-linelog.py
 test-linerange.py
 test-locate.t
 test-lock-badness.t
@@ -296,6 +304,7 @@
 test-minifileset.py
 test-minirst.py
 test-mq-git.t
+test-mq-guards.t
 test-mq-header-date.t
 test-mq-header-from.t
 test-mq-merge.t
@@ -308,6 +317,7 @@
 test-mq-qimport-fail-cleanup.t
 test-mq-qnew.t
 test-mq-qpush-exact.t
+test-mq-qpush-fail.t
 test-mq-qqueue.t
 test-mq-qrefresh-interactive.t
 test-mq-qrefresh-replace-log-message.t
@@ -318,6 +328,7 @@
 test-mq-subrepo.t
 test-mq-symlinks.t
 test-mv-cp-st-diff.t
+test-narrow-acl.t
 test-narrow-archive.t
 test-narrow-clone-no-ellipsis.t
 test-narrow-clone-non-narrow-server.t
@@ -358,6 +369,9 @@
 test-parseindex2.py
 test-patch-offset.t
 test-patch.t
+test-patchbomb-bookmark.t
+test-patchbomb-tls.t
+test-patchbomb.t
 test-pathconflicts-merge.t
 test-pathconflicts-update.t
 test-pathencode.py
@@ -405,6 +419,7 @@
 test-pushvars.t
 test-qrecord.t
 test-rebase-abort.t
+test-rebase-backup.t
 test-rebase-base-flag.t
 test-rebase-bookmarks.t
 test-rebase-brute-force.t
@@ -446,6 +461,7 @@
 test-revert-flags.t
 test-revert-interactive.t
 test-revert-unknown.t
+test-revisions.t
 test-revlog-ancestry.py
 test-revlog-group-emptyiter.t
 test-revlog-mmapindex.t
@@ -529,6 +545,7 @@
 test-url-rev.t
 test-url.py
 test-username-newline.t
+test-util.py
 test-verify.t
 test-walk.t
 test-walkrepo.py
--- a/contrib/wix/help.wxs	Sun Aug 19 13:27:02 2018 +0900
+++ b/contrib/wix/help.wxs	Mon Aug 20 09:48:08 2018 -0700
@@ -46,6 +46,7 @@
           <File Id="internals.censor.txt" Name="censor.txt" />
           <File Id="internals.changegroups.txt" Name="changegroups.txt" />
           <File Id="internals.config.txt" Name="config.txt" />
+          <File Id="internals.linelog.txt" Name="linelog.txt" />
           <File Id="internals.requirements.txt" Name="requirements.txt" />
           <File Id="internals.revlogs.txt" Name="revlogs.txt" />
           <File Id="internals.wireprotocol.txt" Name="wireprotocol.txt" />
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hgext/absorb.py	Mon Aug 20 09:48:08 2018 -0700
@@ -0,0 +1,977 @@
+# absorb.py
+#
+# Copyright 2016 Facebook, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+"""apply working directory changes to changesets (EXPERIMENTAL)
+
+The absorb extension provides a command to use annotate information to
+amend modified chunks into the corresponding non-public changesets.
+
+::
+
+    [absorb]
+    # only check 50 recent non-public changesets at most
+    max-stack-size = 50
+    # whether to add noise to new commits to avoid obsolescence cycle
+    add-noise = 1
+    # make `amend --correlated` a shortcut to the main command
+    amend-flag = correlated
+
+    [color]
+    absorb.node = blue bold
+    absorb.path = bold
+"""
+
+# TODO:
+#  * Rename config items to [commands] namespace
+#  * Converge getdraftstack() with other code in core
+#  * move many attributes on fixupstate to be private
+
+from __future__ import absolute_import
+
+import collections
+
+from mercurial.i18n import _
+from mercurial import (
+    cmdutil,
+    commands,
+    context,
+    crecord,
+    error,
+    linelog,
+    mdiff,
+    node,
+    obsolete,
+    patch,
+    phases,
+    pycompat,
+    registrar,
+    repair,
+    scmutil,
+    util,
+)
+from mercurial.utils import (
+    stringutil,
+)
+
+# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
+# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
+# be specifying the version(s) of Mercurial they are tested with, or
+# leave the attribute unspecified.
+testedwith = 'ships-with-hg-core'
+
+cmdtable = {}
+command = registrar.command(cmdtable)
+
+configtable = {}
+configitem = registrar.configitem(configtable)
+
+configitem('absorb', 'add-noise', default=True)
+configitem('absorb', 'amend-flag', default=None)
+configitem('absorb', 'max-stack-size', default=50)
+
+colortable = {
+    'absorb.node': 'blue bold',
+    'absorb.path': 'bold',
+}
+
+defaultdict = collections.defaultdict
+
+class nullui(object):
+    """blank ui object doing nothing"""
+    debugflag = False
+    verbose = False
+    quiet = True
+
+    def __getitem__(name):
+        def nullfunc(*args, **kwds):
+            return
+        return nullfunc
+
+class emptyfilecontext(object):
+    """minimal filecontext representing an empty file"""
+    def data(self):
+        return ''
+
+    def node(self):
+        return node.nullid
+
+def uniq(lst):
+    """list -> list. remove duplicated items without changing the order"""
+    seen = set()
+    result = []
+    for x in lst:
+        if x not in seen:
+            seen.add(x)
+            result.append(x)
+    return result
+
+def getdraftstack(headctx, limit=None):
+    """(ctx, int?) -> [ctx]. get a linear stack of non-public changesets.
+
+    changesets are sorted in topo order, oldest first.
+    return at most limit items, if limit is a positive number.
+
+    merges are considered as non-draft as well. i.e. every commit
+    returned has and only has 1 parent.
+    """
+    ctx = headctx
+    result = []
+    while ctx.phase() != phases.public:
+        if limit and len(result) >= limit:
+            break
+        parents = ctx.parents()
+        if len(parents) != 1:
+            break
+        result.append(ctx)
+        ctx = parents[0]
+    result.reverse()
+    return result
+
+def getfilestack(stack, path, seenfctxs=None):
+    """([ctx], str, set) -> [fctx], {ctx: fctx}
+
+    stack is a list of contexts, from old to new. usually they are what
+    "getdraftstack" returns.
+
+    follows renames, but not copies.
+
+    seenfctxs is a set of filecontexts that will be considered "immutable".
+    they are usually what this function returned in earlier calls, useful
+    to avoid issues that a file was "moved" to multiple places and was then
+    modified differently, like: "a" was copied to "b", "a" was also copied to
+    "c" and then "a" was deleted, then both "b" and "c" were "moved" from "a"
+    and we enforce only one of them to be able to affect "a"'s content.
+
+    return an empty list and an empty dict, if the specified path does not
+    exist in stack[-1] (the top of the stack).
+
+    otherwise, return a list of de-duplicated filecontexts, and the map to
+    convert ctx in the stack to fctx, for possible mutable fctxs. the first item
+    of the list would be outside the stack and should be considered immutable.
+    the remaining items are within the stack.
+
+    for example, given the following changelog and corresponding filelog
+    revisions:
+
+      changelog: 3----4----5----6----7
+      filelog:   x    0----1----1----2 (x: no such file yet)
+
+    - if stack = [5, 6, 7], returns ([0, 1, 2], {5: 1, 6: 1, 7: 2})
+    - if stack = [3, 4, 5], returns ([e, 0, 1], {4: 0, 5: 1}), where "e" is a
+      dummy empty filecontext.
+    - if stack = [2], returns ([], {})
+    - if stack = [7], returns ([1, 2], {7: 2})
+    - if stack = [6, 7], returns ([1, 2], {6: 1, 7: 2}), although {6: 1} can be
+      removed, since 1 is immutable.
+    """
+    if seenfctxs is None:
+        seenfctxs = set()
+    assert stack
+
+    if path not in stack[-1]:
+        return [], {}
+
+    fctxs = []
+    fctxmap = {}
+
+    pctx = stack[0].p1() # the public (immutable) ctx we stop at
+    for ctx in reversed(stack):
+        if path not in ctx: # the file is added in the next commit
+            pctx = ctx
+            break
+        fctx = ctx[path]
+        fctxs.append(fctx)
+        if fctx in seenfctxs: # treat fctx as the immutable one
+            pctx = None # do not add another immutable fctx
+            break
+        fctxmap[ctx] = fctx # only for mutable fctxs
+        renamed = fctx.renamed()
+        if renamed:
+            path = renamed[0] # follow rename
+            if path in ctx: # but do not follow copy
+                pctx = ctx.p1()
+                break
+
+    if pctx is not None: # need an extra immutable fctx
+        if path in pctx:
+            fctxs.append(pctx[path])
+        else:
+            fctxs.append(emptyfilecontext())
+
+    fctxs.reverse()
+    # note: we rely on a property of hg: filerev is not reused for linear
+    # history. i.e. it's impossible to have:
+    #   changelog: 4----5----6 (linear, no merges)
+    #   filelog:   1----2----1
+    #                         ^ reuse filerev (impossible)
+    # because parents are part of the hash. if that's not true, we need to
+    # remove uniq and find a different way to identify fctxs.
+    return uniq(fctxs), fctxmap
+
+class overlaystore(patch.filestore):
+    """read-only, hybrid store based on a dict and ctx.
+    memworkingcopy: {path: content}, overrides file contents.
+    """
+    def __init__(self, basectx, memworkingcopy):
+        self.basectx = basectx
+        self.memworkingcopy = memworkingcopy
+
+    def getfile(self, path):
+        """comply with mercurial.patch.filestore.getfile"""
+        if path not in self.basectx:
+            return None, None, None
+        fctx = self.basectx[path]
+        if path in self.memworkingcopy:
+            content = self.memworkingcopy[path]
+        else:
+            content = fctx.data()
+        mode = (fctx.islink(), fctx.isexec())
+        renamed = fctx.renamed() # False or (path, node)
+        return content, mode, (renamed and renamed[0])
+
+def overlaycontext(memworkingcopy, ctx, parents=None, extra=None):
+    """({path: content}, ctx, (p1node, p2node)?, {}?) -> memctx
+    memworkingcopy overrides file contents.
+    """
+    # parents must contain 2 items: (node1, node2)
+    if parents is None:
+        parents = ctx.repo().changelog.parents(ctx.node())
+    if extra is None:
+        extra = ctx.extra()
+    date = ctx.date()
+    desc = ctx.description()
+    user = ctx.user()
+    files = set(ctx.files()).union(memworkingcopy)
+    store = overlaystore(ctx, memworkingcopy)
+    return context.memctx(
+        repo=ctx.repo(), parents=parents, text=desc,
+        files=files, filectxfn=store, user=user, date=date,
+        branch=None, extra=extra)
+
+class filefixupstate(object):
+    """state needed to apply fixups to a single file
+
+    internally, it keeps file contents of several revisions and a linelog.
+
+    the linelog uses odd revision numbers for original contents (fctxs passed
+    to __init__), and even revision numbers for fixups, like:
+
+        linelog rev 1: self.fctxs[0] (from an immutable "public" changeset)
+        linelog rev 2: fixups made to self.fctxs[0]
+        linelog rev 3: self.fctxs[1] (a child of fctxs[0])
+        linelog rev 4: fixups made to self.fctxs[1]
+        ...
+
+    a typical use is like:
+
+        1. call diffwith, to calculate self.fixups
+        2. (optionally), present self.fixups to the user, or change it
+        3. call apply, to apply changes
+        4. read results from "finalcontents", or call getfinalcontent
+    """
+
+    def __init__(self, fctxs, ui=None, opts=None):
+        """([fctx], ui or None) -> None
+
+        fctxs should be linear, and sorted by topo order - oldest first.
+        fctxs[0] will be considered as "immutable" and will not be changed.
+        """
+        self.fctxs = fctxs
+        self.ui = ui or nullui()
+        self.opts = opts or {}
+
+        # following fields are built from fctxs. they exist for perf reason
+        self.contents = [f.data() for f in fctxs]
+        self.contentlines = pycompat.maplist(mdiff.splitnewlines, self.contents)
+        self.linelog = self._buildlinelog()
+        if self.ui.debugflag:
+            assert self._checkoutlinelog() == self.contents
+
+        # following fields will be filled later
+        self.chunkstats = [0, 0] # [adopted, total : int]
+        self.targetlines = [] # [str]
+        self.fixups = [] # [(linelog rev, a1, a2, b1, b2)]
+        self.finalcontents = [] # [str]
+
+    def diffwith(self, targetfctx, showchanges=False):
+        """calculate fixups needed by examining the differences between
+        self.fctxs[-1] and targetfctx, chunk by chunk.
+
+        targetfctx is the target state we move towards. we may or may not be
+        able to get there because not all modified chunks can be amended into
+        a non-public fctx unambiguously.
+
+        call this only once, before apply().
+
+        update self.fixups, self.chunkstats, and self.targetlines.
+        """
+        a = self.contents[-1]
+        alines = self.contentlines[-1]
+        b = targetfctx.data()
+        blines = mdiff.splitnewlines(b)
+        self.targetlines = blines
+
+        self.linelog.annotate(self.linelog.maxrev)
+        annotated = self.linelog.annotateresult # [(linelog rev, linenum)]
+        assert len(annotated) == len(alines)
+        # add a dummy end line to make insertion at the end easier
+        if annotated:
+            dummyendline = (annotated[-1][0], annotated[-1][1] + 1)
+            annotated.append(dummyendline)
+
+        # analyse diff blocks
+        for chunk in self._alldiffchunks(a, b, alines, blines):
+            newfixups = self._analysediffchunk(chunk, annotated)
+            self.chunkstats[0] += bool(newfixups) # 1 or 0
+            self.chunkstats[1] += 1
+            self.fixups += newfixups
+            if showchanges:
+                self._showchanges(alines, blines, chunk, newfixups)
+
+    def apply(self):
+        """apply self.fixups. update self.linelog, self.finalcontents.
+
+        call this only once, before getfinalcontent(), after diffwith().
+        """
+        # the following is unnecessary, as it's done by "diffwith":
+        #     self.linelog.annotate(self.linelog.maxrev)
+        for rev, a1, a2, b1, b2 in reversed(self.fixups):
+            blines = self.targetlines[b1:b2]
+            if self.ui.debugflag:
+                idx = (max(rev - 1, 0)) // 2
+                self.ui.write(_('%s: chunk %d:%d -> %d lines\n')
+                              % (node.short(self.fctxs[idx].node()),
+                                 a1, a2, len(blines)))
+            self.linelog.replacelines(rev, a1, a2, b1, b2)
+        if self.opts.get('edit_lines', False):
+            self.finalcontents = self._checkoutlinelogwithedits()
+        else:
+            self.finalcontents = self._checkoutlinelog()
+
+    def getfinalcontent(self, fctx):
+        """(fctx) -> str. get modified file content for a given filecontext"""
+        idx = self.fctxs.index(fctx)
+        return self.finalcontents[idx]
+
+    def _analysediffchunk(self, chunk, annotated):
+        """analyse a different chunk and return new fixups found
+
+        return [] if no lines from the chunk can be safely applied.
+
+        the chunk (or lines) cannot be safely applied, if, for example:
+          - the modified (deleted) lines belong to a public changeset
+            (self.fctxs[0])
+          - the chunk is a pure insertion and the adjacent lines (at most 2
+            lines) belong to different non-public changesets, or do not belong
+            to any non-public changesets.
+          - the chunk is modifying lines from different changesets.
+            in this case, if the number of lines deleted equals to the number
+            of lines added, assume it's a simple 1:1 map (could be wrong).
+            otherwise, give up.
+          - the chunk is modifying lines from a single non-public changeset,
+            but other revisions touch the area as well. i.e. the lines are
+            not continuous as seen from the linelog.
+        """
+        a1, a2, b1, b2 = chunk
+        # find involved indexes from annotate result
+        involved = annotated[a1:a2]
+        if not involved and annotated: # a1 == a2 and a is not empty
+            # pure insertion, check nearby lines. ignore lines belong
+            # to the public (first) changeset (i.e. annotated[i][0] == 1)
+            nearbylinenums = {a2, max(0, a1 - 1)}
+            involved = [annotated[i]
+                        for i in nearbylinenums if annotated[i][0] != 1]
+        involvedrevs = list(set(r for r, l in involved))
+        newfixups = []
+        if len(involvedrevs) == 1 and self._iscontinuous(a1, a2 - 1, True):
+            # chunk belongs to a single revision
+            rev = involvedrevs[0]
+            if rev > 1:
+                fixuprev = rev + 1
+                newfixups.append((fixuprev, a1, a2, b1, b2))
+        elif a2 - a1 == b2 - b1 or b1 == b2:
+            # 1:1 line mapping, or chunk was deleted
+            for i in pycompat.xrange(a1, a2):
+                rev, linenum = annotated[i]
+                if rev > 1:
+                    if b1 == b2: # deletion, simply remove that single line
+                        nb1 = nb2 = 0
+                    else: # 1:1 line mapping, change the corresponding rev
+                        nb1 = b1 + i - a1
+                        nb2 = nb1 + 1
+                    fixuprev = rev + 1
+                    newfixups.append((fixuprev, i, i + 1, nb1, nb2))
+        return self._optimizefixups(newfixups)
+
+    @staticmethod
+    def _alldiffchunks(a, b, alines, blines):
+        """like mdiff.allblocks, but only care about differences"""
+        blocks = mdiff.allblocks(a, b, lines1=alines, lines2=blines)
+        for chunk, btype in blocks:
+            if btype != '!':
+                continue
+            yield chunk
+
+    def _buildlinelog(self):
+        """calculate the initial linelog based on self.content{,line}s.
+        this is similar to running a partial "annotate".
+        """
+        llog = linelog.linelog()
+        a, alines = '', []
+        for i in pycompat.xrange(len(self.contents)):
+            b, blines = self.contents[i], self.contentlines[i]
+            llrev = i * 2 + 1
+            chunks = self._alldiffchunks(a, b, alines, blines)
+            for a1, a2, b1, b2 in reversed(list(chunks)):
+                llog.replacelines(llrev, a1, a2, b1, b2)
+            a, alines = b, blines
+        return llog
+
+    def _checkoutlinelog(self):
+        """() -> [str]. check out file contents from linelog"""
+        contents = []
+        for i in pycompat.xrange(len(self.contents)):
+            rev = (i + 1) * 2
+            self.linelog.annotate(rev)
+            content = ''.join(map(self._getline, self.linelog.annotateresult))
+            contents.append(content)
+        return contents
+
+    def _checkoutlinelogwithedits(self):
+        """() -> [str]. prompt all lines for edit"""
+        alllines = self.linelog.getalllines()
+        # header
+        editortext = (_('HG: editing %s\nHG: "y" means the line to the right '
+                        'exists in the changeset to the top\nHG:\n')
+                      % self.fctxs[-1].path())
+        # [(idx, fctx)]. hide the dummy emptyfilecontext
+        visiblefctxs = [(i, f)
+                        for i, f in enumerate(self.fctxs)
+                        if not isinstance(f, emptyfilecontext)]
+        for i, (j, f) in enumerate(visiblefctxs):
+            editortext += (_('HG: %s/%s %s %s\n') %
+                           ('|' * i, '-' * (len(visiblefctxs) - i + 1),
+                            node.short(f.node()),
+                            f.description().split('\n',1)[0]))
+        editortext += _('HG: %s\n') % ('|' * len(visiblefctxs))
+        # figure out the lifetime of a line, this is relatively inefficient,
+        # but probably fine
+        lineset = defaultdict(lambda: set()) # {(llrev, linenum): {llrev}}
+        for i, f in visiblefctxs:
+            self.linelog.annotate((i + 1) * 2)
+            for l in self.linelog.annotateresult:
+                lineset[l].add(i)
+        # append lines
+        for l in alllines:
+            editortext += ('    %s : %s' %
+                           (''.join([('y' if i in lineset[l] else ' ')
+                                     for i, _f in visiblefctxs]),
+                            self._getline(l)))
+        # run editor
+        editedtext = self.ui.edit(editortext, '', action='absorb')
+        if not editedtext:
+            raise error.Abort(_('empty editor text'))
+        # parse edited result
+        contents = ['' for i in self.fctxs]
+        leftpadpos = 4
+        colonpos = leftpadpos + len(visiblefctxs) + 1
+        for l in mdiff.splitnewlines(editedtext):
+            if l.startswith('HG:'):
+                continue
+            if l[colonpos - 1:colonpos + 2] != ' : ':
+                raise error.Abort(_('malformed line: %s') % l)
+            linecontent = l[colonpos + 2:]
+            for i, ch in enumerate(l[leftpadpos:colonpos - 1]):
+                if ch == 'y':
+                    contents[visiblefctxs[i][0]] += linecontent
+        # chunkstats is hard to calculate if anything changes, therefore
+        # set them to just a simple value (1, 1).
+        if editedtext != editortext:
+            self.chunkstats = [1, 1]
+        return contents
+
+    def _getline(self, lineinfo):
+        """((rev, linenum)) -> str. convert rev+line number to line content"""
+        rev, linenum = lineinfo
+        if rev & 1: # odd: original line taken from fctxs
+            return self.contentlines[rev // 2][linenum]
+        else: # even: fixup line from targetfctx
+            return self.targetlines[linenum]
+
+    def _iscontinuous(self, a1, a2, closedinterval=False):
+        """(a1, a2 : int) -> bool
+
+        check if these lines are continuous. i.e. no other insertions or
+        deletions (from other revisions) among these lines.
+
+        closedinterval decides whether a2 should be included or not. i.e. is
+        it [a1, a2), or [a1, a2] ?
+        """
+        if a1 >= a2:
+            return True
+        llog = self.linelog
+        offset1 = llog.getoffset(a1)
+        offset2 = llog.getoffset(a2) + int(closedinterval)
+        linesinbetween = llog.getalllines(offset1, offset2)
+        return len(linesinbetween) == a2 - a1 + int(closedinterval)
+
+    def _optimizefixups(self, fixups):
+        """[(rev, a1, a2, b1, b2)] -> [(rev, a1, a2, b1, b2)].
+        merge adjacent fixups to make them less fragmented.
+        """
+        result = []
+        pcurrentchunk = [[-1, -1, -1, -1, -1]]
+
+        def pushchunk():
+            if pcurrentchunk[0][0] != -1:
+                result.append(tuple(pcurrentchunk[0]))
+
+        for i, chunk in enumerate(fixups):
+            rev, a1, a2, b1, b2 = chunk
+            lastrev = pcurrentchunk[0][0]
+            lasta2 = pcurrentchunk[0][2]
+            lastb2 = pcurrentchunk[0][4]
+            if (a1 == lasta2 and b1 == lastb2 and rev == lastrev and
+                    self._iscontinuous(max(a1 - 1, 0), a1)):
+                # merge into currentchunk
+                pcurrentchunk[0][2] = a2
+                pcurrentchunk[0][4] = b2
+            else:
+                pushchunk()
+                pcurrentchunk[0] = list(chunk)
+        pushchunk()
+        return result
+
+    def _showchanges(self, alines, blines, chunk, fixups):
+        ui = self.ui
+
+        def label(line, label):
+            if line.endswith('\n'):
+                line = line[:-1]
+            return ui.label(line, label)
+
+        # this is not optimized for perf but _showchanges only gets executed
+        # with an extra command-line flag.
+        a1, a2, b1, b2 = chunk
+        aidxs, bidxs = [0] * (a2 - a1), [0] * (b2 - b1)
+        for idx, fa1, fa2, fb1, fb2 in fixups:
+            for i in pycompat.xrange(fa1, fa2):
+                aidxs[i - a1] = (max(idx, 1) - 1) // 2
+            for i in pycompat.xrange(fb1, fb2):
+                bidxs[i - b1] = (max(idx, 1) - 1) // 2
+
+        buf = [] # [(idx, content)]
+        buf.append((0, label('@@ -%d,%d +%d,%d @@'
+                             % (a1, a2 - a1, b1, b2 - b1), 'diff.hunk')))
+        buf += [(aidxs[i - a1], label('-' + alines[i], 'diff.deleted'))
+                for i in pycompat.xrange(a1, a2)]
+        buf += [(bidxs[i - b1], label('+' + blines[i], 'diff.inserted'))
+                for i in pycompat.xrange(b1, b2)]
+        for idx, line in buf:
+            shortnode = idx and node.short(self.fctxs[idx].node()) or ''
+            ui.write(ui.label(shortnode[0:7].ljust(8), 'absorb.node') + line + '\n')
+
+class fixupstate(object):
+    """state needed to run absorb
+
+    internally, it keeps paths and filefixupstates.
+
+    a typical use is like filefixupstates:
+
+        1. call diffwith, to calculate fixups
+        2. (optionally), present fixups to the user, or edit fixups
+        3. call apply, to apply changes to memory
+        4. call commit, to commit changes to hg database
+    """
+
+    def __init__(self, stack, ui=None, opts=None):
+        """([ctx], ui or None) -> None
+
+        stack: should be linear, and sorted by topo order - oldest first.
+        all commits in stack are considered mutable.
+        """
+        assert stack
+        self.ui = ui or nullui()
+        self.opts = opts or {}
+        self.stack = stack
+        self.repo = stack[-1].repo().unfiltered()
+
+        # following fields will be filled later
+        self.paths = [] # [str]
+        self.status = None # ctx.status output
+        self.fctxmap = {} # {path: {ctx: fctx}}
+        self.fixupmap = {} # {path: filefixupstate}
+        self.replacemap = {} # {oldnode: newnode or None}
+        self.finalnode = None # head after all fixups
+
+    def diffwith(self, targetctx, match=None, showchanges=False):
+        """diff and prepare fixups. update self.fixupmap, self.paths"""
+        # only care about modified files
+        self.status = self.stack[-1].status(targetctx, match)
+        self.paths = []
+        # but if --edit-lines is used, the user may want to edit files
+        # even if they are not modified
+        editopt = self.opts.get('edit_lines')
+        if not self.status.modified and editopt and match:
+            interestingpaths = match.files()
+        else:
+            interestingpaths = self.status.modified
+        # prepare the filefixupstate
+        seenfctxs = set()
+        # sorting is necessary to eliminate ambiguity for the "double move"
+        # case: "hg cp A B; hg cp A C; hg rm A", then only "B" can affect "A".
+        for path in sorted(interestingpaths):
+            self.ui.debug('calculating fixups for %s\n' % path)
+            targetfctx = targetctx[path]
+            fctxs, ctx2fctx = getfilestack(self.stack, path, seenfctxs)
+            # ignore symbolic links or binary, or unchanged files
+            if any(f.islink() or stringutil.binary(f.data())
+                   for f in [targetfctx] + fctxs
+                   if not isinstance(f, emptyfilecontext)):
+                continue
+            if targetfctx.data() == fctxs[-1].data() and not editopt:
+                continue
+            seenfctxs.update(fctxs[1:])
+            self.fctxmap[path] = ctx2fctx
+            fstate = filefixupstate(fctxs, ui=self.ui, opts=self.opts)
+            if showchanges:
+                colorpath = self.ui.label(path, 'absorb.path')
+                header = 'showing changes for ' + colorpath
+                self.ui.write(header + '\n')
+            fstate.diffwith(targetfctx, showchanges=showchanges)
+            self.fixupmap[path] = fstate
+            self.paths.append(path)
+
+    def apply(self):
+        """apply fixups to individual filefixupstates"""
+        for path, state in self.fixupmap.iteritems():
+            if self.ui.debugflag:
+                self.ui.write(_('applying fixups to %s\n') % path)
+            state.apply()
+
+    @property
+    def chunkstats(self):
+        """-> {path: chunkstats}. collect chunkstats from filefixupstates"""
+        return dict((path, state.chunkstats)
+                    for path, state in self.fixupmap.iteritems())
+
+    def commit(self):
+        """commit changes. update self.finalnode, self.replacemap"""
+        with self.repo.wlock(), self.repo.lock():
+            with self.repo.transaction('absorb') as tr:
+                self._commitstack()
+                self._movebookmarks(tr)
+                if self.repo['.'].node() in self.replacemap:
+                    self._moveworkingdirectoryparent()
+                if self._useobsolete:
+                    self._obsoleteoldcommits()
+            if not self._useobsolete: # strip must be outside transactions
+                self._stripoldcommits()
+        return self.finalnode
+
+    def printchunkstats(self):
+        """print things like '1 of 2 chunk(s) applied'"""
+        ui = self.ui
+        chunkstats = self.chunkstats
+        if ui.verbose:
+            # chunkstats for each file
+            for path, stat in chunkstats.iteritems():
+                if stat[0]:
+                    ui.write(_('%s: %d of %d chunk(s) applied\n')
+                             % (path, stat[0], stat[1]))
+        elif not ui.quiet:
+            # a summary for all files
+            stats = chunkstats.values()
+            applied, total = (sum(s[i] for s in stats) for i in (0, 1))
+            ui.write(_('%d of %d chunk(s) applied\n') % (applied, total))
+
+    def _commitstack(self):
+        """make new commits. update self.finalnode, self.replacemap.
+        it is splitted from "commit" to avoid too much indentation.
+        """
+        # last node (20-char) committed by us
+        lastcommitted = None
+        # p1 which overrides the parent of the next commit, "None" means use
+        # the original parent unchanged
+        nextp1 = None
+        for ctx in self.stack:
+            memworkingcopy = self._getnewfilecontents(ctx)
+            if not memworkingcopy and not lastcommitted:
+                # nothing changed, nothing commited
+                nextp1 = ctx
+                continue
+            msg = ''
+            if self._willbecomenoop(memworkingcopy, ctx, nextp1):
+                # changeset is no longer necessary
+                self.replacemap[ctx.node()] = None
+                msg = _('became empty and was dropped')
+            else:
+                # changeset needs re-commit
+                nodestr = self._commitsingle(memworkingcopy, ctx, p1=nextp1)
+                lastcommitted = self.repo[nodestr]
+                nextp1 = lastcommitted
+                self.replacemap[ctx.node()] = lastcommitted.node()
+                if memworkingcopy:
+                    msg = _('%d file(s) changed, became %s') % (
+                        len(memworkingcopy), self._ctx2str(lastcommitted))
+                else:
+                    msg = _('became %s') % self._ctx2str(lastcommitted)
+            if self.ui.verbose and msg:
+                self.ui.write(_('%s: %s\n') % (self._ctx2str(ctx), msg))
+        self.finalnode = lastcommitted and lastcommitted.node()
+
+    def _ctx2str(self, ctx):
+        if self.ui.debugflag:
+            return ctx.hex()
+        else:
+            return node.short(ctx.node())
+
+    def _getnewfilecontents(self, ctx):
+        """(ctx) -> {path: str}
+
+        fetch file contents from filefixupstates.
+        return the working copy overrides - files different from ctx.
+        """
+        result = {}
+        for path in self.paths:
+            ctx2fctx = self.fctxmap[path] # {ctx: fctx}
+            if ctx not in ctx2fctx:
+                continue
+            fctx = ctx2fctx[ctx]
+            content = fctx.data()
+            newcontent = self.fixupmap[path].getfinalcontent(fctx)
+            if content != newcontent:
+                result[fctx.path()] = newcontent
+        return result
+
+    def _movebookmarks(self, tr):
+        repo = self.repo
+        needupdate = [(name, self.replacemap[hsh])
+                      for name, hsh in repo._bookmarks.iteritems()
+                      if hsh in self.replacemap]
+        changes = []
+        for name, hsh in needupdate:
+            if hsh:
+                changes.append((name, hsh))
+                if self.ui.verbose:
+                    self.ui.write(_('moving bookmark %s to %s\n')
+                                  % (name, node.hex(hsh)))
+            else:
+                changes.append((name, None))
+                if self.ui.verbose:
+                    self.ui.write(_('deleting bookmark %s\n') % name)
+        repo._bookmarks.applychanges(repo, tr, changes)
+
+    def _moveworkingdirectoryparent(self):
+        if not self.finalnode:
+            # Find the latest not-{obsoleted,stripped} parent.
+            revs = self.repo.revs('max(::. - %ln)', self.replacemap.keys())
+            ctx = self.repo[revs.first()]
+            self.finalnode = ctx.node()
+        else:
+            ctx = self.repo[self.finalnode]
+
+        dirstate = self.repo.dirstate
+        # dirstate.rebuild invalidates fsmonitorstate, causing "hg status" to
+        # be slow. in absorb's case, no need to invalidate fsmonitorstate.
+        noop = lambda: 0
+        restore = noop
+        if util.safehasattr(dirstate, '_fsmonitorstate'):
+            bak = dirstate._fsmonitorstate.invalidate
+            def restore():
+                dirstate._fsmonitorstate.invalidate = bak
+            dirstate._fsmonitorstate.invalidate = noop
+        try:
+            with dirstate.parentchange():
+                dirstate.rebuild(ctx.node(), ctx.manifest(), self.paths)
+        finally:
+            restore()
+
+    @staticmethod
+    def _willbecomenoop(memworkingcopy, ctx, pctx=None):
+        """({path: content}, ctx, ctx) -> bool. test if a commit will be noop
+
+        if it will become an empty commit (does not change anything, after the
+        memworkingcopy overrides), return True. otherwise return False.
+        """
+        if not pctx:
+            parents = ctx.parents()
+            if len(parents) != 1:
+                return False
+            pctx = parents[0]
+        # ctx changes more files (not a subset of memworkingcopy)
+        if not set(ctx.files()).issubset(set(memworkingcopy)):
+            return False
+        for path, content in memworkingcopy.iteritems():
+            if path not in pctx or path not in ctx:
+                return False
+            fctx = ctx[path]
+            pfctx = pctx[path]
+            if pfctx.flags() != fctx.flags():
+                return False
+            if pfctx.data() != content:
+                return False
+        return True
+
+    def _commitsingle(self, memworkingcopy, ctx, p1=None):
+        """(ctx, {path: content}, node) -> node. make a single commit
+
+        the commit is a clone from ctx, with a (optionally) different p1, and
+        different file contents replaced by memworkingcopy.
+        """
+        parents = p1 and (p1, node.nullid)
+        extra = ctx.extra()
+        if self._useobsolete and self.ui.configbool('absorb', 'add-noise'):
+            extra['absorb_source'] = ctx.hex()
+        mctx = overlaycontext(memworkingcopy, ctx, parents, extra=extra)
+        # preserve phase
+        with mctx.repo().ui.configoverride({
+                ('phases', 'new-commit'): ctx.phase()}):
+            return mctx.commit()
+
+    @util.propertycache
+    def _useobsolete(self):
+        """() -> bool"""
+        return obsolete.isenabled(self.repo, obsolete.createmarkersopt)
+
+    def _obsoleteoldcommits(self):
+        relations = [(self.repo[k], v and (self.repo[v],) or ())
+                     for k, v in self.replacemap.iteritems()]
+        if relations:
+            obsolete.createmarkers(self.repo, relations)
+
+    def _stripoldcommits(self):
+        nodelist = self.replacemap.keys()
+        # make sure we don't strip innocent children
+        revs = self.repo.revs('%ln - (::(heads(%ln::)-%ln))', nodelist,
+                              nodelist, nodelist)
+        tonode = self.repo.changelog.node
+        nodelist = [tonode(r) for r in revs]
+        if nodelist:
+            repair.strip(self.repo.ui, self.repo, nodelist)
+
+def _parsechunk(hunk):
+    """(crecord.uihunk or patch.recordhunk) -> (path, (a1, a2, [bline]))"""
+    if type(hunk) not in (crecord.uihunk, patch.recordhunk):
+        return None, None
+    path = hunk.header.filename()
+    a1 = hunk.fromline + len(hunk.before) - 1
+    # remove before and after context
+    hunk.before = hunk.after = []
+    buf = util.stringio()
+    hunk.write(buf)
+    patchlines = mdiff.splitnewlines(buf.getvalue())
+    # hunk.prettystr() will update hunk.removed
+    a2 = a1 + hunk.removed
+    blines = [l[1:] for l in patchlines[1:] if l[0] != '-']
+    return path, (a1, a2, blines)
+
+def overlaydiffcontext(ctx, chunks):
+    """(ctx, [crecord.uihunk]) -> memctx
+
+    return a memctx with some [1] patches (chunks) applied to ctx.
+    [1]: modifications are handled. renames, mode changes, etc. are ignored.
+    """
+    # sadly the applying-patch logic is hardly reusable, and messy:
+    # 1. the core logic "_applydiff" is too heavy - it writes .rej files, it
+    #    needs a file stream of a patch and will re-parse it, while we have
+    #    structured hunk objects at hand.
+    # 2. a lot of different implementations about "chunk" (patch.hunk,
+    #    patch.recordhunk, crecord.uihunk)
+    # as we only care about applying changes to modified files, no mode
+    # change, no binary diff, and no renames, it's probably okay to
+    # re-invent the logic using much simpler code here.
+    memworkingcopy = {} # {path: content}
+    patchmap = defaultdict(lambda: []) # {path: [(a1, a2, [bline])]}
+    for path, info in map(_parsechunk, chunks):
+        if not path or not info:
+            continue
+        patchmap[path].append(info)
+    for path, patches in patchmap.iteritems():
+        if path not in ctx or not patches:
+            continue
+        patches.sort(reverse=True)
+        lines = mdiff.splitnewlines(ctx[path].data())
+        for a1, a2, blines in patches:
+            lines[a1:a2] = blines
+        memworkingcopy[path] = ''.join(lines)
+    return overlaycontext(memworkingcopy, ctx)
+
+def absorb(ui, repo, stack=None, targetctx=None, pats=None, opts=None):
+    """pick fixup chunks from targetctx, apply them to stack.
+
+    if targetctx is None, the working copy context will be used.
+    if stack is None, the current draft stack will be used.
+    return fixupstate.
+    """
+    if stack is None:
+        limit = ui.configint('absorb', 'max-stack-size')
+        stack = getdraftstack(repo['.'], limit)
+        if limit and len(stack) >= limit:
+            ui.warn(_('absorb: only the recent %d changesets will '
+                      'be analysed\n')
+                    % limit)
+    if not stack:
+        raise error.Abort(_('no changeset to change'))
+    if targetctx is None: # default to working copy
+        targetctx = repo[None]
+    if pats is None:
+        pats = ()
+    if opts is None:
+        opts = {}
+    state = fixupstate(stack, ui=ui, opts=opts)
+    matcher = scmutil.match(targetctx, pats, opts)
+    if opts.get('interactive'):
+        diff = patch.diff(repo, stack[-1].node(), targetctx.node(), matcher)
+        origchunks = patch.parsepatch(diff)
+        chunks = cmdutil.recordfilter(ui, origchunks)[0]
+        targetctx = overlaydiffcontext(stack[-1], chunks)
+    state.diffwith(targetctx, matcher, showchanges=opts.get('print_changes'))
+    if not opts.get('dry_run'):
+        state.apply()
+        if state.commit():
+            state.printchunkstats()
+        elif not ui.quiet:
+            ui.write(_('nothing applied\n'))
+    return state
+
+@command('^absorb',
+         [('p', 'print-changes', None,
+           _('print which changesets are modified by which changes')),
+          ('i', 'interactive', None,
+           _('interactively select which chunks to apply (EXPERIMENTAL)')),
+          ('e', 'edit-lines', None,
+           _('edit what lines belong to which changesets before commit '
+             '(EXPERIMENTAL)')),
+         ] + commands.dryrunopts + commands.walkopts,
+         _('hg absorb [OPTION] [FILE]...'))
+def absorbcmd(ui, repo, *pats, **opts):
+    """incorporate corrections into the stack of draft changesets
+
+    absorb analyzes each change in your working directory and attempts to
+    amend the changed lines into the changesets in your stack that first
+    introduced those lines.
+
+    If absorb cannot find an unambiguous changeset to amend for a change,
+    that change will be left in the working directory, untouched. They can be
+    observed by :hg:`status` or :hg:`diff` afterwards. In other words,
+    absorb does not write to the working directory.
+
+    Changesets outside the revset `::. and not public() and not merge()` will
+    not be changed.
+
+    Changesets that become empty after applying the changes will be deleted.
+
+    If in doubt, run :hg:`absorb -pn` to preview what changesets will
+    be amended by what changed lines, without actually changing anything.
+
+    Returns 0 on success, 1 if all chunks were ignored and nothing amended.
+    """
+    state = absorb(ui, repo, pats=pats, opts=opts)
+    if sum(s[0] for s in state.chunkstats.values()) == 0:
+        return 1
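The odd/even revision numbering described in filefixupstate's docstring can be exercised directly against the linelog API this extension uses (only `replacelines`, `annotate`, and `annotateresult`, all of which appear in the diff above). A minimal sketch, assuming `mercurial.linelog` is importable from an hg checkout; the exact annotate output shown in the comment is illustrative:

```python
from mercurial import linelog

ll = linelog.linelog()
# linelog rev 1: the "original" file content, three lines inserted at 0
ll.replacelines(1, 0, 0, 0, 3)
# linelog rev 2: a fixup that rewrites the middle line in place
ll.replacelines(2, 1, 2, 1, 2)

ll.annotate(2)
# each entry is (linelog rev, line number): odd revs mark original
# content, even revs mark fixup content, mirroring absorb's scheme
print(ll.annotateresult)  # e.g. [(1, 0), (2, 1), (1, 2)]
```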
--- a/hgext/acl.py	Sun Aug 19 13:27:02 2018 +0900
+++ b/hgext/acl.py	Mon Aug 20 09:48:08 2018 -0700
@@ -220,6 +220,7 @@
     error,
     extensions,
     match,
+    pycompat,
     registrar,
     util,
 )
@@ -403,7 +404,7 @@
     allow = buildmatch(ui, repo, user, 'acl.allow')
     deny = buildmatch(ui, repo, user, 'acl.deny')
 
-    for rev in xrange(repo[node].rev(), len(repo)):
+    for rev in pycompat.xrange(repo[node].rev(), len(repo)):
         ctx = repo[rev]
         branch = ctx.branch()
         if denybranches and denybranches(branch):
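Many hunks in this changeset make the same substitution: bare `xrange` (a Python 2 builtin that no longer exists on Python 3) becomes `pycompat.xrange`. A minimal sketch of how such a compatibility alias can be defined; this is illustrative, not Mercurial's actual pycompat source:

```python
# compat.py - stand-in for the pycompat.xrange alias (assumption: the
# real module does considerably more than this)
import sys

if sys.version_info[0] >= 3:
    xrange = range   # Python 3: range is already lazy, like old xrange
else:
    xrange = xrange  # Python 2: re-export the builtin under this name

# usage, matching the hunks in this changeset:
#     for rev in compat.xrange(start, stop): ...
```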
--- a/hgext/beautifygraph.py	Sun Aug 19 13:27:02 2018 +0900
+++ b/hgext/beautifygraph.py	Mon Aug 20 09:48:08 2018 -0700
@@ -18,6 +18,7 @@
     encoding,
     extensions,
     graphmod,
+    pycompat,
     templatekw,
 )
 
@@ -53,8 +54,10 @@
 def convertedges(line):
     line = ' %s ' % line
     pretty = []
-    for idx in xrange(len(line) - 2):
-        pretty.append(prettyedge(line[idx], line[idx + 1], line[idx + 2]))
+    for idx in pycompat.xrange(len(line) - 2):
+        pretty.append(prettyedge(line[idx:idx + 1],
+                                 line[idx + 1:idx + 2],
+                                 line[idx + 2:idx + 3]))
    return ''.join(pretty)
 
 def getprettygraphnode(orig, *args, **kwargs):
@@ -81,7 +84,7 @@
         ui.warn(_('beautifygraph: unsupported encoding, UTF-8 required\n'))
         return
 
-    if 'A' in encoding._wide:
+    if r'A' in encoding._wide:
         ui.warn(_('beautifygraph: unsupported terminal settings, '
                   'monospace narrow text required\n'))
         return
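The `convertedges` hunk switches from indexing to one-byte slices because the two operations diverge on Python 3 bytes. A quick demonstration (run under Python 3):

```python
line = b' o '
print(line[1])      # 111 -- indexing a bytes object yields an int on py3
print(line[1:2])    # b'o' -- a one-byte slice stays bytes on py2 and py3
```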
--- a/hgext/blackbox.py	Sun Aug 19 13:27:02 2018 +0900
+++ b/hgext/blackbox.py	Mon Aug 20 09:48:08 2018 -0700
@@ -45,6 +45,7 @@
 
 from mercurial import (
     encoding,
+    pycompat,
     registrar,
     ui as uimod,
     util,
@@ -111,7 +112,7 @@
             if st.st_size >= maxsize:
                 path = vfs.join(name)
                 maxfiles = ui.configint('blackbox', 'maxfiles')
-                for i in xrange(maxfiles - 1, 1, -1):
+                for i in pycompat.xrange(maxfiles - 1, 1, -1):
                     rotate(oldpath='%s.%d' % (path, i - 1),
                            newpath='%s.%d' % (path, i))
                 rotate(oldpath=path,
--- a/hgext/censor.py	Sun Aug 19 13:27:02 2018 +0900
+++ b/hgext/censor.py	Mon Aug 20 09:48:08 2018 -0700
@@ -32,6 +32,7 @@
 
 from mercurial import (
     error,
+    pycompat,
     registrar,
     revlog,
     scmutil,
@@ -160,7 +161,7 @@
     offset += rewrite(crev, offset, tombstone + pad, revlog.REVIDX_ISCENSORED)
 
     # Rewrite all following filelog revisions fixing up offsets and deltas.
-    for srev in xrange(crev + 1, len(flog)):
+    for srev in pycompat.xrange(crev + 1, len(flog)):
         if crev in flog.parentrevs(srev):
             # Immediate children of censored node must be re-added as fulltext.
             try:
--- a/hgext/convert/cvsps.py	Sun Aug 19 13:27:02 2018 +0900
+++ b/hgext/convert/cvsps.py	Mon Aug 20 09:48:08 2018 -0700
@@ -763,7 +763,7 @@
             # branchpoints such that it is the latest possible
             # commit without any intervening, unrelated commits.
 
-            for candidate in xrange(i):
+            for candidate in pycompat.xrange(i):
                 if c.branch not in changesets[candidate].branchpoints:
                     if p is not None:
                         break
--- a/hgext/eol.py	Sun Aug 19 13:27:02 2018 +0900
+++ b/hgext/eol.py	Mon Aug 20 09:48:08 2018 -0700
@@ -266,7 +266,7 @@
     ensureenabled(ui)
     files = set()
     revs = set()
-    for rev in xrange(repo[node].rev(), len(repo)):
+    for rev in pycompat.xrange(repo[node].rev(), len(repo)):
         revs.add(rev)
         if headsonly:
             ctx = repo[rev]
--- a/hgext/fix.py	Sun Aug 19 13:27:02 2018 +0900
+++ b/hgext/fix.py	Mon Aug 20 09:48:08 2018 -0700
@@ -96,15 +96,16 @@
 # user.
 configitem('fix', 'maxfilesize', default='2MB')
 
-@command('fix',
-    [('', 'all', False, _('fix all non-public non-obsolete revisions')),
-     ('', 'base', [], _('revisions to diff against (overrides automatic '
-                        'selection, and applies to every revision being '
-                        'fixed)'), _('REV')),
-     ('r', 'rev', [], _('revisions to fix'), _('REV')),
-     ('w', 'working-dir', False, _('fix the working directory')),
-     ('', 'whole', False, _('always fix every line of a file'))],
-    _('[OPTION]... [FILE]...'))
+allopt = ('', 'all', False, _('fix all non-public non-obsolete revisions'))
+baseopt = ('', 'base', [], _('revisions to diff against (overrides automatic '
+                             'selection, and applies to every revision being '
+                             'fixed)'), _('REV'))
+revopt = ('r', 'rev', [], _('revisions to fix'), _('REV'))
+wdiropt = ('w', 'working-dir', False, _('fix the working directory'))
+wholeopt = ('', 'whole', False, _('always fix every line of a file'))
+usage = _('[OPTION]... [FILE]...')
+
+@command('fix', [allopt, baseopt, revopt, wdiropt, wholeopt], usage)
 def fix(ui, repo, *pats, **opts):
     """rewrite file content in changesets or working directory
 
@@ -161,6 +162,7 @@
         # it makes the results more easily reproducible.
         filedata = collections.defaultdict(dict)
         replacements = {}
+        wdirwritten = False
         commitorder = sorted(revstofix, reverse=True)
         with ui.makeprogress(topic=_('fixing'), unit=_('files'),
                              total=sum(numitems.values())) as progress:
@@ -178,12 +180,28 @@
                 ctx = repo[rev]
                 if rev == wdirrev:
                     writeworkingdir(repo, ctx, filedata[rev], replacements)
+                    wdirwritten = bool(filedata[rev])
                 else:
                     replacerev(ui, repo, ctx, filedata[rev], replacements)
                 del filedata[rev]
 
-    replacements = {prec: [succ] for prec, succ in replacements.iteritems()}
-    scmutil.cleanupnodes(repo, replacements, 'fix', fixphase=True)
+    cleanup(repo, replacements, wdirwritten)
+
+def cleanup(repo, replacements, wdirwritten):
+    """Calls scmutil.cleanupnodes() with the given replacements.
+
+    "replacements" is a dict from nodeid to nodeid, with one key and one value
+    for every revision that was affected by fixing. This is slightly different
+    from cleanupnodes().
+
+    "wdirwritten" is a bool which tells whether the working copy was affected by
+    fixing, since it has no entry in "replacements".
+
+    Useful as a hook point for extending "hg fix" with output summarizing the
+    effects of the command, though we choose not to output anything here.
+    """
+    replacements = {prec: [succ] for prec, succ in replacements.iteritems()}
+    scmutil.cleanupnodes(repo, replacements, 'fix', fixphase=True)
 
 def getworkqueue(ui, repo, pats, opts, revstofix, basectxs):
     """"Constructs the list of files to be fixed at specific revisions
@@ -267,8 +285,8 @@
     """
     files = set()
     for basectx in basectxs:
-        stat = repo.status(
-            basectx, fixctx, match=match, clean=bool(pats), unknown=bool(pats))
+        stat = basectx.status(fixctx, match=match, listclean=bool(pats),
+                              listunknown=bool(pats))
         files.update(
             set(itertools.chain(stat.added, stat.modified, stat.clean,
                                 stat.unknown)))
@@ -417,12 +435,15 @@
     starting with the file's content in the fixctx. Fixers that support line
    ranges will affect lines that have changed relative to any of the basectxs
     (i.e. they will only avoid lines that are common to all basectxs).
+
+    A fixer tool's stdout will become the file's new content if and only if it
+    exits with code zero.
     """
     newdata = fixctx[path].data()
     for fixername, fixer in fixers.iteritems():
         if fixer.affects(opts, fixctx, path):
-            ranges = lineranges(opts, path, basectxs, fixctx, newdata)
-            command = fixer.command(ui, path, ranges)
+            rangesfn = lambda: lineranges(opts, path, basectxs, fixctx, newdata)
+            command = fixer.command(ui, path, rangesfn)
             if command is None:
                 continue
             ui.debug('subprocess: %s\n' % (command,))
@@ -436,8 +457,11 @@
             newerdata, stderr = proc.communicate(newdata)
             if stderr:
                 showstderr(ui, fixctx.rev(), fixername, stderr)
-            else:
+            if proc.returncode == 0:
                 newdata = newerdata
+            elif not stderr:
+                showstderr(ui, fixctx.rev(), fixername,
+                           _('exited with status %d\n') % (proc.returncode,))
     return newdata
 
 def showstderr(ui, rev, fixername, stderr):
@@ -567,7 +591,7 @@
         """Should this fixer run on the file at the given path and context?"""
         return scmutil.match(fixctx, [self._fileset], opts)(path)
 
-    def command(self, ui, path, ranges):
+    def command(self, ui, path, rangesfn):
        """A shell command to use to invoke this fixer on the given file/lines
 
         May return None if there is no appropriate command to run for the given
@@ -577,6 +601,7 @@
         parts = [expand(ui, self._command,
                         {'rootpath': path, 'basename': os.path.basename(path)})]
         if self._linerange:
+            ranges = rangesfn()
             if not ranges:
                 # No line ranges to fix, so don't run the fixer.
                 return None
--- a/hgext/hgk.py	Sun Aug 19 13:27:02 2018 +0900
+++ b/hgext/hgk.py	Mon Aug 20 09:48:08 2018 -0700
@@ -227,7 +227,7 @@
         else:
             i -= chunk
 
-        for x in xrange(chunk):
+        for x in pycompat.xrange(chunk):
            if i + x >= count:
                 l[chunk - x:] = [0] * (chunk - x)
                 break
@@ -238,7 +238,7 @@
             else:
                 if (i + x) in repo:
                     l[x] = 1
-        for x in xrange(chunk - 1, -1, -1):
+        for x in pycompat.xrange(chunk - 1, -1, -1):
             if l[x] != 0:
                 yield (i + x, full is not None and l[x] or None)
         if i == 0:
@@ -249,7 +249,7 @@
     if len(ar) == 0:
         return 1
     mask = 0
-    for i in xrange(len(ar)):
+    for i in pycompat.xrange(len(ar)):
         if sha in reachable[i]:
             mask |= 1 << i
--- a/hgext/histedit.py	Sun Aug 19 13:27:02 2018 +0900
+++ b/hgext/histedit.py	Mon Aug 20 09:48:08 2018 -0700
@@ -386,7 +386,7 @@
         rules = []
         rulelen = int(lines[index])
         index += 1
-        for i in xrange(rulelen):
+        for i in pycompat.xrange(rulelen):
             ruleaction = lines[index]
             index += 1
             rule = lines[index]
@@ -397,7 +397,7 @@
         replacements = []
         replacementlen = int(lines[index])
         index += 1
-        for i in xrange(replacementlen):
+        for i in pycompat.xrange(replacementlen):
             replacement = lines[index]
             original = node.bin(replacement[:40])
             succ = [node.bin(replacement[i:i + 40]) for i in
@@ -1084,7 +1084,7 @@
             raise error.Abort(_('only --commands argument allowed with '
                                 '--edit-plan'))
     else:
-        if os.path.exists(os.path.join(repo.path, 'histedit-state')):
+        if state.inprogress():
             raise error.Abort(_('history edit already in progress, try '
                                 '--continue or --abort'))
         if outg:
@@ -1624,8 +1624,8 @@
 def stripwrapper(orig, ui, repo, nodelist, *args, **kwargs):
     if isinstance(nodelist, str):
         nodelist = [nodelist]
-    if os.path.exists(os.path.join(repo.path, 'histedit-state')):
-        state = histeditstate(repo)
+    state = histeditstate(repo)
+    if state.inprogress():
         state.read()
         histedit_nodes = {action.node for action in state.actions
                           if action.node}
@@ -1638,9 +1638,9 @@
     extensions.wrapfunction(repair, 'strip', stripwrapper)
 
 def summaryhook(ui, repo):
-    if not os.path.exists(repo.vfs.join('histedit-state')):
+    state = histeditstate(repo)
+    if not state.inprogress():
         return
-    state = histeditstate(repo)
     state.read()
     if state.actions:
         # i18n: column positioning for "hg summary"
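The pattern these hunks converge on: let the state object own the "is an operation underway?" check instead of having callers probe the state file path directly. A minimal sketch of that encapsulation; `histedit-state` is the real file name, but the class below is a simplification of histeditstate, and `repo.vfs.exists` is used on the assumption it behaves like the vfs calls elsewhere in this changeset:

```python
class editstate(object):
    def __init__(self, repo):
        self.repo = repo

    def inprogress(self):
        # one place to answer the question, instead of three callers
        # each rebuilding the path by hand
        return self.repo.vfs.exists('histedit-state')
```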
--- a/hgext/lfs/__init__.py	Sun Aug 19 13:27:02 2018 +0900
+++ b/hgext/lfs/__init__.py	Mon Aug 20 09:48:08 2018 -0700
@@ -136,7 +136,7 @@
     exchange,
     extensions,
     filelog,
-    fileset,
+    filesetlang,
     hg,
     localrepo,
     minifileset,
@@ -261,7 +261,7 @@
         # deprecated config: lfs.threshold
         threshold = repo.ui.configbytes('lfs', 'threshold')
         if threshold:
-            fileset.parse(trackspec) # make sure syntax errors are confined
+            filesetlang.parse(trackspec) # make sure syntax errors are confined
            trackspec = "(%s) | size('>%d')" % (trackspec, threshold)
 
         return minifileset.compile(trackspec)
@@ -357,11 +357,11 @@
     # when writing a bundle via "hg bundle" command, upload related LFS blobs
     wrapfunction(bundle2, 'writenewbundle', wrapper.writenewbundle)
 
-@filesetpredicate('lfs()', callstatus=True)
+@filesetpredicate('lfs()')
 def lfsfileset(mctx, x):
     """File that uses LFS storage."""
     # i18n: "lfs" is a keyword
-    fileset.getargs(x, 0, 0, _("lfs takes no arguments"))
+    filesetlang.getargs(x, 0, 0, _("lfs takes no arguments"))
     ctx = mctx.ctx
     def lfsfilep(f):
         return wrapper.pointerfromctx(ctx, f, removed=True) is not None
--- a/hgext/mq.py	Sun Aug 19 13:27:02 2018 +0900
+++ b/hgext/mq.py	Mon Aug 20 09:48:08 2018 -0700
@@ -414,7 +414,7 @@
         the field and a blank line.'''
         if self.message:
             subj = 'subject: ' + self.message[0].lower()
-            for i in xrange(len(self.comments)):
+            for i in pycompat.xrange(len(self.comments)):
                 if subj == self.comments[i].lower():
                     del self.comments[i]
                     self.message = self.message[2:]
@@ -662,13 +662,13 @@
         exactneg = [g for g in patchguards
                     if g.startswith('-') and g[1:] in guards]
         if exactneg:
-            return False, pycompat.byterepr(exactneg[0])
+            return False, stringutil.pprint(exactneg[0])
         pos = [g for g in patchguards if g.startswith('+')]
         exactpos = [g for g in pos if g[1:] in guards]
         if pos:
             if exactpos:
-                return True, pycompat.byterepr(exactpos[0])
-            return False, ' '.join([pycompat.byterepr(p) for p in pos])
+                return True, stringutil.pprint(exactpos[0])
+            return False, ' '.join([stringutil.pprint(p) for p in pos])
         return True, ''
 
     def explainpushable(self, idx, all_patches=False):
@@ -1800,7 +1800,7 @@
                     # if the patch excludes a modified file, mark that
                     # file with mtime=0 so status can see it.
                     mm = []
-                    for i in xrange(len(m) - 1, -1, -1):
+                    for i in pycompat.xrange(len(m) - 1, -1, -1):
                         if not match1(m[i]):
                             mm.append(m[i])
                             del m[i]
@@ -1908,7 +1908,7 @@
         else:
             start = self.series.index(patch) + 1
         unapplied = []
-        for i in xrange(start, len(self.series)):
+        for i in pycompat.xrange(start, len(self.series)):
             pushable, reason = self.pushable(i)
             if pushable:
                 unapplied.append((i, self.series[i]))
@@ -1946,7 +1946,7 @@
             if not missing:
                 if self.ui.verbose:
                     idxwidth = len("%d" % (start + length - 1))
-                for i in xrange(start, start + length):
+                for i in pycompat.xrange(start, start + length):
                     patch = self.series[i]
                     if patch in applied:
                         char, state = 'A', 'applied'
@@ -2091,7 +2091,7 @@
         def nextpatch(start):
             if all_patches or start >= len(self.series):
                 return start
-            for i in xrange(start, len(self.series)):
+            for i in pycompat.xrange(start, len(self.series)):
                 p, reason = self.pushable(i)
                 if p:
                     return i
@@ -2876,7 +2876,7 @@
         if args or opts.get(r'none'):
             raise error.Abort(_('cannot mix -l/--list with options or '
                                 'arguments'))
-        for i in xrange(len(q.series)):
+        for i in pycompat.xrange(len(q.series)):
            status(i)
         return
     if not args or args[0][0:1] in '-+':
@@ -3179,14 +3179,16 @@
     pushable = lambda i: q.pushable(q.applied[i].name)[0]
     if args or opts.get('none'):
         old_unapplied = q.unapplied(repo)
-        old_guarded = [i for i in xrange(len(q.applied)) if not pushable(i)]
+        old_guarded = [i for i in pycompat.xrange(len(q.applied))
+                       if not pushable(i)]
         q.setactive(args)
         q.savedirty()
         if not args:
             ui.status(_('guards deactivated\n'))
         if not opts.get('pop') and not opts.get('reapply'):
             unapplied = q.unapplied(repo)
-            guarded = [i for i in xrange(len(q.applied)) if not pushable(i)]
+            guarded = [i for i in pycompat.xrange(len(q.applied))
+                       if not pushable(i)]
             if len(unapplied) != len(old_unapplied):
                 ui.status(_('number of unguarded, unapplied patches has '
                             'changed from %d to %d\n') %
@@ -3225,7 +3227,7 @@
     reapply = opts.get('reapply') and q.applied and q.applied[-1].name
     popped = False
     if opts.get('pop') or opts.get('reapply'):
-        for i in xrange(len(q.applied)):
+        for i in pycompat.xrange(len(q.applied)):
             if not pushable(i):
                 ui.status(_('popping guarded patches\n'))
                 popped = True
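The `pycompat.byterepr` to `stringutil.pprint` swap in the guard hunks matters because `repr()` of a bytes value grows a `b''` prefix on Python 3, so user-facing guard messages would differ between interpreters. A plain-Python illustration of the underlying problem (not Mercurial's actual helpers):

```python
val = b'+stable'
# Python 2: repr(val) == "'+stable'"
# Python 3: repr(val) == "b'+stable'"
# A version-stable pretty-printer keeps messages identical on both.
print(repr(val))
```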
--- a/hgext/narrow/__init__.py	Sun Aug 19 13:27:02 2018 +0900
+++ b/hgext/narrow/__init__.py	Mon Aug 20 09:48:08 2018 -0700
@@ -15,17 +15,15 @@
 testedwith = 'ships-with-hg-core'
 
 from mercurial import (
-    changegroup,
     extensions,
-    hg,
     localrepo,
     registrar,
+    repository,
     verify as verifymod,
 )
 
 from . import (
     narrowbundle2,
-    narrowchangegroup,
     narrowcommands,
     narrowcopies,
     narrowpatch,
@@ -55,7 +53,7 @@
 cmdtable = narrowcommands.table
 
 def featuresetup(ui, features):
-    features.add(changegroup.NARROW_REQUIREMENT)
+    features.add(repository.NARROW_REQUIREMENT)
 
 def uisetup(ui):
     """Wraps user-facing mercurial commands with narrow-aware versions."""
@@ -63,7 +61,6 @@
     narrowrevlog.setup()
     narrowbundle2.setup()
     narrowcommands.setup()
-    narrowchangegroup.setup()
     narrowwirepeer.uisetup()
 
 def reposetup(ui, repo):
@@ -71,7 +68,7 @@
     if not repo.local():
         return
 
-    if changegroup.NARROW_REQUIREMENT in repo.requirements:
+    if repository.NARROW_REQUIREMENT in repo.requirements:
         narrowrepo.wraprepo(repo)
         narrowcopies.setup(repo)
         narrowpatch.setup(repo)
@@ -86,8 +83,6 @@
 
 def extsetup(ui):
     extensions.wrapfunction(verifymod.verifier, '__init__', _verifierinit)
-    extensions.wrapfunction(hg, 'postshare', narrowrepo.wrappostshare)
-    extensions.wrapfunction(hg, 'copystore', narrowrepo.unsharenarrowspec)
 
 templatekeyword = narrowtemplates.templatekeyword
 revsetpredicate = narrowtemplates.revsetpredicate
--- a/hgext/narrow/narrowbundle2.py Sun Aug 19 13:27:02 2018 +0900 +++ b/hgext/narrow/narrowbundle2.py Mon Aug 20 09:48:08 2018 -0700 @@ -7,7 +7,6 @@ from __future__ import absolute_import -import collections import errno import struct @@ -15,17 +14,16 @@ from mercurial.node import ( bin, nullid, - nullrev, ) from mercurial import ( bundle2, changegroup, - dagutil, error, exchange, extensions, narrowspec, repair, + repository, util, wireprototypes, ) @@ -52,171 +50,12 @@ caps[NARROWCAP] = ['v0'] return caps -def _computeellipsis(repo, common, heads, known, match, depth=None): - """Compute the shape of a narrowed DAG. - - Args: - repo: The repository we're transferring. - common: The roots of the DAG range we're transferring. - May be just [nullid], which means all ancestors of heads. - heads: The heads of the DAG range we're transferring. - match: The narrowmatcher that allows us to identify relevant changes. - depth: If not None, only consider nodes to be full nodes if they are at - most depth changesets away from one of heads. - - Returns: - A tuple of (visitnodes, relevant_nodes, ellipsisroots) where: - - visitnodes: The list of nodes (either full or ellipsis) which - need to be sent to the client. - relevant_nodes: The set of changelog nodes which change a file inside - the narrowspec. The client needs these as non-ellipsis nodes. - ellipsisroots: A dict of {rev: parents} that is used in - narrowchangegroup to produce ellipsis nodes with the - correct parents. - """ - cl = repo.changelog - mfl = repo.manifestlog - - cldag = dagutil.revlogdag(cl) - # dagutil does not like nullid/nullrev - commonrevs = cldag.internalizeall(common - set([nullid])) | set([nullrev]) - headsrevs = cldag.internalizeall(heads) - if depth: - revdepth = {h: 0 for h in headsrevs} - - ellipsisheads = collections.defaultdict(set) - ellipsisroots = collections.defaultdict(set) - - def addroot(head, curchange): - """Add a root to an ellipsis head, splitting heads with 3 roots.""" - ellipsisroots[head].add(curchange) - # Recursively split ellipsis heads with 3 roots by finding the - # roots' youngest common descendant which is an elided merge commit. - # That descendant takes 2 of the 3 roots as its own, and becomes a - # root of the head. - while len(ellipsisroots[head]) > 2: - child, roots = splithead(head) - splitroots(head, child, roots) - head = child # Recurse in case we just added a 3rd root - - def splitroots(head, child, roots): - ellipsisroots[head].difference_update(roots) - ellipsisroots[head].add(child) - ellipsisroots[child].update(roots) - ellipsisroots[child].discard(child) - - def splithead(head): - r1, r2, r3 = sorted(ellipsisroots[head]) - for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)): - mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)', - nr1, head, nr2, head) - for j in mid: - if j == nr2: - return nr2, (nr1, nr2) - if j not in ellipsisroots or len(ellipsisroots[j]) < 2: - return j, (nr1, nr2) - raise error.Abort('Failed to split up ellipsis node! 
head: %d, ' - 'roots: %d %d %d' % (head, r1, r2, r3)) - - missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs)) - visit = reversed(missing) - relevant_nodes = set() - visitnodes = [cl.node(m) for m in missing] - required = set(headsrevs) | known - for rev in visit: - clrev = cl.changelogrevision(rev) - ps = cldag.parents(rev) - if depth is not None: - curdepth = revdepth[rev] - for p in ps: - revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1)) - needed = False - shallow_enough = depth is None or revdepth[rev] <= depth - if shallow_enough: - curmf = mfl[clrev.manifest].read() - if ps: - # We choose to not trust the changed files list in - # changesets because it's not always correct. TODO: could - # we trust it for the non-merge case? - p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read() - needed = bool(curmf.diff(p1mf, match)) - if not needed and len(ps) > 1: - # For merge changes, the list of changed files is not - # helpful, since we need to emit the merge if a file - # in the narrow spec has changed on either side of the - # merge. As a result, we do a manifest diff to check. - p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read() - needed = bool(curmf.diff(p2mf, match)) - else: - # For a root node, we need to include the node if any - # files in the node match the narrowspec. - needed = any(curmf.walk(match)) - - if needed: - for head in ellipsisheads[rev]: - addroot(head, rev) - for p in ps: - required.add(p) - relevant_nodes.add(cl.node(rev)) - else: - if not ps: - ps = [nullrev] - if rev in required: - for head in ellipsisheads[rev]: - addroot(head, rev) - for p in ps: - ellipsisheads[p].add(rev) - else: - for p in ps: - ellipsisheads[p] |= ellipsisheads[rev] - - # add common changesets as roots of their reachable ellipsis heads - for c in commonrevs: - for head in ellipsisheads[c]: - addroot(head, c) - return visitnodes, relevant_nodes, ellipsisroots - -def _packellipsischangegroup(repo, common, match, relevant_nodes, - ellipsisroots, visitnodes, depth, source, version): - if version in ('01', '02'): - raise error.Abort( - 'ellipsis nodes require at least cg3 on client and server, ' - 'but negotiated version %s' % version) - # We wrap cg1packer.revchunk, using a side channel to pass - # relevant_nodes into that area. Then if linknode isn't in the - # set, we know we have an ellipsis node and we should defer - # sending that node's data. We override close() to detect - # pending ellipsis nodes and flush them. - packer = changegroup.getbundler(version, repo) - # Let the packer have access to the narrow matcher so it can - # omit filelogs and dirlogs as needed - packer._narrow_matcher = lambda : match - # Give the packer the list of nodes which should not be - # ellipsis nodes. We store this rather than the set of nodes - # that should be an ellipsis because for very large histories - # we expect this to be significantly smaller. - packer.full_nodes = relevant_nodes - # Maps ellipsis revs to their roots at the changelog level. - packer.precomputed_ellipsis = ellipsisroots - # Maps CL revs to per-revlog revisions. Cleared in close() at - # the end of each group. - packer.clrev_to_localrev = {} - packer.next_clrev_to_localrev = {} - # Maps changelog nodes to changelog revs. Filled in once - # during changelog stage and then left unmodified. - packer.clnode_to_rev = {} - packer.changelog_done = False - # If true, informs the packer that it is serving shallow content and might - # need to pack file contents not introduced by the changes being packed. 
- packer.is_shallow = depth is not None - - return packer.generate(common, visitnodes, False, source) - # Serve a changegroup for a client with a narrow clone. def getbundlechangegrouppart_narrow(bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, common=None, **kwargs): + assert repo.ui.configbool('experimental', 'narrowservebrokenellipses') + cgversions = b2caps.get('changegroup') if cgversions: # 3.1 and 3.2 ship with an empty value cgversions = [v for v in cgversions @@ -231,32 +70,6 @@ include = sorted(filter(bool, kwargs.get(r'includepats', []))) exclude = sorted(filter(bool, kwargs.get(r'excludepats', []))) newmatch = narrowspec.match(repo.root, include=include, exclude=exclude) - if not repo.ui.configbool("experimental", "narrowservebrokenellipses"): - outgoing = exchange._computeoutgoing(repo, heads, common) - if not outgoing.missing: - return - def wrappedgetbundler(orig, *args, **kwargs): - bundler = orig(*args, **kwargs) - bundler._narrow_matcher = lambda : newmatch - return bundler - with extensions.wrappedfunction(changegroup, 'getbundler', - wrappedgetbundler): - cg = changegroup.makestream(repo, outgoing, version, source) - part = bundler.newpart('changegroup', data=cg) - part.addparam('version', version) - if 'treemanifest' in repo.requirements: - part.addparam('treemanifest', '1') - - if include or exclude: - narrowspecpart = bundler.newpart(_SPECPART) - if include: - narrowspecpart.addparam( - _SPECPART_INCLUDE, '\n'.join(include), mandatory=True) - if exclude: - narrowspecpart.addparam( - _SPECPART_EXCLUDE, '\n'.join(exclude), mandatory=True) - - return depth = kwargs.get(r'depth', None) if depth is not None: @@ -300,72 +113,46 @@ yield repo.changelog.node(r) yield _DONESIGNAL bundler.newpart(_CHANGESPECPART, data=genkills()) - newvisit, newfull, newellipsis = _computeellipsis( + newvisit, newfull, newellipsis = exchange._computeellipsis( repo, set(), common, known, newmatch) if newvisit: - cg = _packellipsischangegroup( - repo, common, newmatch, newfull, newellipsis, - newvisit, depth, source, version) - part = bundler.newpart('changegroup', data=cg) + packer = changegroup.getbundler(version, repo, + filematcher=newmatch, + ellipses=True, + shallow=depth is not None, + ellipsisroots=newellipsis, + fullnodes=newfull) + cgdata = packer.generate(common, newvisit, False, source) + + part = bundler.newpart('changegroup', data=cgdata) part.addparam('version', version) if 'treemanifest' in repo.requirements: part.addparam('treemanifest', '1') - visitnodes, relevant_nodes, ellipsisroots = _computeellipsis( + visitnodes, relevant_nodes, ellipsisroots = exchange._computeellipsis( repo, common, heads, set(), newmatch, depth=depth) repo.ui.debug('Found %d relevant revs\n' % len(relevant_nodes)) if visitnodes: - cg = _packellipsischangegroup( - repo, common, newmatch, relevant_nodes, ellipsisroots, - visitnodes, depth, source, version) - part = bundler.newpart('changegroup', data=cg) + packer = changegroup.getbundler(version, repo, + filematcher=newmatch, + ellipses=True, + shallow=depth is not None, + ellipsisroots=ellipsisroots, + fullnodes=relevant_nodes) + cgdata = packer.generate(common, visitnodes, False, source) + + part = bundler.newpart('changegroup', data=cgdata) part.addparam('version', version) if 'treemanifest' in repo.requirements: part.addparam('treemanifest', '1') -def applyacl_narrow(repo, kwargs): - ui = repo.ui - username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username()) - user_includes = ui.configlist( - _NARROWACL_SECTION, username + 
'.includes', - ui.configlist(_NARROWACL_SECTION, 'default.includes')) - user_excludes = ui.configlist( - _NARROWACL_SECTION, username + '.excludes', - ui.configlist(_NARROWACL_SECTION, 'default.excludes')) - if not user_includes: - raise error.Abort(_("{} configuration for user {} is empty") - .format(_NARROWACL_SECTION, username)) - - user_includes = [ - 'path:.' if p == '*' else 'path:' + p for p in user_includes] - user_excludes = [ - 'path:.' if p == '*' else 'path:' + p for p in user_excludes] - - req_includes = set(kwargs.get(r'includepats', [])) - req_excludes = set(kwargs.get(r'excludepats', [])) - - req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns( - req_includes, req_excludes, user_includes, user_excludes) - - if invalid_includes: - raise error.Abort( - _("The following includes are not accessible for {}: {}") - .format(username, invalid_includes)) - - new_args = {} - new_args.update(kwargs) - new_args['includepats'] = req_includes - if req_excludes: - new_args['excludepats'] = req_excludes - return new_args - @bundle2.parthandler(_SPECPART, (_SPECPART_INCLUDE, _SPECPART_EXCLUDE)) def _handlechangespec_2(op, inpart): includepats = set(inpart.params.get(_SPECPART_INCLUDE, '').splitlines()) excludepats = set(inpart.params.get(_SPECPART_EXCLUDE, '').splitlines()) - if not changegroup.NARROW_REQUIREMENT in op.repo.requirements: - op.repo.requirements.add(changegroup.NARROW_REQUIREMENT) + if not repository.NARROW_REQUIREMENT in op.repo.requirements: + op.repo.requirements.add(repository.NARROW_REQUIREMENT) op.repo._writerequirements() op.repo.setnarrowpats(includepats, excludepats) @@ -479,27 +266,15 @@ def wrappedcgfn(*args, **kwargs): repo = args[1] if repo.ui.has_section(_NARROWACL_SECTION): - getbundlechangegrouppart_narrow( - *args, **applyacl_narrow(repo, kwargs)) - elif kwargs.get(r'narrow', False): + kwargs = exchange.applynarrowacl(repo, kwargs) + + if (kwargs.get(r'narrow', False) and + repo.ui.configbool('experimental', 'narrowservebrokenellipses')): getbundlechangegrouppart_narrow(*args, **kwargs) else: origcgfn(*args, **kwargs) exchange.getbundle2partsmapping['changegroup'] = wrappedcgfn - # disable rev branch cache exchange when serving a narrow bundle - # (currently incompatible with that part) - origrbcfn = exchange.getbundle2partsmapping['cache:rev-branch-cache'] - def wrappedcgfn(*args, **kwargs): - repo = args[1] - if repo.ui.has_section(_NARROWACL_SECTION): - return - elif kwargs.get(r'narrow', False): - return - else: - origrbcfn(*args, **kwargs) - exchange.getbundle2partsmapping['cache:rev-branch-cache'] = wrappedcgfn - # Extend changegroup receiver so client can fixup after widen requests. origcghandler = bundle2.parthandlermapping['changegroup'] def wrappedcghandler(op, inpart):
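[Editor's note: with _computeellipsis relocated into exchange and _packellipsischangegroup folded into core, the narrow extension now asks changegroup.getbundler for ellipsis support via keyword arguments (ellipses=, shallow=, ellipsisroots=, fullnodes=) instead of monkey-patching packer attributes. An ellipsis node stands in for an elided stretch of history, with its parents re-pointed at the nearest retained ancestors; a toy, pure-Python illustration of that re-parenting, unrelated to Mercurial's actual data structures:

    # full history 0 -> 1 -> 2 -> 3, where only 0 and 3 touch the narrow spec
    full_parents = {0: [], 1: [0], 2: [1], 3: [2]}
    kept = {0, 3}

    def nearest_kept(rev):
        # climb parents until retained ancestors are found
        out = set()
        for p in full_parents[rev]:
            out.update({p} if p in kept else nearest_kept(p))
        return out

    ellipsis_parents = {r: sorted(nearest_kept(r)) for r in kept}
    assert ellipsis_parents == {0: [], 3: [0]}
]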
--- a/hgext/narrow/narrowchangegroup.py Sun Aug 19 13:27:02 2018 +0900 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,373 +0,0 @@ -# narrowchangegroup.py - narrow clone changegroup creation and consumption -# -# Copyright 2017 Google, Inc. -# -# This software may be used and distributed according to the terms of the -# GNU General Public License version 2 or any later version. - -from __future__ import absolute_import - -from mercurial.i18n import _ -from mercurial import ( - changegroup, - error, - extensions, - manifest, - match as matchmod, - mdiff, - node, - revlog, - util, -) - -def setup(): - - def _cgmatcher(cgpacker): - localmatcher = cgpacker._repo.narrowmatch() - remotematcher = getattr(cgpacker, '_narrow_matcher', lambda: None)() - if remotematcher: - return matchmod.intersectmatchers(localmatcher, remotematcher) - else: - return localmatcher - - def prune(orig, self, revlog, missing, commonrevs): - if isinstance(revlog, manifest.manifestrevlog): - matcher = _cgmatcher(self) - if (matcher and - not matcher.visitdir(revlog._dir[:-1] or '.')): - return [] - return orig(self, revlog, missing, commonrevs) - - extensions.wrapfunction(changegroup.cg1packer, 'prune', prune) - - def generatefiles(orig, self, changedfiles, linknodes, commonrevs, - source): - matcher = _cgmatcher(self) - if matcher: - changedfiles = list(filter(matcher, changedfiles)) - if getattr(self, 'is_shallow', False): - # See comment in generate() for why this sadness is a thing. - mfdicts = self._mfdicts - del self._mfdicts - # In a shallow clone, the linknodes callback needs to also include - # those file nodes that are in the manifests we sent but weren't - # introduced by those manifests. - commonctxs = [self._repo[c] for c in commonrevs] - oldlinknodes = linknodes - clrev = self._repo.changelog.rev - def linknodes(flog, fname): - for c in commonctxs: - try: - fnode = c.filenode(fname) - self.clrev_to_localrev[c.rev()] = flog.rev(fnode) - except error.ManifestLookupError: - pass - links = oldlinknodes(flog, fname) - if len(links) != len(mfdicts): - for mf, lr in mfdicts: - fnode = mf.get(fname, None) - if fnode in links: - links[fnode] = min(links[fnode], lr, key=clrev) - elif fnode: - links[fnode] = lr - return links - return orig(self, changedfiles, linknodes, commonrevs, source) - extensions.wrapfunction( - changegroup.cg1packer, 'generatefiles', generatefiles) - - def ellipsisdata(packer, rev, revlog_, p1, p2, data, linknode): - n = revlog_.node(rev) - p1n, p2n = revlog_.node(p1), revlog_.node(p2) - flags = revlog_.flags(rev) - flags |= revlog.REVIDX_ELLIPSIS - meta = packer.builddeltaheader( - n, p1n, p2n, node.nullid, linknode, flags) - # TODO: try and actually send deltas for ellipsis data blocks - diffheader = mdiff.trivialdiffheader(len(data)) - l = len(meta) + len(diffheader) + len(data) - return ''.join((changegroup.chunkheader(l), - meta, - diffheader, - data)) - - def close(orig, self): - getattr(self, 'clrev_to_localrev', {}).clear() - if getattr(self, 'next_clrev_to_localrev', {}): - self.clrev_to_localrev = self.next_clrev_to_localrev - del self.next_clrev_to_localrev - self.changelog_done = True - return orig(self) - extensions.wrapfunction(changegroup.cg1packer, 'close', close) - - # In a perfect world, we'd generate better ellipsis-ified graphs - # for non-changelog revlogs. In practice, we haven't started doing - # that yet, so the resulting DAGs for the manifestlog and filelogs - # are actually full of bogus parentage on all the ellipsis - # nodes. 
This has the side effect that, while the contents are - # correct, the individual DAGs might be completely out of whack in - # a case like 882681bc3166 and its ancestors (back about 10 - # revisions or so) in the main hg repo. - # - # The one invariant we *know* holds is that the new (potentially - # bogus) DAG shape will be valid if we order the nodes in the - # order that they're introduced in dramatis personae by the - # changelog, so what we do is we sort the non-changelog histories - # by the order in which they are used by the changelog. - def _sortgroup(orig, self, revlog, nodelist, lookup): - if not util.safehasattr(self, 'full_nodes') or not self.clnode_to_rev: - return orig(self, revlog, nodelist, lookup) - key = lambda n: self.clnode_to_rev[lookup(n)] - return [revlog.rev(n) for n in sorted(nodelist, key=key)] - - extensions.wrapfunction(changegroup.cg1packer, '_sortgroup', _sortgroup) - - def generate(orig, self, commonrevs, clnodes, fastpathlinkrev, source): - '''yield a sequence of changegroup chunks (strings)''' - # Note: other than delegating to orig, the only deviation in - # logic from normal hg's generate is marked with BEGIN/END - # NARROW HACK. - if not util.safehasattr(self, 'full_nodes'): - # not sending a narrow bundle - for x in orig(self, commonrevs, clnodes, fastpathlinkrev, source): - yield x - return - - repo = self._repo - cl = repo.changelog - mfl = repo.manifestlog - mfrevlog = mfl._revlog - - clrevorder = {} - mfs = {} # needed manifests - fnodes = {} # needed file nodes - changedfiles = set() - - # Callback for the changelog, used to collect changed files and manifest - # nodes. - # Returns the linkrev node (identity in the changelog case). - def lookupcl(x): - c = cl.read(x) - clrevorder[x] = len(clrevorder) - # BEGIN NARROW HACK - # - # Only update mfs if x is going to be sent. Otherwise we - # end up with bogus linkrevs specified for manifests and - # we skip some manifest nodes that we should otherwise - # have sent. - if x in self.full_nodes or cl.rev(x) in self.precomputed_ellipsis: - n = c[0] - # record the first changeset introducing this manifest version - mfs.setdefault(n, x) - # Set this narrow-specific dict so we have the lowest manifest - # revnum to look up for this cl revnum. (Part of mapping - # changelog ellipsis parents to manifest ellipsis parents) - self.next_clrev_to_localrev.setdefault(cl.rev(x), - mfrevlog.rev(n)) - # We can't trust the changed files list in the changeset if the - # client requested a shallow clone. - if self.is_shallow: - changedfiles.update(mfl[c[0]].read().keys()) - else: - changedfiles.update(c[3]) - # END NARROW HACK - # Record a complete list of potentially-changed files in - # this manifest. - return x - - self._verbosenote(_('uncompressed size of bundle content:\n')) - size = 0 - for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')): - size += len(chunk) - yield chunk - self._verbosenote(_('%8.i (changelog)\n') % size) - - # We need to make sure that the linkrev in the changegroup refers to - # the first changeset that introduced the manifest or file revision. - # The fastpath is usually safer than the slowpath, because the filelogs - # are walked in revlog order. - # - # When taking the slowpath with reorder=None and the manifest revlog - # uses generaldelta, the manifest may be walked in the "wrong" order. - # Without 'clrevorder', we would get an incorrect linkrev (see fix in - # cc0ff93d0c0c). 
- # - # When taking the fastpath, we are only vulnerable to reordering - # of the changelog itself. The changelog never uses generaldelta, so - # it is only reordered when reorder=True. To handle this case, we - # simply take the slowpath, which already has the 'clrevorder' logic. - # This was also fixed in cc0ff93d0c0c. - fastpathlinkrev = fastpathlinkrev and not self._reorder - # Treemanifests don't work correctly with fastpathlinkrev - # either, because we don't discover which directory nodes to - # send along with files. This could probably be fixed. - fastpathlinkrev = fastpathlinkrev and ( - 'treemanifest' not in repo.requirements) - # Shallow clones also don't work correctly with fastpathlinkrev - # because file nodes may need to be sent for a manifest even if they - # weren't introduced by that manifest. - fastpathlinkrev = fastpathlinkrev and not self.is_shallow - - for chunk in self.generatemanifests(commonrevs, clrevorder, - fastpathlinkrev, mfs, fnodes, source): - yield chunk - # BEGIN NARROW HACK - mfdicts = None - if self.is_shallow: - mfdicts = [(self._repo.manifestlog[n].read(), lr) - for (n, lr) in mfs.iteritems()] - # END NARROW HACK - mfs.clear() - clrevs = set(cl.rev(x) for x in clnodes) - - if not fastpathlinkrev: - def linknodes(unused, fname): - return fnodes.get(fname, {}) - else: - cln = cl.node - def linknodes(filerevlog, fname): - llr = filerevlog.linkrev - fln = filerevlog.node - revs = ((r, llr(r)) for r in filerevlog) - return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs) - - # BEGIN NARROW HACK - # - # We need to pass the mfdicts variable down into - # generatefiles(), but more than one command might have - # wrapped generatefiles so we can't modify the function - # signature. Instead, we pass the data to ourselves using an - # instance attribute. I'm sorry. - self._mfdicts = mfdicts - # END NARROW HACK - for chunk in self.generatefiles(changedfiles, linknodes, commonrevs, - source): - yield chunk - - yield self.close() - - if clnodes: - repo.hook('outgoing', node=node.hex(clnodes[0]), source=source) - extensions.wrapfunction(changegroup.cg1packer, 'generate', generate) - - def revchunk(orig, self, revlog, rev, prev, linknode): - if not util.safehasattr(self, 'full_nodes'): - # not sending a narrow changegroup - for x in orig(self, revlog, rev, prev, linknode): - yield x - return - # build up some mapping information that's useful later. See - # the local() nested function below. - if not self.changelog_done: - self.clnode_to_rev[linknode] = rev - linkrev = rev - self.clrev_to_localrev[linkrev] = rev - else: - linkrev = self.clnode_to_rev[linknode] - self.clrev_to_localrev[linkrev] = rev - # This is a node to send in full, because the changeset it - # corresponds to was a full changeset. - if linknode in self.full_nodes: - for x in orig(self, revlog, rev, prev, linknode): - yield x - return - # At this point, a node can either be one we should skip or an - # ellipsis. If it's not an ellipsis, bail immediately. - if linkrev not in self.precomputed_ellipsis: - return - linkparents = self.precomputed_ellipsis[linkrev] - def local(clrev): - """Turn a changelog revnum into a local revnum. - - The ellipsis dag is stored as revnums on the changelog, - but when we're producing ellipsis entries for - non-changelog revlogs, we need to turn those numbers into - something local. This does that for us, and during the - changelog sending phase will also expand the stored - mappings as needed. 
- """ - if clrev == node.nullrev: - return node.nullrev - if not self.changelog_done: - # If we're doing the changelog, it's possible that we - # have a parent that is already on the client, and we - # need to store some extra mapping information so that - # our contained ellipsis nodes will be able to resolve - # their parents. - if clrev not in self.clrev_to_localrev: - clnode = revlog.node(clrev) - self.clnode_to_rev[clnode] = clrev - return clrev - # Walk the ellipsis-ized changelog breadth-first looking for a - # change that has been linked from the current revlog. - # - # For a flat manifest revlog only a single step should be necessary - # as all relevant changelog entries are relevant to the flat - # manifest. - # - # For a filelog or tree manifest dirlog however not every changelog - # entry will have been relevant, so we need to skip some changelog - # nodes even after ellipsis-izing. - walk = [clrev] - while walk: - p = walk[0] - walk = walk[1:] - if p in self.clrev_to_localrev: - return self.clrev_to_localrev[p] - elif p in self.full_nodes: - walk.extend([pp for pp in self._repo.changelog.parentrevs(p) - if pp != node.nullrev]) - elif p in self.precomputed_ellipsis: - walk.extend([pp for pp in self.precomputed_ellipsis[p] - if pp != node.nullrev]) - else: - # In this case, we've got an ellipsis with parents - # outside the current bundle (likely an - # incremental pull). We "know" that we can use the - # value of this same revlog at whatever revision - # is pointed to by linknode. "Know" is in scare - # quotes because I haven't done enough examination - # of edge cases to convince myself this is really - # a fact - it works for all the (admittedly - # thorough) cases in our testsuite, but I would be - # somewhat unsurprised to find a case in the wild - # where this breaks down a bit. That said, I don't - # know if it would hurt anything. - for i in xrange(rev, 0, -1): - if revlog.linkrev(i) == clrev: - return i - # We failed to resolve a parent for this node, so - # we crash the changegroup construction. - raise error.Abort( - 'unable to resolve parent while packing %r %r' - ' for changeset %r' % (revlog.indexfile, rev, clrev)) - return node.nullrev - - if not linkparents or ( - revlog.parentrevs(rev) == (node.nullrev, node.nullrev)): - p1, p2 = node.nullrev, node.nullrev - elif len(linkparents) == 1: - p1, = sorted(local(p) for p in linkparents) - p2 = node.nullrev - else: - p1, p2 = sorted(local(p) for p in linkparents) - n = revlog.node(rev) - yield ellipsisdata( - self, rev, revlog, p1, p2, revlog.revision(n), linknode) - extensions.wrapfunction(changegroup.cg1packer, 'revchunk', revchunk) - - def deltaparent(orig, self, revlog, rev, p1, p2, prev): - if util.safehasattr(self, 'full_nodes'): - # TODO: send better deltas when in narrow mode. - # - # changegroup.group() loops over revisions to send, - # including revisions we'll skip. What this means is that - # `prev` will be a potentially useless delta base for all - # ellipsis nodes, as the client likely won't have it. In - # the future we should do bookkeeping about which nodes - # have been sent to the client, and try to be - # significantly smarter about delta bases. This is - # slightly tricky because this same code has to work for - # all revlogs, and we don't have the linkrev/linknode here. - return p1 - return orig(self, revlog, rev, p1, p2, prev) - extensions.wrapfunction(changegroup.cg2packer, 'deltaparent', deltaparent)
--- a/hgext/narrow/narrowcommands.py Sun Aug 19 13:27:02 2018 +0900 +++ b/hgext/narrow/narrowcommands.py Mon Aug 20 09:48:08 2018 -0700 @@ -10,7 +10,6 @@ from mercurial.i18n import _ from mercurial import ( - changegroup, cmdutil, commands, discovery, @@ -24,6 +23,7 @@ pycompat, registrar, repair, + repository, repoview, util, ) @@ -101,7 +101,7 @@ def pullnarrow(orig, repo, *args, **kwargs): if opts_narrow: - repo.requirements.add(changegroup.NARROW_REQUIREMENT) + repo.requirements.add(repository.NARROW_REQUIREMENT) repo._writerequirements() return orig(repo, *args, **kwargs) @@ -114,7 +114,7 @@ def pullnarrowcmd(orig, ui, repo, *args, **opts): """Wraps pull command to allow modifying narrow spec.""" wrappedextraprepare = util.nullcontextmanager() - if changegroup.NARROW_REQUIREMENT in repo.requirements: + if repository.NARROW_REQUIREMENT in repo.requirements: def pullbundle2extraprepare_widen(orig, pullop, kwargs): orig(pullop, kwargs) @@ -128,7 +128,7 @@ def archivenarrowcmd(orig, ui, repo, *args, **opts): """Wraps archive command to narrow the default includes.""" - if changegroup.NARROW_REQUIREMENT in repo.requirements: + if repository.NARROW_REQUIREMENT in repo.requirements: repo_includes, repo_excludes = repo.narrowpats includes = set(opts.get(r'include', [])) excludes = set(opts.get(r'exclude', [])) @@ -142,7 +142,7 @@ def pullbundle2extraprepare(orig, pullop, kwargs): repo = pullop.repo - if changegroup.NARROW_REQUIREMENT not in repo.requirements: + if repository.NARROW_REQUIREMENT not in repo.requirements: return orig(pullop, kwargs) if narrowbundle2.NARROWCAP not in pullop.remotebundle2caps: @@ -331,7 +331,7 @@ empty and will not match any files. """ opts = pycompat.byteskwargs(opts) - if changegroup.NARROW_REQUIREMENT not in repo.requirements: + if repository.NARROW_REQUIREMENT not in repo.requirements: ui.warn(_('The narrow command is only supported on respositories cloned' ' with --narrow.\n')) return 1
--- a/hgext/narrow/narrowdirstate.py Sun Aug 19 13:27:02 2018 +0900 +++ b/hgext/narrow/narrowdirstate.py Mon Aug 20 09:48:08 2018 -0700 @@ -11,8 +11,6 @@ from mercurial import ( error, match as matchmod, - narrowspec, - util as hgutil, ) def wrapdirstate(repo, dirstate): @@ -29,10 +27,6 @@ return fn(self, *args) return _wrapper - def _narrowbackupname(backupname): - assert 'dirstate' in backupname - return backupname.replace('dirstate', narrowspec.FILENAME) - class narrowdirstate(dirstate.__class__): def walk(self, match, subrepos, unknown, ignored, full=True, narrowonly=True): @@ -78,22 +72,5 @@ allfiles = [f for f in allfiles if repo.narrowmatch()(f)] super(narrowdirstate, self).rebuild(parent, allfiles, changedfiles) - def restorebackup(self, tr, backupname): - self._opener.rename(_narrowbackupname(backupname), - narrowspec.FILENAME, checkambig=True) - super(narrowdirstate, self).restorebackup(tr, backupname) - - def savebackup(self, tr, backupname): - super(narrowdirstate, self).savebackup(tr, backupname) - - narrowbackupname = _narrowbackupname(backupname) - self._opener.tryunlink(narrowbackupname) - hgutil.copyfile(self._opener.join(narrowspec.FILENAME), - self._opener.join(narrowbackupname), hardlink=True) - - def clearbackup(self, tr, backupname): - super(narrowdirstate, self).clearbackup(tr, backupname) - self._opener.unlink(_narrowbackupname(backupname)) - dirstate.__class__ = narrowdirstate return dirstate
--- a/hgext/narrow/narrowrepo.py Sun Aug 19 13:27:02 2018 +0900 +++ b/hgext/narrow/narrowrepo.py Mon Aug 20 09:48:08 2018 -0700 @@ -7,35 +7,11 @@ from __future__ import absolute_import -from mercurial import ( - changegroup, - hg, - narrowspec, - scmutil, -) - from . import ( narrowdirstate, narrowrevlog, ) -def wrappostshare(orig, sourcerepo, destrepo, **kwargs): - orig(sourcerepo, destrepo, **kwargs) - if changegroup.NARROW_REQUIREMENT in sourcerepo.requirements: - with destrepo.wlock(): - with destrepo.vfs('shared', 'a') as fp: - fp.write(narrowspec.FILENAME + '\n') - -def unsharenarrowspec(orig, ui, repo, repopath): - if (changegroup.NARROW_REQUIREMENT in repo.requirements - and repo.path == repopath and repo.shared()): - srcrepo = hg.sharedreposource(repo) - with srcrepo.vfs(narrowspec.FILENAME) as f: - spec = f.read() - with repo.vfs(narrowspec.FILENAME, 'w') as f: - f.write(spec) - return orig(ui, repo, repopath) - def wraprepo(repo): """Enables narrow clone functionality on a single local repository.""" @@ -46,23 +22,6 @@ narrowrevlog.makenarrowfilelog(fl, self.narrowmatch()) return fl - # I'm not sure this is the right place to do this filter. - # context._manifestmatches() would probably be better, or perhaps - # move it to a later place, in case some of the callers do want to know - # which directories changed. This seems to work for now, though. - def status(self, *args, **kwargs): - s = super(narrowrepository, self).status(*args, **kwargs) - narrowmatch = self.narrowmatch() - modified = list(filter(narrowmatch, s.modified)) - added = list(filter(narrowmatch, s.added)) - removed = list(filter(narrowmatch, s.removed)) - deleted = list(filter(narrowmatch, s.deleted)) - unknown = list(filter(narrowmatch, s.unknown)) - ignored = list(filter(narrowmatch, s.ignored)) - clean = list(filter(narrowmatch, s.clean)) - return scmutil.status(modified, added, removed, deleted, unknown, - ignored, clean) - def _makedirstate(self): dirstate = super(narrowrepository, self)._makedirstate() return narrowdirstate.wrapdirstate(self, dirstate)
--- a/hgext/narrow/narrowtemplates.py Sun Aug 19 13:27:02 2018 +0900 +++ b/hgext/narrow/narrowtemplates.py Mon Aug 20 09:48:08 2018 -0700 @@ -42,7 +42,7 @@ return 'outsidenarrow' return '' -@revsetpredicate('ellipsis') +@revsetpredicate('ellipsis()') def ellipsisrevset(repo, subset, x): """Changesets that are ellipsis nodes.""" return subset.filter(lambda r: _isellipsis(repo, r))
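[Editor's note: registering the predicate as 'ellipsis()' rather than 'ellipsis' documents its (empty) argument list in the revset help; query syntax is unchanged, e.g. hg log -r 'ellipsis()'.]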
--- a/hgext/patchbomb.py Sun Aug 19 13:27:02 2018 +0900 +++ b/hgext/patchbomb.py Mon Aug 20 09:48:08 2018 -0700 @@ -73,7 +73,7 @@ ''' from __future__ import absolute_import -import email as emailmod +import email.encoders as emailencoders import email.generator as emailgen import email.mime.base as emimebase import email.mime.multipart as emimemultipart @@ -139,6 +139,11 @@ default=None, ) +if pycompat.ispy3: + _bytesgenerator = emailgen.BytesGenerator +else: + _bytesgenerator = emailgen.Generator + # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should # be specifying the version(s) of Mercurial they are tested with, or @@ -273,10 +278,11 @@ seqno=idx, total=total) else: patchname = cmdutil.makefilename(repo[node], '%b.patch') - disposition = 'inline' + disposition = r'inline' if opts.get('attach'): - disposition = 'attachment' - p['Content-Disposition'] = disposition + '; filename=' + patchname + disposition = r'attachment' + p[r'Content-Disposition'] = ( + disposition + r'; filename=' + encoding.strfromlocal(patchname)) msg.attach(p) else: msg = mail.mimetextpatch(body, display=opts.get('test')) @@ -370,12 +376,12 @@ msg = emimemultipart.MIMEMultipart() if body: msg.attach(mail.mimeencode(ui, body, _charsets, opts.get(r'test'))) - datapart = emimebase.MIMEBase('application', 'x-mercurial-bundle') + datapart = emimebase.MIMEBase(r'application', r'x-mercurial-bundle') datapart.set_payload(bundle) bundlename = '%s.hg' % opts.get(r'bundlename', 'bundle') - datapart.add_header('Content-Disposition', 'attachment', - filename=bundlename) - emailmod.Encoders.encode_base64(datapart) + datapart.add_header(r'Content-Disposition', r'attachment', + filename=encoding.strfromlocal(bundlename)) + emailencoders.encode_base64(datapart) msg.attach(datapart) msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get(r'test')) return [(msg, subj, None)] @@ -463,6 +469,11 @@ ui.status(_("no changes found\n")) return revs +def _msgid(node, timestamp): + hostname = encoding.strtolocal(socket.getfqdn()) + hostname = encoding.environ.get('HGHOSTNAME', hostname) + return '<%s.%d@%s>' % (node, timestamp, hostname) + emailopts = [ ('', 'body', None, _('send patches as inline message text (default)')), ('a', 'attach', None, _('send patches as attachments')), @@ -671,8 +682,7 @@ start_time = dateutil.makedate() def genmsgid(id): - return '<%s.%d@%s>' % (id[:20], int(start_time[0]), - encoding.strtolocal(socket.getfqdn())) + return _msgid(id[:20], int(start_time[0])) # deprecated config: patchbomb.from sender = (opts.get('from') or ui.config('email', 'from') or @@ -780,10 +790,27 @@ m['Bcc'] = ', '.join(bcc) if replyto: m['Reply-To'] = ', '.join(replyto) + # Fix up all headers to be native strings. + # TODO(durin42): this should probably be cleaned up above in the future. 
+ if pycompat.ispy3: + for hdr, val in list(m.items()): + change = False + if isinstance(hdr, bytes): + del m[hdr] + hdr = pycompat.strurl(hdr) + change = True + if isinstance(val, bytes): + val = pycompat.strurl(val) + if not change: + # prevent duplicate headers + del m[hdr] + change = True + if change: + m[hdr] = val if opts.get('test'): ui.status(_('displaying '), subj, ' ...\n') ui.pager('email') - generator = emailgen.Generator(ui, mangle_from_=False) + generator = _bytesgenerator(ui, mangle_from_=False) try: generator.flatten(m, 0) ui.write('\n') @@ -799,8 +826,10 @@ # Exim does not remove the Bcc field del m['Bcc'] fp = stringio() - generator = emailgen.Generator(fp, mangle_from_=False) + generator = _bytesgenerator(fp, mangle_from_=False) generator.flatten(m, 0) - sendmail(sender_addr, to + bcc + cc, fp.getvalue()) + alldests = to + bcc + cc + alldests = [encoding.strfromlocal(d) for d in alldests] + sendmail(sender_addr, alldests, fp.getvalue()) progress.complete()
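[Editor's note: the patchbomb hunks are py3 email plumbing: the flattened patch payload is bytes, and py3's email.generator.Generator writes str, so a BytesGenerator is selected once at import time; header names and values likewise must be native str on py3. A condensed, stdlib-only sketch of both pieces -- simplified relative to the diff, which also guards against leaving duplicate headers:

    import email.generator as emailgen
    import email.message
    import sys

    ispy3 = sys.version_info[0] >= 3
    _bytesgenerator = emailgen.BytesGenerator if ispy3 else emailgen.Generator

    def fixheadervalues(m):
        # headers must be native strings on py3; re-set any bytes values
        for hdr, val in list(m.items()):
            if isinstance(val, bytes):
                del m[hdr]                    # avoid leaving a duplicate header
                m[hdr] = val.decode('ascii')

    m = email.message.Message()
    m['Subject'] = '[PATCH] example'
    fixheadervalues(m)

The new _msgid helper also lets HGHOSTNAME override the FQDN, which makes Message-Id generation reproducible (useful in tests).]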
--- a/hgext/rebase.py Sun Aug 19 13:27:02 2018 +0900 +++ b/hgext/rebase.py Mon Aug 20 09:48:08 2018 -0700 @@ -177,6 +177,7 @@ if e: self.extrafns = [e] + self.backupf = ui.configbool('ui', 'history-editing-backup') self.keepf = opts.get('keep', False) self.keepbranchesf = opts.get('keepbranches', False) self.obsoletenotrebased = {} @@ -343,7 +344,9 @@ msg = _('cannot continue inconsistent rebase') hint = _('use "hg rebase --abort" to clear broken state') raise error.Abort(msg, hint=hint) + if isabort: + backup = backup and self.backupf return abort(self.repo, self.originalwd, self.destmap, self.state, activebookmark=self.activebookmark, backup=backup, suppwarns=suppwarns) @@ -632,7 +635,7 @@ if self.collapsef and not self.keepf: collapsedas = newnode clearrebased(ui, repo, self.destmap, self.state, self.skipped, - collapsedas, self.keepf, fm=fm) + collapsedas, self.keepf, fm=fm, backup=self.backupf) clearstatus(repo) clearcollapsemsg(repo) @@ -670,6 +673,7 @@ ('D', 'detach', False, _('(DEPRECATED)')), ('i', 'interactive', False, _('(DEPRECATED)')), ('t', 'tool', '', _('specify merge tool')), + ('', 'stop', False, _('stop interrupted rebase')), ('c', 'continue', False, _('continue an interrupted rebase')), ('a', 'abort', False, _('abort an interrupted rebase')), ('', 'auto-orphans', '', _('automatically rebase orphan revisions ' @@ -729,7 +733,8 @@ deleted, there is no hook presently available for this. If a rebase is interrupted to manually resolve a conflict, it can be - continued with --continue/-c or aborted with --abort/-a. + continued with --continue/-c, aborted with --abort/-a, or stopped with + --stop. .. container:: verbose @@ -800,22 +805,20 @@ opts = pycompat.byteskwargs(opts) inmemory = ui.configbool('rebase', 'experimental.inmemory') dryrun = opts.get('dry_run') - if dryrun: - if opts.get('abort'): - raise error.Abort(_('cannot specify both --dry-run and --abort')) - if opts.get('continue'): - raise error.Abort(_('cannot specify both --dry-run and --continue')) - if opts.get('confirm'): - dryrun = True - if opts.get('dry_run'): - raise error.Abort(_('cannot specify both --confirm and --dry-run')) - if opts.get('abort'): - raise error.Abort(_('cannot specify both --confirm and --abort')) - if opts.get('continue'): - raise error.Abort(_('cannot specify both --confirm and --continue')) + confirm = opts.get('confirm') + selactions = [k for k in ['abort', 'stop', 'continue'] if opts.get(k)] + if len(selactions) > 1: + raise error.Abort(_('cannot use --%s with --%s') + % tuple(selactions[:2])) + action = selactions[0] if selactions else None + if dryrun and action: + raise error.Abort(_('cannot specify both --dry-run and --%s') % action) + if confirm and action: + raise error.Abort(_('cannot specify both --confirm and --%s') % action) + if dryrun and confirm: + raise error.Abort(_('cannot specify both --confirm and --dry-run')) - if (opts.get('continue') or opts.get('abort') or - repo.currenttransaction() is not None): + if action or repo.currenttransaction() is not None: # in-memory rebase is not compatible with resuming rebases. # (Or if it is run within a transaction, since the restart logic can # fail the entire transaction.) 
@@ -830,24 +833,43 @@ opts['rev'] = [revsetlang.formatspec('%ld and orphan()', userrevs)] opts['dest'] = '_destautoorphanrebase(SRC)' - if dryrun: - return _dryrunrebase(ui, repo, opts) + if dryrun or confirm: + return _dryrunrebase(ui, repo, action, opts) + elif action == 'stop': + rbsrt = rebaseruntime(repo, ui) + with repo.wlock(), repo.lock(): + rbsrt.restorestatus() + if rbsrt.collapsef: + raise error.Abort(_("cannot stop in --collapse session")) + allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt) + if not (rbsrt.keepf or allowunstable): + raise error.Abort(_("cannot remove original changesets with" + " unrebased descendants"), + hint=_('either enable obsmarkers to allow unstable ' + 'revisions or use --keep to keep original ' + 'changesets')) + if needupdate(repo, rbsrt.state): + # update to the current working revision + # to clear interrupted merge + hg.updaterepo(repo, rbsrt.originalwd, overwrite=True) + rbsrt._finishrebase() + return 0 elif inmemory: try: # in-memory merge doesn't support conflicts, so if we hit any, abort # and re-run as an on-disk merge. overrides = {('rebase', 'singletransaction'): True} with ui.configoverride(overrides, 'rebase'): - return _dorebase(ui, repo, opts, inmemory=inmemory) + return _dorebase(ui, repo, action, opts, inmemory=inmemory) except error.InMemoryMergeConflictsError: ui.warn(_('hit merge conflicts; re-running rebase without in-memory' ' merge\n')) - _dorebase(ui, repo, {'abort': True}) - return _dorebase(ui, repo, opts, inmemory=False) + _dorebase(ui, repo, action='abort', opts={}) + return _dorebase(ui, repo, action, opts, inmemory=False) else: - return _dorebase(ui, repo, opts) + return _dorebase(ui, repo, action, opts) -def _dryrunrebase(ui, repo, opts): +def _dryrunrebase(ui, repo, action, opts): rbsrt = rebaseruntime(repo, ui, inmemory=True, opts=opts) confirm = opts.get('confirm') if confirm: @@ -860,7 +882,7 @@ try: overrides = {('rebase', 'singletransaction'): True} with ui.configoverride(overrides, 'rebase'): - _origrebase(ui, repo, opts, rbsrt, inmemory=True, + _origrebase(ui, repo, action, opts, rbsrt, inmemory=True, leaveunfinished=True) except error.InMemoryMergeConflictsError: ui.status(_('hit a merge conflict\n')) @@ -886,11 +908,13 @@ rbsrt._prepareabortorcontinue(isabort=True, backup=False, suppwarns=True) -def _dorebase(ui, repo, opts, inmemory=False): +def _dorebase(ui, repo, action, opts, inmemory=False): rbsrt = rebaseruntime(repo, ui, inmemory, opts) - return _origrebase(ui, repo, opts, rbsrt, inmemory=inmemory) + return _origrebase(ui, repo, action, opts, rbsrt, inmemory=inmemory) -def _origrebase(ui, repo, opts, rbsrt, inmemory=False, leaveunfinished=False): +def _origrebase(ui, repo, action, opts, rbsrt, inmemory=False, + leaveunfinished=False): + assert action != 'stop' with repo.wlock(), repo.lock(): # Validate input and define rebasing points destf = opts.get('dest', None) @@ -900,8 +924,6 @@ # search default destination in this space # used in the 'hg pull --rebase' case, see issue 5214. 
destspace = opts.get('_destspace') - contf = opts.get('continue') - abortf = opts.get('abort') if opts.get('interactive'): try: if extensions.find('histedit'): @@ -917,22 +939,20 @@ raise error.Abort( _('message can only be specified with collapse')) - if contf or abortf: - if contf and abortf: - raise error.Abort(_('cannot use both abort and continue')) + if action: if rbsrt.collapsef: raise error.Abort( _('cannot use collapse with continue or abort')) if srcf or basef or destf: raise error.Abort( _('abort and continue do not allow specifying revisions')) - if abortf and opts.get('tool', False): + if action == 'abort' and opts.get('tool', False): ui.warn(_('tool option will be ignored\n')) - if contf: + if action == 'continue': ms = mergemod.mergestate.read(repo) mergeutil.checkunresolved(ms) - retcode = rbsrt._prepareabortorcontinue(abortf) + retcode = rbsrt._prepareabortorcontinue(isabort=(action == 'abort')) if retcode is not None: return retcode else: @@ -1728,7 +1748,7 @@ return originalwd, destmap, state def clearrebased(ui, repo, destmap, state, skipped, collapsedas=None, - keepf=False, fm=None): + keepf=False, fm=None, backup=True): """dispose of rebased revision at the end of the rebase If `collapsedas` is not None, the rebase was a collapse whose result if the @@ -1736,6 +1756,9 @@ If `keepf` is not True, the rebase has --keep set and no nodes should be removed (but bookmarks still need to be moved). + + If `backup` is False, no backup will be stored when stripping rebased + revisions. """ tonode = repo.changelog.node replacements = {} @@ -1751,7 +1774,7 @@ else: succs = (newnode,) replacements[oldnode] = succs - scmutil.cleanupnodes(repo, replacements, 'rebase', moves) + scmutil.cleanupnodes(repo, replacements, 'rebase', moves, backup=backup) if fm: hf = fm.hexfunc fl = fm.formatlist
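[Editor's note: the rebase hunks add a third resume action, --stop, alongside --abort/--continue: it ends an interrupted rebase while keeping the changesets already rebased, hence the guards against --collapse and against removing originals that still have unrebased descendants. The old pairwise option checks are collapsed into one exclusivity pass; a standalone sketch of that pattern, with opts as a plain dict:

    def pickaction(opts):
        # at most one of abort/stop/continue may be given
        selected = [k for k in ('abort', 'stop', 'continue') if opts.get(k)]
        if len(selected) > 1:
            raise ValueError('cannot use --%s with --%s' % tuple(selected[:2]))
        return selected[0] if selected else None

    assert pickaction({'stop': True}) == 'stop'
    assert pickaction({}) is None

The new ui.history-editing-backup knob (read into self.backupf) decides whether strip backups are written when rebased revisions are cleaned up.]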
--- a/hgext/shelve.py Sun Aug 19 13:27:02 2018 +0900 +++ b/hgext/shelve.py Mon Aug 20 09:48:08 2018 -0700 @@ -41,6 +41,7 @@ lock as lockmod, mdiff, merge, + narrowspec, node as nodemod, patch, phases, @@ -314,10 +315,13 @@ '''Abort current transaction for shelve/unshelve, but keep dirstate ''' tr = repo.currenttransaction() - backupname = 'dirstate.shelve' - repo.dirstate.savebackup(tr, backupname) + dirstatebackupname = 'dirstate.shelve' + narrowspecbackupname = 'narrowspec.shelve' + repo.dirstate.savebackup(tr, dirstatebackupname) + narrowspec.savebackup(repo, narrowspecbackupname) tr.abort() - repo.dirstate.restorebackup(None, backupname) + narrowspec.restorebackup(repo, narrowspecbackupname) + repo.dirstate.restorebackup(None, dirstatebackupname) def createcmd(ui, repo, pats, opts): """subcommand that creates a new shelve""" @@ -783,7 +787,7 @@ tr.close() nodestoremove = [repo.changelog.node(rev) - for rev in xrange(oldtiprev, len(repo))] + for rev in pycompat.xrange(oldtiprev, len(repo))] shelvedstate.save(repo, basename, pctx, tmpwctx, nodestoremove, branchtorestore, opts.get('keep'), activebookmark) raise error.InterventionRequired(
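[Editor's note: _aborttransaction must roll the transaction back without losing working-copy state, and with narrow clones that state now includes the narrowspec, so it is backed up and restored alongside the dirstate -- note the restore order is the reverse of the save order. A generic sketch of the pattern, with hypothetical save/restore pairs standing in for dirstate.savebackup/narrowspec.savebackup and friends:

    def abortkeeping(tr, backups):
        # backups: list of (save, restore) callable pairs (hypothetical shape)
        for save, _restore in backups:
            save()
        tr.abort()                     # roll the transaction back...
        for _save, restore in reversed(backups):
            restore()                  # ...then reinstate the preserved state
]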
--- a/hgext/uncommit.py Sun Aug 19 13:27:02 2018 +0900 +++ b/hgext/uncommit.py Mon Aug 20 09:48:08 2018 -0700 @@ -182,7 +182,7 @@ with repo.dirstate.parentchange(): repo.dirstate.setparents(newid, node.nullid) - s = repo.status(old.p1(), old, match=match) + s = old.p1().status(old, match=match) _fixdirstate(repo, old, repo[newid], s) def predecessormarkers(ctx):
--- a/hgext/win32text.py Sun Aug 19 13:27:02 2018 +0900 +++ b/hgext/win32text.py Mon Aug 20 09:48:08 2018 -0700 @@ -49,6 +49,7 @@ short, ) from mercurial import ( + pycompat, registrar, ) from mercurial.utils import ( @@ -141,7 +142,8 @@ # changegroup that contains an unacceptable commit followed later # by a commit that fixes the problem. tip = repo['tip'] - for rev in xrange(repo.changelog.tiprev(), repo[node].rev() - 1, -1): + for rev in pycompat.xrange(repo.changelog.tiprev(), + repo[node].rev() - 1, -1): c = repo[rev] for f in c.files(): if f in seen or f not in tip or f not in c:
--- a/i18n/hggettext Sun Aug 19 13:27:02 2018 +0900 +++ b/i18n/hggettext Mon Aug 20 09:48:08 2018 -0700 @@ -63,7 +63,7 @@ doctestre = re.compile(r'^ +>>> ', re.MULTILINE) -def offset(src, doc, name, default): +def offset(src, doc, name, lineno, default): """Compute offset or issue a warning on stdout.""" # remove doctest part, in order to avoid backslash mismatching m = doctestre.search(doc) @@ -76,8 +76,9 @@ # This can happen if the docstring contains unnecessary escape # sequences such as \" in a triple-quoted string. The problem # is that \" is turned into " and so doc wont appear in src. - sys.stderr.write("warning: unknown offset in %s, assuming %d lines\n" - % (name, default)) + sys.stderr.write("%s:%d:warning:" + " unknown docstr offset, assuming %d lines\n" + % (name, lineno, default)) return default else: return src.count('\n', 0, end) @@ -106,7 +107,7 @@ if not path.startswith('mercurial/') and mod.__doc__: with open(path) as fobj: src = fobj.read() - lineno = 1 + offset(src, mod.__doc__, path, 7) + lineno = 1 + offset(src, mod.__doc__, path, 1, 7) print(poentry(path, lineno, mod.__doc__)) functions = list(getattr(mod, 'i18nfunctions', [])) @@ -129,7 +130,6 @@ actualpath = '%s%s.py' % (funcmod.__name__.replace('.', '/'), extra) src = inspect.getsource(func) - name = "%s.%s" % (actualpath, func.__name__) lineno = inspect.getsourcelines(func)[1] doc = docobj.__doc__ origdoc = getattr(docobj, '_origdoc', '') @@ -137,9 +137,9 @@ doc = doc.rstrip() origdoc = origdoc.rstrip() if origdoc: - lineno += offset(src, origdoc, name, 1) + lineno += offset(src, origdoc, actualpath, lineno, 1) else: - lineno += offset(src, doc, name, 1) + lineno += offset(src, doc, actualpath, lineno, 1) print(poentry(actualpath, lineno, doc))
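[Editor's note: threading lineno through offset() upgrades the diagnostic to the conventional file:line:warning: shape that editors and log scanners can parse, and the now-redundant "modulename.funcname" label is dropped in favor of the actual path. An illustrative line of the new output -- path and numbers hypothetical:

    hgext/example.py:42:warning: unknown docstr offset, assuming 1 lines
]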
--- a/mercurial/__init__.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/__init__.py Mon Aug 20 09:48:08 2018 -0700 @@ -182,7 +182,7 @@ continue r, c = t.start l = (b'; from mercurial.pycompat import ' - b'delattr, getattr, hasattr, setattr, xrange, ' + b'delattr, getattr, hasattr, setattr, ' b'open, unicode\n') for u in tokenize.tokenize(io.BytesIO(l).readline): if u.type in (tokenize.ENCODING, token.ENDMARKER): @@ -223,7 +223,7 @@ # ``replacetoken`` or any mechanism that changes semantics of module # loading is changed. Otherwise cached bytecode may get loaded without # the new transformation mechanisms applied. - BYTECODEHEADER = b'HG\x00\x0a' + BYTECODEHEADER = b'HG\x00\x0b' class hgloader(importlib.machinery.SourceFileLoader): """Custom module loader that transforms source code.
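[Editor's note: removing xrange from the implicit-import preamble changes what the py3 source transformer emits, so the cached-bytecode magic is bumped (HG\x00\x0a -> HG\x00\x0b); caches written by the older transformer then fail the header check and are regenerated, exactly as the comment above the constant warns. A minimal sketch of that style of guard -- names hypothetical:

    BYTECODEHEADER = b'HG\x00\x0b'

    def loadcached(blob):
        # reject caches produced by any other transformer revision
        if not blob.startswith(BYTECODEHEADER):
            return None         # caller falls back to re-transforming source
        return blob[len(BYTECODEHEADER):]

    assert loadcached(b'HG\x00\x0adata') is None
    assert loadcached(b'HG\x00\x0bdata') == b'data'
]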
--- a/mercurial/ancestor.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/ancestor.py Mon Aug 20 09:48:08 2018 -0700 @@ -11,6 +11,9 @@ import heapq from .node import nullrev +from . import ( + pycompat, +) def commonancestorsheads(pfunc, *nodes): """Returns a set with the heads of all common ancestors of all nodes, @@ -174,7 +177,7 @@ # no revs to consider return - for curr in xrange(start, min(revs) - 1, -1): + for curr in pycompat.xrange(start, min(revs) - 1, -1): if curr not in bases: continue revs.discard(curr) @@ -215,7 +218,7 @@ # exit. missing = [] - for curr in xrange(start, nullrev, -1): + for curr in pycompat.xrange(start, nullrev, -1): if not revsvisit: break
--- a/mercurial/branchmap.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/branchmap.py Mon Aug 20 09:48:08 2018 -0700 @@ -38,15 +38,11 @@ return filename def read(repo): + f = None try: f = repo.cachevfs(_filename(repo)) - lines = f.read().split('\n') - f.close() - except (IOError, OSError): - return None - - try: - cachekey = lines.pop(0).split(" ", 2) + lineiter = iter(f) + cachekey = next(lineiter).rstrip('\n').split(" ", 2) last, lrev = cachekey[:2] last, lrev = bin(last), int(lrev) filteredhash = None @@ -58,7 +54,8 @@ # invalidate the cache raise ValueError(r'tip differs') cl = repo.changelog - for l in lines: + for l in lineiter: + l = l.rstrip('\n') if not l: continue node, state, label = l.split(" ", 2) @@ -72,6 +69,10 @@ partial.setdefault(label, []).append(node) if state == 'c': partial._closednodes.add(node) + + except (IOError, OSError): + return None + except Exception as inst: if repo.ui.debugflag: msg = 'invalid branchheads cache' @@ -80,6 +81,11 @@ msg += ': %s\n' repo.ui.debug(msg % pycompat.bytestr(inst)) partial = None + + finally: + if f: + f.close() + return partial ### Nearest subset relation
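[Editor's note: branchmap.read() now iterates the cache file lazily instead of slurping it and splitting on newlines, and the restructured try/except/finally guarantees the handle is closed on every path, including the deliberate ValueError raised when the cached tip is stale. A condensed sketch of the control flow, parsing details elided:

    def readcache(openfn):
        f = None
        try:
            f = openfn()
            it = iter(f)
            cachekey = next(it, '').rstrip('\n')        # first line: cache key
            entries = [l.rstrip('\n') for l in it if l.strip()]
            return cachekey, entries
        except (IOError, OSError):
            return None                                 # missing cache is not an error
        finally:
            if f:
                f.close()
]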
--- a/mercurial/bundle2.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/bundle2.py Mon Aug 20 09:48:08 2018 -0700 @@ -2223,11 +2223,11 @@ total += header[1] + header[2] utf8branch = inpart.read(header[0]) branch = encoding.tolocal(utf8branch) - for x in xrange(header[1]): + for x in pycompat.xrange(header[1]): node = inpart.read(20) rev = cl.rev(node) cache.setdata(branch, rev, node, False) - for x in xrange(header[2]): + for x in pycompat.xrange(header[2]): node = inpart.read(20) rev = cl.rev(node) cache.setdata(branch, rev, node, True)
--- a/mercurial/bundlerepo.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/bundlerepo.py Mon Aug 20 09:48:08 2018 -0700 @@ -80,7 +80,7 @@ # start, size, full unc. size, base (unused), link, p1, p2, node e = (revlog.offset_type(start, flags), size, -1, baserev, link, self.rev(p1), self.rev(p2), node) - self.index.insert(-1, e) + self.index.append(e) self.nodemap[node] = n self.bundlerevs.add(n) n += 1
--- a/mercurial/cext/parsers.c Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/cext/parsers.c Mon Aug 20 09:48:08 2018 -0700 @@ -713,7 +713,7 @@ void manifest_module_init(PyObject *mod); void revlog_module_init(PyObject *mod); -static const int version = 5; +static const int version = 7; static void module_init(PyObject *mod) {
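[Editor's note: these two hunks pair with the revlog.c rewrite that follows. The C index used to keep a synthetic nullid entry at the end of its list, so Python callers wrote index.insert(-1, e) to slot new revisions in front of it; with the sentinel gone, rev -1 is special-cased in C and a plain append is correct. The parsers module version bump (5 -> 7) makes a stale prebuilt cext module fail the version check instead of misbehaving. A toy model of the indexing convention change:

    # old convention: a trailing null sentinel rides along in the list
    old = ['r0', 'r1', 'null']
    old.insert(-1, 'r2')                 # keep the sentinel last
    assert old == ['r0', 'r1', 'r2', 'null']

    # new convention: no sentinel; nullrev (-1) is handled out of band
    new = ['r0', 'r1']
    new.append('r2')
    assert new == ['r0', 'r1', 'r2']

The revlog.c hunk below also extracts the node->rev trie into its own nodetree struct holding a back-pointer to the index, and changes the leaf encoding from -(rev + 1) to -(rev + 2). The payoff for the extra offset: rev == -2 encodes to 0, the "empty slot" value, which is exactly how the new nt_delete_node clears an entry. A small pure-Python model of the same 16-way trie and encoding -- illustrative only, far simpler than the C version:

    class NodeTree(object):
        """Slot values: 0 = empty, >0 = child node index, <0 = leaf -(rev + 2)."""

        def __init__(self, revhex):
            self.revhex = revhex      # rev -> full hex node (stands in for the index)
            self.nodes = [[0] * 16]   # node 0 is the root

        def insert(self, hexnode, rev):
            off, level = 0, 0
            while level < len(hexnode):
                k = int(hexnode[level], 16)
                v = self.nodes[off][k]
                if v == 0:                        # empty: store the leaf
                    self.nodes[off][k] = -rev - 2
                    return
                if v < 0:                         # occupied leaf
                    oldrev = -(v + 2)
                    oldhex = self.revhex[oldrev]
                    if oldhex == hexnode:         # same node: overwrite
                        self.nodes[off][k] = -rev - 2
                        return
                    self.nodes.append([0] * 16)   # split: push old leaf down
                    noff = len(self.nodes) - 1
                    self.nodes[off][k] = noff
                    level += 1
                    self.nodes[noff][int(oldhex[level], 16)] = v
                    off = noff
                    continue
                off, level = v, level + 1         # interior node: descend

        def find(self, hexnode):
            off = 0
            for ch in hexnode:
                v = self.nodes[off][int(ch, 16)]
                if v == 0:
                    return None
                if v < 0:
                    rev = -(v + 2)
                    # verify against the "index", as the C lookup does
                    return rev if self.revhex[rev] == hexnode else None
                off = v
            return None

    revhex = {0: 'ab12', 1: 'ab34', 2: 'cd56'}    # 4-nybble toy "hashes"
    nt = NodeTree(revhex)
    for rev, hx in revhex.items():
        nt.insert(hx, rev)
    assert nt.find('ab34') == 1 and nt.find('ffff') is None
    nt.insert('ab34', -2)                         # delete: -(-2) - 2 == 0 == empty
    assert nt.find('ab34') is None
]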
--- a/mercurial/cext/revlog.c Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/cext/revlog.c Mon Aug 20 09:48:08 2018 -0700 @@ -28,15 +28,26 @@ #define PyInt_AsLong PyLong_AsLong #endif +typedef struct indexObjectStruct indexObject; + +typedef struct { + int children[16]; +} nodetreenode; + /* * A base-16 trie for fast node->rev mapping. * * Positive value is index of the next node in the trie - * Negative value is a leaf: -(rev + 1) + * Negative value is a leaf: -(rev + 2) * Zero is empty */ typedef struct { - int children[16]; + indexObject *index; + nodetreenode *nodes; + unsigned length; /* # nodes in use */ + unsigned capacity; /* # nodes allocated */ + int depth; /* maximum depth of tree */ + int splits; /* # splits performed */ } nodetree; /* @@ -51,7 +62,7 @@ * With string keys, we lazily perform a reverse mapping from node to * rev, using a base-16 trie. */ -typedef struct { +struct indexObjectStruct { PyObject_HEAD /* Type-specific fields go here. */ PyObject *data; /* raw bytes of index */ @@ -64,15 +75,11 @@ PyObject *headrevs; /* cache, invalidated on changes */ PyObject *filteredrevs;/* filtered revs set */ nodetree *nt; /* base-16 trie */ - unsigned ntlength; /* # nodes in use */ - unsigned ntcapacity; /* # nodes allocated */ - int ntdepth; /* maximum depth of tree */ - int ntsplits; /* # splits performed */ int ntrev; /* last rev scanned */ int ntlookups; /* # lookups */ int ntmisses; /* # lookups that miss the cache */ int inlined; -} indexObject; +}; static Py_ssize_t index_length(const indexObject *self) { @@ -117,9 +124,8 @@ static inline int index_get_parents(indexObject *self, Py_ssize_t rev, int *ps, int maxrev) { - if (rev >= self->length - 1) { - PyObject *tuple = PyList_GET_ITEM(self->added, - rev - self->length + 1); + if (rev >= self->length) { + PyObject *tuple = PyList_GET_ITEM(self->added, rev - self->length); ps[0] = (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 5)); ps[1] = (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 6)); } else { @@ -158,22 +164,19 @@ Py_ssize_t length = index_length(self); PyObject *entry; - if (pos < 0) - pos += length; + if (pos == -1) { + Py_INCREF(nullentry); + return nullentry; + } if (pos < 0 || pos >= length) { PyErr_SetString(PyExc_IndexError, "revlog index out of range"); return NULL; } - if (pos == length - 1) { - Py_INCREF(nullentry); - return nullentry; - } - - if (pos >= self->length - 1) { + if (pos >= self->length) { PyObject *obj; - obj = PyList_GET_ITEM(self->added, pos - self->length + 1); + obj = PyList_GET_ITEM(self->added, pos - self->length); Py_INCREF(obj); return obj; } @@ -231,15 +234,15 @@ Py_ssize_t length = index_length(self); const char *data; - if (pos == length - 1 || pos == INT_MAX) + if (pos == -1) return nullid; if (pos >= length) return NULL; - if (pos >= self->length - 1) { + if (pos >= self->length) { PyObject *tuple, *str; - tuple = PyList_GET_ITEM(self->added, pos - self->length + 1); + tuple = PyList_GET_ITEM(self->added, pos - self->length); str = PyTuple_GetItem(tuple, 7); return str ? 
PyBytes_AS_STRING(str) : NULL; } @@ -262,47 +265,34 @@ return node; } -static int nt_insert(indexObject *self, const char *node, int rev); +static int nt_insert(nodetree *self, const char *node, int rev); -static int node_check(PyObject *obj, char **node, Py_ssize_t *nodelen) +static int node_check(PyObject *obj, char **node) { - if (PyBytes_AsStringAndSize(obj, node, nodelen) == -1) + Py_ssize_t nodelen; + if (PyBytes_AsStringAndSize(obj, node, &nodelen) == -1) return -1; - if (*nodelen == 20) + if (nodelen == 20) return 0; PyErr_SetString(PyExc_ValueError, "20-byte hash required"); return -1; } -static PyObject *index_insert(indexObject *self, PyObject *args) +static PyObject *index_append(indexObject *self, PyObject *obj) { - PyObject *obj; char *node; - int index; - Py_ssize_t len, nodelen; - - if (!PyArg_ParseTuple(args, "iO", &index, &obj)) - return NULL; + Py_ssize_t len; if (!PyTuple_Check(obj) || PyTuple_GET_SIZE(obj) != 8) { PyErr_SetString(PyExc_TypeError, "8-tuple required"); return NULL; } - if (node_check(PyTuple_GET_ITEM(obj, 7), &node, &nodelen) == -1) + if (node_check(PyTuple_GET_ITEM(obj, 7), &node) == -1) return NULL; len = index_length(self); - if (index < 0) - index += len; - - if (index != len - 1) { - PyErr_SetString(PyExc_IndexError, - "insert only supported at index -1"); - return NULL; - } - if (self->added == NULL) { self->added = PyList_New(0); if (self->added == NULL) @@ -313,41 +303,12 @@ return NULL; if (self->nt) - nt_insert(self, node, index); + nt_insert(self->nt, node, (int)len); Py_CLEAR(self->headrevs); Py_RETURN_NONE; } -static void _index_clearcaches(indexObject *self) -{ - if (self->cache) { - Py_ssize_t i; - - for (i = 0; i < self->raw_length; i++) - Py_CLEAR(self->cache[i]); - free(self->cache); - self->cache = NULL; - } - if (self->offsets) { - PyMem_Free(self->offsets); - self->offsets = NULL; - } - free(self->nt); - self->nt = NULL; - Py_CLEAR(self->headrevs); -} - -static PyObject *index_clearcaches(indexObject *self) -{ - _index_clearcaches(self); - self->ntlength = self->ntcapacity = 0; - self->ntdepth = self->ntsplits = 0; - self->ntrev = -1; - self->ntlookups = self->ntmisses = 0; - Py_RETURN_NONE; -} - static PyObject *index_stats(indexObject *self) { PyObject *obj = PyDict_New(); @@ -376,16 +337,18 @@ Py_DECREF(t); } - if (self->raw_length != self->length - 1) + if (self->raw_length != self->length) istat(raw_length, "revs on disk"); istat(length, "revs in memory"); - istat(ntcapacity, "node trie capacity"); - istat(ntdepth, "node trie depth"); - istat(ntlength, "node trie count"); istat(ntlookups, "node trie lookups"); istat(ntmisses, "node trie misses"); istat(ntrev, "node trie last rev scanned"); - istat(ntsplits, "node trie splits"); + if (self->nt) { + istat(nt->capacity, "node trie capacity"); + istat(nt->depth, "node trie depth"); + istat(nt->length, "node trie count"); + istat(nt->splits, "node trie splits"); + } #undef istat @@ -451,7 +414,7 @@ { PyObject *iter = NULL; PyObject *iter_item = NULL; - Py_ssize_t min_idx = index_length(self) + 1; + Py_ssize_t min_idx = index_length(self) + 2; long iter_item_long; if (PyList_GET_SIZE(list) != 0) { @@ -493,7 +456,7 @@ PyObject *reachable = NULL; PyObject *val; - Py_ssize_t len = index_length(self) - 1; + Py_ssize_t len = index_length(self); long revnum; Py_ssize_t k; Py_ssize_t i; @@ -615,7 +578,7 @@ revstates[parents[1] + 1]) & RS_REACHABLE) && !(revstates[i + 1] & RS_REACHABLE)) { revstates[i + 1] |= RS_REACHABLE; - val = PyInt_FromLong(i); + val = PyInt_FromSsize_t(i); if (val == 
NULL) goto bail; r = PyList_Append(reachable, val); @@ -645,7 +608,7 @@ PyObject *phaseset = NULL; PyObject *phasessetlist = NULL; PyObject *rev = NULL; - Py_ssize_t len = index_length(self) - 1; + Py_ssize_t len = index_length(self); Py_ssize_t numphase = 0; Py_ssize_t minrevallphases = 0; Py_ssize_t minrevphase = 0; @@ -702,7 +665,7 @@ } } /* Transform phase list to a python list */ - phasessize = PyInt_FromLong(len); + phasessize = PyInt_FromSsize_t(len); if (phasessize == NULL) goto release; for (i = 0; i < len; i++) { @@ -711,7 +674,7 @@ * is computed as a difference */ if (phase != 0) { phaseset = PyList_GET_ITEM(phasessetlist, phase); - rev = PyInt_FromLong(i); + rev = PyInt_FromSsize_t(i); if (rev == NULL) goto release; PySet_Add(phaseset, rev); @@ -756,7 +719,7 @@ } } - len = index_length(self) - 1; + len = index_length(self); heads = PyList_New(0); if (heads == NULL) goto bail; @@ -838,9 +801,8 @@ { const char *data; - if (rev >= self->length - 1) { - PyObject *tuple = PyList_GET_ITEM(self->added, - rev - self->length + 1); + if (rev >= self->length) { + PyObject *tuple = PyList_GET_ITEM(self->added, rev - self->length); return (int)PyInt_AS_LONG(PyTuple_GET_ITEM(tuple, 3)); } else { @@ -881,7 +843,7 @@ return NULL; } - if (rev < 0 || rev >= length - 1) { + if (rev < 0 || rev >= length) { PyErr_SetString(PyExc_ValueError, "revlog index out of range"); return NULL; } @@ -924,7 +886,7 @@ break; } - if (iterrev >= length - 1) { + if (iterrev >= length) { PyErr_SetString(PyExc_IndexError, "revision outside index"); return NULL; } @@ -984,7 +946,7 @@ * -2: not found * rest: valid rev */ -static int nt_find(indexObject *self, const char *node, Py_ssize_t nodelen, +static int nt_find(nodetree *self, const char *node, Py_ssize_t nodelen, int hex) { int (*getnybble)(const char *, Py_ssize_t) = hex ? hexdigit : nt_level; @@ -993,9 +955,6 @@ if (nodelen == 20 && node[0] == '\0' && memcmp(node, nullid, 20) == 0) return -1; - if (self->nt == NULL) - return -2; - if (hex) maxlevel = nodelen > 40 ? 
40 : (int)nodelen; else @@ -1003,15 +962,15 @@ for (level = off = 0; level < maxlevel; level++) { int k = getnybble(node, level); - nodetree *n = &self->nt[off]; + nodetreenode *n = &self->nodes[off]; int v = n->children[k]; if (v < 0) { const char *n; Py_ssize_t i; - v = -(v + 1); - n = index_node(self, v); + v = -(v + 2); + n = index_node(self->index, v); if (n == NULL) return -2; for (i = level; i < maxlevel; i++) @@ -1027,65 +986,67 @@ return -4; } -static int nt_new(indexObject *self) +static int nt_new(nodetree *self) { - if (self->ntlength == self->ntcapacity) { - if (self->ntcapacity >= INT_MAX / (sizeof(nodetree) * 2)) { - PyErr_SetString(PyExc_MemoryError, - "overflow in nt_new"); + if (self->length == self->capacity) { + unsigned newcapacity; + nodetreenode *newnodes; + newcapacity = self->capacity * 2; + if (newcapacity >= INT_MAX / sizeof(nodetreenode)) { + PyErr_SetString(PyExc_MemoryError, "overflow in nt_new"); return -1; } - self->ntcapacity *= 2; - self->nt = realloc(self->nt, - self->ntcapacity * sizeof(nodetree)); - if (self->nt == NULL) { + newnodes = realloc(self->nodes, newcapacity * sizeof(nodetreenode)); + if (newnodes == NULL) { PyErr_SetString(PyExc_MemoryError, "out of memory"); return -1; } - memset(&self->nt[self->ntlength], 0, - sizeof(nodetree) * (self->ntcapacity - self->ntlength)); + self->capacity = newcapacity; + self->nodes = newnodes; + memset(&self->nodes[self->length], 0, + sizeof(nodetreenode) * (self->capacity - self->length)); } - return self->ntlength++; + return self->length++; } -static int nt_insert(indexObject *self, const char *node, int rev) +static int nt_insert(nodetree *self, const char *node, int rev) { int level = 0; int off = 0; while (level < 40) { int k = nt_level(node, level); - nodetree *n; + nodetreenode *n; int v; - n = &self->nt[off]; + n = &self->nodes[off]; v = n->children[k]; if (v == 0) { - n->children[k] = -rev - 1; + n->children[k] = -rev - 2; return 0; } if (v < 0) { - const char *oldnode = index_node_existing(self, -(v + 1)); + const char *oldnode = index_node_existing(self->index, -(v + 2)); int noff; if (oldnode == NULL) return -1; if (!memcmp(oldnode, node, 20)) { - n->children[k] = -rev - 1; + n->children[k] = -rev - 2; return 0; } noff = nt_new(self); if (noff == -1) return -1; - /* self->nt may have been changed by realloc */ - self->nt[off].children[k] = noff; + /* self->nodes may have been changed by realloc */ + self->nodes[off].children[k] = noff; off = noff; - n = &self->nt[off]; + n = &self->nodes[off]; n->children[nt_level(oldnode, ++level)] = v; - if (level > self->ntdepth) - self->ntdepth = level; - self->ntsplits += 1; + if (level > self->depth) + self->depth = level; + self->splits += 1; } else { level += 1; off = v; @@ -1095,27 +1056,106 @@ return -1; } -static int nt_init(indexObject *self) +static int nt_delete_node(nodetree *self, const char *node) +{ + /* rev==-2 happens to get encoded as 0, which is interpreted as not set */ + return nt_insert(self, node, -2); +} + +static int nt_init(nodetree *self, indexObject *index, unsigned capacity) +{ + self->index = index; + /* The input capacity is in terms of revisions, while the field is in + * terms of nodetree nodes. */ + self->capacity = (capacity < 4 ? 
4 : capacity / 2); + self->depth = 0; + self->splits = 0; + if ((size_t)self->capacity > INT_MAX / sizeof(nodetreenode)) { + PyErr_SetString(PyExc_ValueError, "overflow in init_nt"); + return -1; + } + self->nodes = calloc(self->capacity, sizeof(nodetreenode)); + if (self->nodes == NULL) { + PyErr_NoMemory(); + return -1; + } + self->length = 1; + return 0; +} + +static int nt_partialmatch(nodetree *self, const char *node, + Py_ssize_t nodelen) +{ + return nt_find(self, node, nodelen, 1); +} + +/* + * Find the length of the shortest unique prefix of node. + * + * Return values: + * + * -3: error (exception set) + * -2: not found (no exception set) + * rest: length of shortest prefix + */ +static int nt_shortest(nodetree *self, const char *node) +{ + int level, off; + + for (level = off = 0; level < 40; level++) { + int k, v; + nodetreenode *n = &self->nodes[off]; + k = nt_level(node, level); + v = n->children[k]; + if (v < 0) { + const char *n; + v = -(v + 2); + n = index_node_existing(self->index, v); + if (n == NULL) + return -3; + if (memcmp(node, n, 20) != 0) + /* + * Found a unique prefix, but it wasn't for the + * requested node (i.e the requested node does + * not exist). + */ + return -2; + return level + 1; + } + if (v == 0) + return -2; + off = v; + } + /* + * The node was still not unique after 40 hex digits, so this won't + * happen. Also, if we get here, then there's a programming error in + * this file that made us insert a node longer than 40 hex digits. + */ + PyErr_SetString(PyExc_Exception, "broken node tree"); + return -3; +} + +static int index_init_nt(indexObject *self) { if (self->nt == NULL) { - if ((size_t)self->raw_length > INT_MAX / sizeof(nodetree)) { - PyErr_SetString(PyExc_ValueError, "overflow in nt_init"); - return -1; - } - self->ntcapacity = self->raw_length < 4 - ? 4 : (int)self->raw_length / 2; - - self->nt = calloc(self->ntcapacity, sizeof(nodetree)); + self->nt = PyMem_Malloc(sizeof(nodetree)); if (self->nt == NULL) { PyErr_NoMemory(); return -1; } - self->ntlength = 1; - self->ntrev = (int)index_length(self) - 1; + if (nt_init(self->nt, self, self->raw_length) == -1) { + PyMem_Free(self->nt); + self->nt = NULL; + return -1; + } + if (nt_insert(self->nt, nullid, -1) == -1) { + PyMem_Free(self->nt); + self->nt = NULL; + return -1; + } + self->ntrev = (int)index_length(self); self->ntlookups = 1; self->ntmisses = 0; - if (nt_insert(self, nullid, INT_MAX) == -1) - return -1; } return 0; } @@ -1132,14 +1172,14 @@ { int rev; + if (index_init_nt(self) == -1) + return -3; + self->ntlookups++; - rev = nt_find(self, node, nodelen, 0); + rev = nt_find(self->nt, node, nodelen, 0); if (rev >= -1) return rev; - if (nt_init(self) == -1) - return -3; - /* * For the first handful of lookups, we scan the entire index, * and cache only the matching nodes. This optimizes for cases @@ -1155,7 +1195,7 @@ if (n == NULL) return -3; if (memcmp(node, n, nodelen > 20 ? 
20 : nodelen) == 0) { - if (nt_insert(self, n, rev) == -1) + if (nt_insert(self->nt, n, rev) == -1) return -3; break; } @@ -1165,7 +1205,7 @@ const char *n = index_node_existing(self, rev); if (n == NULL) return -3; - if (nt_insert(self, n, rev) == -1) { + if (nt_insert(self->nt, n, rev) == -1) { self->ntrev = rev + 1; return -3; } @@ -1214,15 +1254,14 @@ static PyObject *index_getitem(indexObject *self, PyObject *value) { char *node; - Py_ssize_t nodelen; int rev; if (PyInt_Check(value)) return index_get(self, PyInt_AS_LONG(value)); - if (node_check(value, &node, &nodelen) == -1) + if (node_check(value, &node) == -1) return NULL; - rev = index_find_node(self, node, nodelen); + rev = index_find_node(self, node, 20); if (rev >= -1) return PyInt_FromLong(rev); if (rev == -2) @@ -1233,14 +1272,14 @@ /* * Fully populate the radix tree. */ -static int nt_populate(indexObject *self) { +static int index_populate_nt(indexObject *self) { int rev; if (self->ntrev > 0) { for (rev = self->ntrev - 1; rev >= 0; rev--) { const char *n = index_node_existing(self, rev); if (n == NULL) return -1; - if (nt_insert(self, n, rev) == -1) + if (nt_insert(self->nt, n, rev) == -1) return -1; } self->ntrev = -1; @@ -1248,68 +1287,6 @@ return 0; } -static int nt_partialmatch(indexObject *self, const char *node, - Py_ssize_t nodelen) -{ - if (nt_init(self) == -1) - return -3; - if (nt_populate(self) == -1) - return -3; - - return nt_find(self, node, nodelen, 1); -} - -/* - * Find the length of the shortest unique prefix of node. - * - * Return values: - * - * -3: error (exception set) - * -2: not found (no exception set) - * rest: length of shortest prefix - */ -static int nt_shortest(indexObject *self, const char *node) -{ - int level, off; - - if (nt_init(self) == -1) - return -3; - if (nt_populate(self) == -1) - return -3; - - for (level = off = 0; level < 40; level++) { - int k, v; - nodetree *n = &self->nt[off]; - k = nt_level(node, level); - v = n->children[k]; - if (v < 0) { - const char *n; - v = -(v + 1); - n = index_node_existing(self, v); - if (n == NULL) - return -3; - if (memcmp(node, n, 20) != 0) - /* - * Found a unique prefix, but it wasn't for the - * requested node (i.e the requested node does - * not exist). - */ - return -2; - return level + 1; - } - if (v == 0) - return -2; - off = v; - } - /* - * The node was still not unique after 40 hex digits, so this won't - * happen. Also, if we get here, then there's a programming error in - * this file that made us insert a node longer than 40 hex digits. 
- */ - PyErr_SetString(PyExc_Exception, "broken node tree"); - return -3; -} - static PyObject *index_partialmatch(indexObject *self, PyObject *args) { const char *fullnode; @@ -1338,12 +1315,15 @@ Py_RETURN_NONE; } - rev = nt_partialmatch(self, node, nodelen); + if (index_init_nt(self) == -1) + return NULL; + if (index_populate_nt(self) == -1) + return NULL; + rev = nt_partialmatch(self->nt, node, nodelen); switch (rev) { case -4: raise_revlog_error(); - case -3: return NULL; case -2: Py_RETURN_NONE; @@ -1360,18 +1340,21 @@ static PyObject *index_shortest(indexObject *self, PyObject *args) { - Py_ssize_t nodelen; PyObject *val; char *node; int length; if (!PyArg_ParseTuple(args, "O", &val)) return NULL; - if (node_check(val, &node, &nodelen) == -1) + if (node_check(val, &node) == -1) return NULL; self->ntlookups++; - length = nt_shortest(self, node); + if (index_init_nt(self) == -1) + return NULL; + if (index_populate_nt(self) == -1) + return NULL; + length = nt_shortest(self->nt, node); if (length == -3) return NULL; if (length == -2) { @@ -1383,16 +1366,15 @@ static PyObject *index_m_get(indexObject *self, PyObject *args) { - Py_ssize_t nodelen; PyObject *val; char *node; int rev; if (!PyArg_ParseTuple(args, "O", &val)) return NULL; - if (node_check(val, &node, &nodelen) == -1) + if (node_check(val, &node) == -1) return NULL; - rev = index_find_node(self, node, nodelen); + rev = index_find_node(self, node, 20); if (rev == -3) return NULL; if (rev == -2) @@ -1403,17 +1385,16 @@ static int index_contains(indexObject *self, PyObject *value) { char *node; - Py_ssize_t nodelen; if (PyInt_Check(value)) { long rev = PyInt_AS_LONG(value); return rev >= -1 && rev < index_length(self); } - if (node_check(value, &node, &nodelen) == -1) + if (node_check(value, &node) == -1) return -1; - switch (index_find_node(self, node, nodelen)) { + switch (index_find_node(self, node, 20)) { case -3: return -1; case -2: @@ -1554,7 +1535,7 @@ goto bail; } - interesting = calloc(sizeof(*interesting), 1 << revcount); + interesting = calloc(sizeof(*interesting), ((size_t)1) << revcount); if (interesting == NULL) { PyErr_NoMemory(); goto bail; @@ -1687,7 +1668,7 @@ revs = PyMem_Malloc(argcount * sizeof(*revs)); if (argcount > 0 && revs == NULL) return PyErr_NoMemory(); - len = index_length(self) - 1; + len = index_length(self); for (i = 0; i < argcount; i++) { static const int capacity = 24; @@ -1787,7 +1768,7 @@ /* * Invalidate any trie entries introduced by added revs. */ -static void nt_invalidate_added(indexObject *self, Py_ssize_t start) +static void index_invalidate_added(indexObject *self, Py_ssize_t start) { Py_ssize_t i, len = PyList_GET_SIZE(self->added); @@ -1795,7 +1776,7 @@ PyObject *tuple = PyList_GET_ITEM(self->added, i); PyObject *node = PyTuple_GET_ITEM(tuple, 7); - nt_insert(self, PyBytes_AS_STRING(node), -1); + nt_delete_node(self->nt, PyBytes_AS_STRING(node)); } if (start == 0) @@ -1809,7 +1790,7 @@ static int index_slice_del(indexObject *self, PyObject *item) { Py_ssize_t start, stop, step, slicelength; - Py_ssize_t length = index_length(self); + Py_ssize_t length = index_length(self) + 1; int ret = 0; /* Argument changed from PySliceObject* to PyObject* in Python 3. 
*/ @@ -1845,23 +1826,23 @@ return -1; } - if (start < self->length - 1) { + if (start < self->length) { if (self->nt) { Py_ssize_t i; - for (i = start + 1; i < self->length - 1; i++) { + for (i = start + 1; i < self->length; i++) { const char *node = index_node_existing(self, i); if (node == NULL) return -1; - nt_insert(self, node, -1); + nt_delete_node(self->nt, node); } if (self->added) - nt_invalidate_added(self, 0); + index_invalidate_added(self, 0); if (self->ntrev > start) self->ntrev = (int)start; } - self->length = start + 1; + self->length = start; if (start < self->raw_length) { if (self->cache) { Py_ssize_t i; @@ -1874,12 +1855,12 @@ } if (self->nt) { - nt_invalidate_added(self, start - self->length + 1); + index_invalidate_added(self, start - self->length); if (self->ntrev > start) self->ntrev = (int)start; } if (self->added) - ret = PyList_SetSlice(self->added, start - self->length + 1, + ret = PyList_SetSlice(self->added, start - self->length, PyList_GET_SIZE(self->added), NULL); done: Py_CLEAR(self->headrevs); @@ -1897,17 +1878,16 @@ PyObject *value) { char *node; - Py_ssize_t nodelen; long rev; if (PySlice_Check(item) && value == NULL) return index_slice_del(self, item); - if (node_check(item, &node, &nodelen) == -1) + if (node_check(item, &node) == -1) return -1; if (value == NULL) - return self->nt ? nt_insert(self, node, -1) : 0; + return self->nt ? nt_delete_node(self->nt, node) : 0; rev = PyInt_AsLong(value); if (rev > INT_MAX || rev < 0) { if (!PyErr_Occurred()) @@ -1915,9 +1895,9 @@ return -1; } - if (nt_init(self) == -1) + if (index_init_nt(self) == -1) return -1; - return nt_insert(self, node, (int)rev); + return nt_insert(self->nt, node, (int)rev); } /* @@ -1984,8 +1964,6 @@ self->inlined = inlined_obj && PyObject_IsTrue(inlined_obj); self->data = data_obj; - self->ntlength = self->ntcapacity = 0; - self->ntdepth = self->ntsplits = 0; self->ntlookups = self->ntmisses = 0; self->ntrev = -1; Py_INCREF(self->data); @@ -1995,14 +1973,14 @@ if (len == -1) goto bail; self->raw_length = len; - self->length = len + 1; + self->length = len; } else { if (size % v1_hdrsize) { PyErr_SetString(PyExc_ValueError, "corrupt index file"); goto bail; } self->raw_length = size / v1_hdrsize; - self->length = self->raw_length + 1; + self->length = self->raw_length; } return 0; @@ -2016,6 +1994,36 @@ return (PyObject *)self; } +static void _index_clearcaches(indexObject *self) +{ + if (self->cache) { + Py_ssize_t i; + + for (i = 0; i < self->raw_length; i++) + Py_CLEAR(self->cache[i]); + free(self->cache); + self->cache = NULL; + } + if (self->offsets) { + PyMem_Free((void *)self->offsets); + self->offsets = NULL; + } + if (self->nt != NULL) { + free(self->nt->nodes); + PyMem_Free(self->nt); + } + self->nt = NULL; + Py_CLEAR(self->headrevs); +} + +static PyObject *index_clearcaches(indexObject *self) +{ + _index_clearcaches(self); + self->ntrev = -1; + self->ntlookups = self->ntmisses = 0; + Py_RETURN_NONE; +} + static void index_dealloc(indexObject *self) { _index_clearcaches(self); @@ -2066,8 +2074,8 @@ "get filtered head revisions"}, /* Can always do filtering */ {"deltachain", (PyCFunction)index_deltachain, METH_VARARGS, "determine revisions with deltas to reconstruct fulltext"}, - {"insert", (PyCFunction)index_insert, METH_VARARGS, - "insert an index entry"}, + {"append", (PyCFunction)index_append, METH_O, + "append an index entry"}, {"partialmatch", (PyCFunction)index_partialmatch, METH_VARARGS, "match a potentially ambiguous node ID"}, {"shortest", (PyCFunction)index_shortest, 
METH_VARARGS,
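
Note on the trie refactor above: the leaf encoding changes from -(rev + 1) to -(rev + 2). Slot value 0 still means "empty", but the null revision (rev -1) can now be stored as the value -1 (nt_init() inserts nullid at rev -1 instead of the old INT_MAX hack), and nt_delete_node() erases an entry by "inserting" rev -2, which encodes back to 0. A minimal Python sketch of that encoding follows; NodeTree, _nybble and hexnode are illustrative names, not part of the C module's API, and the hexnode callback stands in for index_node_existing(), since the trie stores only revs, never full keys.

class NodeTree(object):
    """Toy model of the C node trie: maps 40-digit hex nodes to revs.

    Child-slot encoding, matching the patch:
      0        empty
      v > 0    index of the next trie node
      v < 0    leaf holding revision -(v + 2)
    """

    def __init__(self, hexnode):
        # hexnode: rev -> 40-digit hex node (the revlog index's job)
        self._hexnode = hexnode
        self.nodes = [[0] * 16]          # one empty root node

    @staticmethod
    def _nybble(node, level):
        return int(node[level], 16)

    def insert(self, node, rev):
        off = 0
        level = 0
        while level < 40:
            k = self._nybble(node, level)
            v = self.nodes[off][k]
            if v == 0:                   # empty slot: store a leaf
                self.nodes[off][k] = -rev - 2
                return
            if v < 0:                    # occupied leaf: split it
                oldrev = -(v + 2)
                oldnode = self._hexnode(oldrev)
                if oldnode == node:
                    self.nodes[off][k] = -rev - 2
                    return
                self.nodes.append([0] * 16)
                noff = len(self.nodes) - 1
                self.nodes[off][k] = noff
                off = noff
                level += 1
                self.nodes[off][self._nybble(oldnode, level)] = v
            else:                        # internal node: descend
                off = v
                level += 1

    def find(self, node):
        off = 0
        for level in range(40):
            v = self.nodes[off][self._nybble(node, level)]
            if v == 0:
                return -2                # not found
            if v < 0:
                rev = -(v + 2)
                if rev == -1:            # the null revision
                    return -1
                # Only a prefix was matched; verify the whole node.
                return rev if self._hexnode(rev) == node else -2
            off = v
        return -2

revhexes = ['a' * 40, 'ab' + 'c' * 38]
nt = NodeTree(hexnode=lambda rev: revhexes[rev])
nt.insert('0' * 40, -1)                  # nullid, as index_init_nt() now does
for rev, node in enumerate(revhexes):
    nt.insert(node, rev)
assert nt.find('a' * 40) == 0
assert nt.find('f' * 40) == -2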
--- a/mercurial/changegroup.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/changegroup.py Mon Aug 20 09:48:08 2018 -0700 @@ -14,16 +14,24 @@ from .i18n import _ from .node import ( hex, + nullid, nullrev, short, ) +from .thirdparty import ( + attr, +) + from . import ( - dagutil, + dagop, error, + match as matchmod, mdiff, phases, pycompat, + repository, + revlog, util, ) @@ -31,16 +39,12 @@ stringutil, ) -_CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s" -_CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s" -_CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH" +_CHANGEGROUPV1_DELTA_HEADER = struct.Struct("20s20s20s20s") +_CHANGEGROUPV2_DELTA_HEADER = struct.Struct("20s20s20s20s20s") +_CHANGEGROUPV3_DELTA_HEADER = struct.Struct(">20s20s20s20s20sH") LFS_REQUIREMENT = 'lfs' -# When narrowing is finalized and no longer subject to format changes, -# we should move this to just "narrow" or similar. -NARROW_REQUIREMENT = 'narrowhg-experimental' - readexactly = util.readexactly def getchunk(stream): @@ -61,6 +65,10 @@ """return a changegroup chunk header (string) for a zero-length chunk""" return struct.pack(">l", 0) +def _fileheader(path): + """Obtain a changegroup chunk header for a named path.""" + return chunkheader(len(path)) + path + def writechunks(ui, chunks, filename, vfs=None): """Write chunks to a file and return its filename. @@ -114,7 +122,7 @@ bundlerepo and some debug commands - their use is discouraged. """ deltaheader = _CHANGEGROUPV1_DELTA_HEADER - deltaheadersize = struct.calcsize(deltaheader) + deltaheadersize = deltaheader.size version = '01' _grouplistcount = 1 # One list of files after the manifests @@ -187,7 +195,7 @@ if not l: return {} headerdata = readexactly(self._stream, self.deltaheadersize) - header = struct.unpack(self.deltaheader, headerdata) + header = self.deltaheader.unpack(headerdata) delta = readexactly(self._stream, l - self.deltaheadersize) node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode) return (node, p1, p2, cs, deltabase, delta, flags) @@ -325,7 +333,7 @@ cl = repo.changelog ml = repo.manifestlog # validate incoming csets have their manifests - for cset in xrange(clstart, clend): + for cset in pycompat.xrange(clstart, clend): mfnode = cl.changelogrevision(cset).manifest mfest = ml[mfnode].readdelta() # store file cgnodes we must see @@ -367,7 +375,7 @@ repo.hook('pretxnchangegroup', throw=True, **pycompat.strkwargs(hookargs)) - added = [cl.node(r) for r in xrange(clstart, clend)] + added = [cl.node(r) for r in pycompat.xrange(clstart, clend)] phaseall = None if srctype in ('push', 'serve'): # Old servers can not push the boundary themselves. @@ -446,7 +454,7 @@ remain the same. """ deltaheader = _CHANGEGROUPV2_DELTA_HEADER - deltaheadersize = struct.calcsize(deltaheader) + deltaheadersize = deltaheader.size version = '02' def _deltaheader(self, headertuple, prevnode): @@ -462,7 +470,7 @@ separating manifests and files. """ deltaheader = _CHANGEGROUPV3_DELTA_HEADER - deltaheadersize = struct.calcsize(deltaheader) + deltaheadersize = deltaheader.size version = '03' _grouplistcount = 2 # One list of manifests and one list of files @@ -493,139 +501,466 @@ return d return readexactly(self._fh, n) -class cg1packer(object): - deltaheader = _CHANGEGROUPV1_DELTA_HEADER - version = '01' - def __init__(self, repo, bundlecaps=None): +@attr.s(slots=True, frozen=True) +class revisiondeltarequest(object): + """Describes a request to construct a revision delta. + + Instances are converted into ``revisiondelta`` later. 
+ """ + # Revision whose delta will be generated. + node = attr.ib() + + # Linknode value. + linknode = attr.ib() + + # Parent revisions to record in ``revisiondelta`` instance. + p1node = attr.ib() + p2node = attr.ib() + + # Base revision that delta should be generated against. If nullid, + # the full revision data should be populated. If None, the delta + # may be generated against any base revision that is an ancestor of + # this revision. If any other value, the delta should be produced + # against that revision. + basenode = attr.ib() + + # Whether this should be marked as an ellipsis revision. + ellipsis = attr.ib(default=False) + +@attr.s(slots=True, frozen=True) +class revisiondelta(object): + """Describes a delta entry in a changegroup. + + Captured data is sufficient to serialize the delta into multiple + formats. + + ``revision`` and ``delta`` are mutually exclusive. + """ + # 20 byte node of this revision. + node = attr.ib() + # 20 byte nodes of parent revisions. + p1node = attr.ib() + p2node = attr.ib() + # 20 byte node of node this delta is against. + basenode = attr.ib() + # 20 byte node of changeset revision this delta is associated with. + linknode = attr.ib() + # 2 bytes of flags to apply to revision data. + flags = attr.ib() + # Size of base revision this delta is against. May be None if + # basenode is nullid. + baserevisionsize = attr.ib() + # Raw fulltext revision data. + revision = attr.ib() + # Delta between the basenode and node. + delta = attr.ib() + +def _revisiondeltatochunks(delta, headerfn): + """Serialize a revisiondelta to changegroup chunks.""" + + # The captured revision delta may be encoded as a delta against + # a base revision or as a full revision. The changegroup format + # requires that everything on the wire be deltas. So for full + # revisions, we need to invent a header that says to rewrite + # data. + + if delta.delta is not None: + prefix, data = b'', delta.delta + elif delta.basenode == nullid: + data = delta.revision + prefix = mdiff.trivialdiffheader(len(data)) + else: + data = delta.revision + prefix = mdiff.replacediffheader(delta.baserevisionsize, + len(data)) + + meta = headerfn(delta) + + yield chunkheader(len(meta) + len(prefix) + len(data)) + yield meta + if prefix: + yield prefix + yield data + +def _sortnodesnormal(store, nodes, reorder): + """Sort nodes for changegroup generation and turn into revnums.""" + # for generaldelta revlogs, we linearize the revs; this will both be + # much quicker and generate a much smaller bundle + if (store._generaldelta and reorder is None) or reorder: + revs = set(store.rev(n) for n in nodes) + return dagop.linearize(revs, store.parentrevs) + else: + return sorted([store.rev(n) for n in nodes]) + +def _sortnodesellipsis(store, nodes, cl, lookup): + """Sort nodes for changegroup generation and turn into revnums.""" + # Ellipses serving mode. + # + # In a perfect world, we'd generate better ellipsis-ified graphs + # for non-changelog revlogs. In practice, we haven't started doing + # that yet, so the resulting DAGs for the manifestlog and filelogs + # are actually full of bogus parentage on all the ellipsis + # nodes. This has the side effect that, while the contents are + # correct, the individual DAGs might be completely out of whack in + # a case like 882681bc3166 and its ancestors (back about 10 + # revisions or so) in the main hg repo. 
+ # + # The one invariant we *know* holds is that the new (potentially + # bogus) DAG shape will be valid if we order the nodes in the + # order that they're introduced in dramatis personae by the + # changelog, so what we do is we sort the non-changelog histories + # by the order in which they are used by the changelog. + key = lambda n: cl.rev(lookup(n)) + return [store.rev(n) for n in sorted(nodes, key=key)] + +def _handlerevisiondeltarequest(store, request, prevnode): + """Obtain a revisiondelta from a revisiondeltarequest""" + + node = request.node + rev = store.rev(node) + + # Requesting a full revision. + if request.basenode == nullid: + baserev = nullrev + # Requesting an explicit revision. + elif request.basenode is not None: + baserev = store.rev(request.basenode) + # Allowing us to choose. + else: + p1, p2 = store.parentrevs(rev) + dp = store.deltaparent(rev) + + if dp == nullrev and store.storedeltachains: + # Avoid sending full revisions when delta parent is null. Pick prev + # in that case. It's tempting to pick p1 in this case, as p1 will + # be smaller in the common case. However, computing a delta against + # p1 may require resolving the raw text of p1, which could be + # expensive. The revlog caches should have prev cached, meaning + # less CPU for changegroup generation. There is likely room to add + # a flag and/or config option to control this behavior. + baserev = store.rev(prevnode) + elif dp == nullrev: + # revlog is configured to use full snapshot for a reason, + # stick to full snapshot. + baserev = nullrev + elif dp not in (p1, p2, store.rev(prevnode)): + # Pick prev when we can't be sure remote has the base revision. + baserev = store.rev(prevnode) + else: + baserev = dp + + if baserev != nullrev and not store.candelta(baserev, rev): + baserev = nullrev + + revision = None + delta = None + baserevisionsize = None + + if store.iscensored(baserev) or store.iscensored(rev): + try: + revision = store.revision(node, raw=True) + except error.CensoredNodeError as e: + revision = e.tombstone + + if baserev != nullrev: + baserevisionsize = store.rawsize(baserev) + + elif baserev == nullrev: + revision = store.revision(node, raw=True) + else: + delta = store.revdiff(baserev, rev) + + extraflags = revlog.REVIDX_ELLIPSIS if request.ellipsis else 0 + + return revisiondelta( + node=node, + p1node=request.p1node, + p2node=request.p2node, + linknode=request.linknode, + basenode=store.node(baserev), + flags=store.flags(rev) | extraflags, + baserevisionsize=baserevisionsize, + revision=revision, + delta=delta, + ) + +def _makenarrowdeltarequest(cl, store, ischangelog, rev, node, linkrev, + linknode, clrevtolocalrev, fullclnodes, + precomputedellipsis): + linkparents = precomputedellipsis[linkrev] + def local(clrev): + """Turn a changelog revnum into a local revnum. + + The ellipsis dag is stored as revnums on the changelog, + but when we're producing ellipsis entries for + non-changelog revlogs, we need to turn those numbers into + something local. This does that for us, and during the + changelog sending phase will also expand the stored + mappings as needed. + """ + if clrev == nullrev: + return nullrev + + if ischangelog: + return clrev + + # Walk the ellipsis-ized changelog breadth-first looking for a + # change that has been linked from the current revlog. + # + # For a flat manifest revlog only a single step should be necessary + # as all relevant changelog entries are relevant to the flat + # manifest. 
+ # + # For a filelog or tree manifest dirlog however not every changelog + # entry will have been relevant, so we need to skip some changelog + # nodes even after ellipsis-izing. + walk = [clrev] + while walk: + p = walk[0] + walk = walk[1:] + if p in clrevtolocalrev: + return clrevtolocalrev[p] + elif p in fullclnodes: + walk.extend([pp for pp in cl.parentrevs(p) + if pp != nullrev]) + elif p in precomputedellipsis: + walk.extend([pp for pp in precomputedellipsis[p] + if pp != nullrev]) + else: + # In this case, we've got an ellipsis with parents + # outside the current bundle (likely an + # incremental pull). We "know" that we can use the + # value of this same revlog at whatever revision + # is pointed to by linknode. "Know" is in scare + # quotes because I haven't done enough examination + # of edge cases to convince myself this is really + # a fact - it works for all the (admittedly + # thorough) cases in our testsuite, but I would be + # somewhat unsurprised to find a case in the wild + # where this breaks down a bit. That said, I don't + # know if it would hurt anything. + for i in pycompat.xrange(rev, 0, -1): + if store.linkrev(i) == clrev: + return i + # We failed to resolve a parent for this node, so + # we crash the changegroup construction. + raise error.Abort( + 'unable to resolve parent while packing %r %r' + ' for changeset %r' % (store.indexfile, rev, clrev)) + + return nullrev + + if not linkparents or ( + store.parentrevs(rev) == (nullrev, nullrev)): + p1, p2 = nullrev, nullrev + elif len(linkparents) == 1: + p1, = sorted(local(p) for p in linkparents) + p2 = nullrev + else: + p1, p2 = sorted(local(p) for p in linkparents) + + p1node, p2node = store.node(p1), store.node(p2) + + # TODO: try and actually send deltas for ellipsis data blocks + return revisiondeltarequest( + node=node, + p1node=p1node, + p2node=p2node, + linknode=linknode, + basenode=nullid, + ellipsis=True, + ) + +def deltagroup(repo, revs, store, ischangelog, lookup, forcedeltaparentprev, + units=None, + ellipses=False, clrevtolocalrev=None, fullclnodes=None, + precomputedellipsis=None): + """Calculate deltas for a set of revisions. + + Is a generator of ``revisiondelta`` instances. + + If units is not None, progress detail will be generated, units specifies + the type of revlog that is touched (changelog, manifest, etc.). + """ + if not revs: + return + + # We perform two passes over the revisions whose data we will emit. + # + # In the first pass, we obtain information about the deltas that will + # be generated. This involves computing linknodes and adjusting the + # request to take shallow fetching into account. The end result of + # this pass is a list of "request" objects stating which deltas + # to obtain. + # + # The second pass is simply resolving the requested deltas. + + cl = repo.changelog + + # In the first pass, collect info about the deltas we'll be + # generating. + requests = [] + + # Add the parent of the first rev. + revs.insert(0, store.parentrevs(revs[0])[0]) + + for i in pycompat.xrange(len(revs) - 1): + prev = revs[i] + curr = revs[i + 1] + + node = store.node(curr) + linknode = lookup(node) + p1node, p2node = store.parents(node) + + if ellipses: + linkrev = cl.rev(linknode) + clrevtolocalrev[linkrev] = curr + + # This is a node to send in full, because the changeset it + # corresponds to was a full changeset. 
+ if linknode in fullclnodes: + requests.append(revisiondeltarequest( + node=node, + p1node=p1node, + p2node=p2node, + linknode=linknode, + basenode=None, + )) + + elif linkrev not in precomputedellipsis: + pass + else: + requests.append(_makenarrowdeltarequest( + cl, store, ischangelog, curr, node, linkrev, linknode, + clrevtolocalrev, fullclnodes, + precomputedellipsis)) + else: + requests.append(revisiondeltarequest( + node=node, + p1node=p1node, + p2node=p2node, + linknode=linknode, + basenode=store.node(prev) if forcedeltaparentprev else None, + )) + + # We expect the first pass to be fast, so we only engage the progress + # meter for constructing the revision deltas. + progress = None + if units is not None: + progress = repo.ui.makeprogress(_('bundling'), unit=units, + total=len(requests)) + + prevnode = store.node(revs[0]) + for i, request in enumerate(requests): + if progress: + progress.update(i + 1) + + delta = _handlerevisiondeltarequest(store, request, prevnode) + + yield delta + + prevnode = request.node + + if progress: + progress.complete() + +class cgpacker(object): + def __init__(self, repo, filematcher, version, allowreorder, + builddeltaheader, manifestsend, + forcedeltaparentprev=False, + bundlecaps=None, ellipses=False, + shallow=False, ellipsisroots=None, fullnodes=None): """Given a source repo, construct a bundler. + filematcher is a matcher that matches on files to include in the + changegroup. Used to facilitate sparse changegroups. + + allowreorder controls whether reordering of revisions is allowed. + This value is used when ``bundle.reorder`` is ``auto`` or isn't + set. + + forcedeltaparentprev indicates whether delta parents must be against + the previous revision in a delta group. This should only be used for + compatibility with changegroup version 1. + + builddeltaheader is a callable that constructs the header for a group + delta. + + manifestsend is a chunk to send after manifests have been fully emitted. + + ellipses indicates whether ellipsis serving mode is enabled. + bundlecaps is optional and can be used to specify the set of capabilities which can be used to build the bundle. While bundlecaps is unused in core Mercurial, extensions rely on this feature to communicate capabilities to customize the changegroup packer. + + shallow indicates whether shallow data might be sent. The packer may + need to pack file contents not introduced by the changes being packed. + + fullnodes is the set of changelog nodes which should not be ellipsis + nodes. We store this rather than the set of nodes that should be + ellipsis because for very large histories we expect this to be + significantly smaller. """ + assert filematcher + self._filematcher = filematcher + + self.version = version + self._forcedeltaparentprev = forcedeltaparentprev + self._builddeltaheader = builddeltaheader + self._manifestsend = manifestsend + self._ellipses = ellipses + # Set of capabilities we can use to build the bundle. if bundlecaps is None: bundlecaps = set() self._bundlecaps = bundlecaps + self._isshallow = shallow + self._fullclnodes = fullnodes + + # Maps ellipsis revs to their roots at the changelog level. 
+ self._precomputedellipsis = ellipsisroots + # experimental config: bundle.reorder reorder = repo.ui.config('bundle', 'reorder') if reorder == 'auto': - reorder = None + self._reorder = allowreorder else: - reorder = stringutil.parsebool(reorder) + self._reorder = stringutil.parsebool(reorder) + self._repo = repo - self._reorder = reorder + if self._repo.ui.verbose and not self._repo.ui.debugflag: self._verbosenote = self._repo.ui.note else: self._verbosenote = lambda s: None - def close(self): - return closechunk() - - def fileheader(self, fname): - return chunkheader(len(fname)) + fname - - # Extracted both for clarity and for overriding in extensions. - def _sortgroup(self, revlog, nodelist, lookup): - """Sort nodes for change group and turn them into revnums.""" - # for generaldelta revlogs, we linearize the revs; this will both be - # much quicker and generate a much smaller bundle - if (revlog._generaldelta and self._reorder is None) or self._reorder: - dag = dagutil.revlogdag(revlog) - return dag.linearize(set(revlog.rev(n) for n in nodelist)) - else: - return sorted([revlog.rev(n) for n in nodelist]) - - def group(self, nodelist, revlog, lookup, units=None): - """Calculate a delta group, yielding a sequence of changegroup chunks - (strings). - - Given a list of changeset revs, return a set of deltas and - metadata corresponding to nodes. The first delta is - first parent(nodelist[0]) -> nodelist[0], the receiver is - guaranteed to have this parent as it has all history before - these changesets. In the case firstparent is nullrev the - changegroup starts with a full revision. - - If units is not None, progress detail will be generated, units specifies - the type of revlog that is touched (changelog, manifest, etc.). - """ - # if we don't have any revisions touched by these changesets, bail - if len(nodelist) == 0: - yield self.close() - return - - revs = self._sortgroup(revlog, nodelist, lookup) + def generate(self, commonrevs, clnodes, fastpathlinkrev, source): + """Yield a sequence of changegroup byte chunks.""" - # add the parent of the first rev - p = revlog.parentrevs(revs[0])[0] - revs.insert(0, p) - - # build deltas - progress = None - if units is not None: - progress = self._repo.ui.makeprogress(_('bundling'), unit=units, - total=(len(revs) - 1)) - for r in xrange(len(revs) - 1): - if progress: - progress.update(r + 1) - prev, curr = revs[r], revs[r + 1] - linknode = lookup(revlog.node(curr)) - for c in self.revchunk(revlog, curr, prev, linknode): - yield c - - if progress: - progress.complete() - yield self.close() - - # filter any nodes that claim to be part of the known set - def prune(self, revlog, missing, commonrevs): - rr, rl = revlog.rev, revlog.linkrev - return [n for n in missing if rl(rr(n)) not in commonrevs] - - def _packmanifests(self, dir, mfnodes, lookuplinknode): - """Pack flat manifests into a changegroup stream.""" - assert not dir - for chunk in self.group(mfnodes, self._repo.manifestlog._revlog, - lookuplinknode, units=_('manifests')): - yield chunk - - def _manifestsdone(self): - return '' - - def generate(self, commonrevs, clnodes, fastpathlinkrev, source): - '''yield a sequence of changegroup chunks (strings)''' repo = self._repo cl = repo.changelog - clrevorder = {} - mfs = {} # needed manifests - fnodes = {} # needed file nodes - changedfiles = set() - - # Callback for the changelog, used to collect changed files and manifest - # nodes. - # Returns the linkrev node (identity in the changelog case). 
- def lookupcl(x): - c = cl.read(x) - clrevorder[x] = len(clrevorder) - n = c[0] - # record the first changeset introducing this manifest version - mfs.setdefault(n, x) - # Record a complete list of potentially-changed files in - # this manifest. - changedfiles.update(c[3]) - return x - self._verbosenote(_('uncompressed size of bundle content:\n')) size = 0 - for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')): - size += len(chunk) - yield chunk + + clstate, deltas = self._generatechangelog(cl, clnodes) + for delta in deltas: + for chunk in _revisiondeltatochunks(delta, self._builddeltaheader): + size += len(chunk) + yield chunk + + close = closechunk() + size += len(close) + yield closechunk() + self._verbosenote(_('%8.i (changelog)\n') % size) + clrevorder = clstate['clrevorder'] + mfs = clstate['mfs'] + changedfiles = clstate['changedfiles'] + # We need to make sure that the linkrev in the changegroup refers to # the first changeset that introduced the manifest or file revision. # The fastpath is usually safer than the slowpath, because the filelogs @@ -648,40 +983,153 @@ fastpathlinkrev = fastpathlinkrev and ( 'treemanifest' not in repo.requirements) - for chunk in self.generatemanifests(commonrevs, clrevorder, - fastpathlinkrev, mfs, fnodes, source): - yield chunk + fnodes = {} # needed file nodes + + size = 0 + it = self.generatemanifests( + commonrevs, clrevorder, fastpathlinkrev, mfs, fnodes, source, + clstate['clrevtomanifestrev']) + + for dir, deltas in it: + if dir: + assert self.version == b'03' + chunk = _fileheader(dir) + size += len(chunk) + yield chunk + + for delta in deltas: + chunks = _revisiondeltatochunks(delta, self._builddeltaheader) + for chunk in chunks: + size += len(chunk) + yield chunk + + close = closechunk() + size += len(close) + yield close + + self._verbosenote(_('%8.i (manifests)\n') % size) + yield self._manifestsend + + mfdicts = None + if self._ellipses and self._isshallow: + mfdicts = [(self._repo.manifestlog[n].read(), lr) + for (n, lr) in mfs.iteritems()] + mfs.clear() clrevs = set(cl.rev(x) for x in clnodes) - if not fastpathlinkrev: - def linknodes(unused, fname): - return fnodes.get(fname, {}) - else: - cln = cl.node - def linknodes(filerevlog, fname): - llr = filerevlog.linkrev - fln = filerevlog.node - revs = ((r, llr(r)) for r in filerevlog) - return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs) + it = self.generatefiles(changedfiles, commonrevs, + source, mfdicts, fastpathlinkrev, + fnodes, clrevs) + + for path, deltas in it: + h = _fileheader(path) + size = len(h) + yield h - for chunk in self.generatefiles(changedfiles, linknodes, commonrevs, - source): - yield chunk + for delta in deltas: + chunks = _revisiondeltatochunks(delta, self._builddeltaheader) + for chunk in chunks: + size += len(chunk) + yield chunk - yield self.close() + close = closechunk() + size += len(close) + yield close + + self._verbosenote(_('%8.i %s\n') % (size, path)) + + yield closechunk() if clnodes: repo.hook('outgoing', node=hex(clnodes[0]), source=source) + def _generatechangelog(self, cl, nodes): + """Generate data for changelog chunks. + + Returns a 2-tuple of a dict containing state and an iterable of + byte chunks. The state will not be fully populated until the + chunk stream has been fully consumed. + """ + clrevorder = {} + mfs = {} # needed manifests + mfl = self._repo.manifestlog + # TODO violates storage abstraction. 
+ mfrevlog = mfl._revlog + changedfiles = set() + clrevtomanifestrev = {} + + # Callback for the changelog, used to collect changed files and + # manifest nodes. + # Returns the linkrev node (identity in the changelog case). + def lookupcl(x): + c = cl.read(x) + clrevorder[x] = len(clrevorder) + + if self._ellipses: + # Only update mfs if x is going to be sent. Otherwise we + # end up with bogus linkrevs specified for manifests and + # we skip some manifest nodes that we should otherwise + # have sent. + if (x in self._fullclnodes + or cl.rev(x) in self._precomputedellipsis): + n = c[0] + # Record the first changeset introducing this manifest + # version. + mfs.setdefault(n, x) + # Set this narrow-specific dict so we have the lowest + # manifest revnum to look up for this cl revnum. (Part of + # mapping changelog ellipsis parents to manifest ellipsis + # parents) + clrevtomanifestrev.setdefault(cl.rev(x), mfrevlog.rev(n)) + # We can't trust the changed files list in the changeset if the + # client requested a shallow clone. + if self._isshallow: + changedfiles.update(mfl[c[0]].read().keys()) + else: + changedfiles.update(c[3]) + else: + + n = c[0] + # record the first changeset introducing this manifest version + mfs.setdefault(n, x) + # Record a complete list of potentially-changed files in + # this manifest. + changedfiles.update(c[3]) + + return x + + # Changelog doesn't benefit from reordering revisions. So send out + # revisions in store order. + revs = sorted(cl.rev(n) for n in nodes) + + state = { + 'clrevorder': clrevorder, + 'mfs': mfs, + 'changedfiles': changedfiles, + 'clrevtomanifestrev': clrevtomanifestrev, + } + + gen = deltagroup( + self._repo, revs, cl, True, lookupcl, + self._forcedeltaparentprev, + ellipses=self._ellipses, + units=_('changesets'), + clrevtolocalrev={}, + fullclnodes=self._fullclnodes, + precomputedellipsis=self._precomputedellipsis) + + return state, gen + def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs, - fnodes, source): + fnodes, source, clrevtolocalrev): """Returns an iterator of changegroup chunks containing manifests. `source` is unused here, but is used by extensions like remotefilelog to change what is sent based in pulls vs pushes, etc. 
""" repo = self._repo + cl = repo.changelog mfl = repo.manifestlog dirlog = mfl._revlog.dirlog tmfnodes = {'': mfs} @@ -728,21 +1176,91 @@ return clnode return lookupmflinknode - size = 0 while tmfnodes: dir, nodes = tmfnodes.popitem() - prunednodes = self.prune(dirlog(dir), nodes, commonrevs) - if not dir or prunednodes: - for x in self._packmanifests(dir, prunednodes, - makelookupmflinknode(dir, nodes)): - size += len(x) - yield x - self._verbosenote(_('%8.i (manifests)\n') % size) - yield self._manifestsdone() + store = dirlog(dir) + + if not self._filematcher.visitdir(store._dir[:-1] or '.'): + prunednodes = [] + else: + frev, flr = store.rev, store.linkrev + prunednodes = [n for n in nodes + if flr(frev(n)) not in commonrevs] + + if dir and not prunednodes: + continue + + lookupfn = makelookupmflinknode(dir, nodes) + + if self._ellipses: + revs = _sortnodesellipsis(store, prunednodes, cl, + lookupfn) + else: + revs = _sortnodesnormal(store, prunednodes, + self._reorder) + + deltas = deltagroup( + self._repo, revs, store, False, lookupfn, + self._forcedeltaparentprev, + ellipses=self._ellipses, + units=_('manifests'), + clrevtolocalrev=clrevtolocalrev, + fullclnodes=self._fullclnodes, + precomputedellipsis=self._precomputedellipsis) + + yield dir, deltas # The 'source' parameter is useful for extensions - def generatefiles(self, changedfiles, linknodes, commonrevs, source): + def generatefiles(self, changedfiles, commonrevs, source, + mfdicts, fastpathlinkrev, fnodes, clrevs): + changedfiles = list(filter(self._filematcher, changedfiles)) + + if not fastpathlinkrev: + def normallinknodes(unused, fname): + return fnodes.get(fname, {}) + else: + cln = self._repo.changelog.node + + def normallinknodes(store, fname): + flinkrev = store.linkrev + fnode = store.node + revs = ((r, flinkrev(r)) for r in store) + return dict((fnode(r), cln(lr)) + for r, lr in revs if lr in clrevs) + + clrevtolocalrev = {} + + if self._isshallow: + # In a shallow clone, the linknodes callback needs to also include + # those file nodes that are in the manifests we sent but weren't + # introduced by those manifests. + commonctxs = [self._repo[c] for c in commonrevs] + clrev = self._repo.changelog.rev + + # Defining this function has a side-effect of overriding the + # function of the same name that was passed in as an argument. + # TODO have caller pass in appropriate function. + def linknodes(flog, fname): + for c in commonctxs: + try: + fnode = c.filenode(fname) + clrevtolocalrev[c.rev()] = flog.rev(fnode) + except error.ManifestLookupError: + pass + links = normallinknodes(flog, fname) + if len(links) != len(mfdicts): + for mf, lr in mfdicts: + fnode = mf.get(fname, None) + if fnode in links: + links[fnode] = min(links[fnode], lr, key=clrev) + elif fnode: + links[fnode] = lr + return links + else: + linknodes = normallinknodes + repo = self._repo + cl = repo.changelog progress = repo.ui.makeprogress(_('bundling'), unit=_('files'), total=len(changedfiles)) for i, fname in enumerate(sorted(changedfiles)): @@ -751,129 +1269,96 @@ raise error.Abort(_("empty or missing file data for %s") % fname) + clrevtolocalrev.clear() + linkrevnodes = linknodes(filerevlog, fname) # Lookup for filenodes, we collected the linkrev nodes above in the # fastpath case and with lookupmf in the slowpath case. 
def lookupfilelog(x): return linkrevnodes[x] - filenodes = self.prune(filerevlog, linkrevnodes, commonrevs) - if filenodes: - progress.update(i + 1, item=fname) - h = self.fileheader(fname) - size = len(h) - yield h - for chunk in self.group(filenodes, filerevlog, lookupfilelog): - size += len(chunk) - yield chunk - self._verbosenote(_('%8.i %s\n') % (size, fname)) + frev, flr = filerevlog.rev, filerevlog.linkrev + filenodes = [n for n in linkrevnodes + if flr(frev(n)) not in commonrevs] + + if not filenodes: + continue + + if self._ellipses: + revs = _sortnodesellipsis(filerevlog, filenodes, + cl, lookupfilelog) + else: + revs = _sortnodesnormal(filerevlog, filenodes, + self._reorder) + + progress.update(i + 1, item=fname) + + deltas = deltagroup( + self._repo, revs, filerevlog, False, lookupfilelog, + self._forcedeltaparentprev, + ellipses=self._ellipses, + clrevtolocalrev=clrevtolocalrev, + fullclnodes=self._fullclnodes, + precomputedellipsis=self._precomputedellipsis) + + yield fname, deltas + progress.complete() - def deltaparent(self, revlog, rev, p1, p2, prev): - if not revlog.candelta(prev, rev): - raise error.ProgrammingError('cg1 should not be used in this case') - return prev - - def revchunk(self, revlog, rev, prev, linknode): - node = revlog.node(rev) - p1, p2 = revlog.parentrevs(rev) - base = self.deltaparent(revlog, rev, p1, p2, prev) +def _makecg1packer(repo, filematcher, bundlecaps, ellipses=False, + shallow=False, ellipsisroots=None, fullnodes=None): + builddeltaheader = lambda d: _CHANGEGROUPV1_DELTA_HEADER.pack( + d.node, d.p1node, d.p2node, d.linknode) - prefix = '' - if revlog.iscensored(base) or revlog.iscensored(rev): - try: - delta = revlog.revision(node, raw=True) - except error.CensoredNodeError as e: - delta = e.tombstone - if base == nullrev: - prefix = mdiff.trivialdiffheader(len(delta)) - else: - baselen = revlog.rawsize(base) - prefix = mdiff.replacediffheader(baselen, len(delta)) - elif base == nullrev: - delta = revlog.revision(node, raw=True) - prefix = mdiff.trivialdiffheader(len(delta)) - else: - delta = revlog.revdiff(base, rev) - p1n, p2n = revlog.parents(node) - basenode = revlog.node(base) - flags = revlog.flags(rev) - meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags) - meta += prefix - l = len(meta) + len(delta) - yield chunkheader(l) - yield meta - yield delta - def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags): - # do nothing with basenode, it is implicitly the previous one in HG10 - # do nothing with flags, it is implicitly 0 for cg1 and cg2 - return struct.pack(self.deltaheader, node, p1n, p2n, linknode) + return cgpacker(repo, filematcher, b'01', + allowreorder=None, + builddeltaheader=builddeltaheader, + manifestsend=b'', + forcedeltaparentprev=True, + bundlecaps=bundlecaps, + ellipses=ellipses, + shallow=shallow, + ellipsisroots=ellipsisroots, + fullnodes=fullnodes) -class cg2packer(cg1packer): - version = '02' - deltaheader = _CHANGEGROUPV2_DELTA_HEADER - - def __init__(self, repo, bundlecaps=None): - super(cg2packer, self).__init__(repo, bundlecaps) - if self._reorder is None: - # Since generaldelta is directly supported by cg2, reordering - # generally doesn't help, so we disable it by default (treating - # bundle.reorder=auto just like bundle.reorder=False). 
- self._reorder = False +def _makecg2packer(repo, filematcher, bundlecaps, ellipses=False, + shallow=False, ellipsisroots=None, fullnodes=None): + builddeltaheader = lambda d: _CHANGEGROUPV2_DELTA_HEADER.pack( + d.node, d.p1node, d.p2node, d.basenode, d.linknode) - def deltaparent(self, revlog, rev, p1, p2, prev): - dp = revlog.deltaparent(rev) - if dp == nullrev and revlog.storedeltachains: - # Avoid sending full revisions when delta parent is null. Pick prev - # in that case. It's tempting to pick p1 in this case, as p1 will - # be smaller in the common case. However, computing a delta against - # p1 may require resolving the raw text of p1, which could be - # expensive. The revlog caches should have prev cached, meaning - # less CPU for changegroup generation. There is likely room to add - # a flag and/or config option to control this behavior. - base = prev - elif dp == nullrev: - # revlog is configured to use full snapshot for a reason, - # stick to full snapshot. - base = nullrev - elif dp not in (p1, p2, prev): - # Pick prev when we can't be sure remote has the base revision. - return prev - else: - base = dp - if base != nullrev and not revlog.candelta(base, rev): - base = nullrev - return base + # Since generaldelta is directly supported by cg2, reordering + # generally doesn't help, so we disable it by default (treating + # bundle.reorder=auto just like bundle.reorder=False). + return cgpacker(repo, filematcher, b'02', + allowreorder=False, + builddeltaheader=builddeltaheader, + manifestsend=b'', + bundlecaps=bundlecaps, + ellipses=ellipses, + shallow=shallow, + ellipsisroots=ellipsisroots, + fullnodes=fullnodes) - def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags): - # Do nothing with flags, it is implicitly 0 in cg1 and cg2 - return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode) - -class cg3packer(cg2packer): - version = '03' - deltaheader = _CHANGEGROUPV3_DELTA_HEADER - - def _packmanifests(self, dir, mfnodes, lookuplinknode): - if dir: - yield self.fileheader(dir) +def _makecg3packer(repo, filematcher, bundlecaps, ellipses=False, + shallow=False, ellipsisroots=None, fullnodes=None): + builddeltaheader = lambda d: _CHANGEGROUPV3_DELTA_HEADER.pack( + d.node, d.p1node, d.p2node, d.basenode, d.linknode, d.flags) - dirlog = self._repo.manifestlog._revlog.dirlog(dir) - for chunk in self.group(mfnodes, dirlog, lookuplinknode, - units=_('manifests')): - yield chunk - - def _manifestsdone(self): - return self.close() + return cgpacker(repo, filematcher, b'03', + allowreorder=False, + builddeltaheader=builddeltaheader, + manifestsend=closechunk(), + bundlecaps=bundlecaps, + ellipses=ellipses, + shallow=shallow, + ellipsisroots=ellipsisroots, + fullnodes=fullnodes) - def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags): - return struct.pack( - self.deltaheader, node, p1n, p2n, basenode, linknode, flags) - -_packermap = {'01': (cg1packer, cg1unpacker), +_packermap = {'01': (_makecg1packer, cg1unpacker), # cg2 adds support for exchanging generaldelta - '02': (cg2packer, cg2unpacker), + '02': (_makecg2packer, cg2unpacker), # cg3 adds support for exchanging revlog flags and treemanifests - '03': (cg3packer, cg3unpacker), + '03': (_makecg3packer, cg3unpacker), } def allsupportedversions(repo): @@ -899,7 +1384,7 @@ # support versions 01 and 02. 
versions.discard('01') versions.discard('02') - if NARROW_REQUIREMENT in repo.requirements: + if repository.NARROW_REQUIREMENT in repo.requirements: # Versions 01 and 02 don't support revlog flags, and we need to # support that for stripping and unbundling to work. versions.discard('01') @@ -927,9 +1412,32 @@ assert versions return min(versions) -def getbundler(version, repo, bundlecaps=None): +def getbundler(version, repo, bundlecaps=None, filematcher=None, + ellipses=False, shallow=False, ellipsisroots=None, + fullnodes=None): assert version in supportedoutgoingversions(repo) - return _packermap[version][0](repo, bundlecaps) + + if filematcher is None: + filematcher = matchmod.alwaysmatcher(repo.root, '') + + if version == '01' and not filematcher.always(): + raise error.ProgrammingError('version 01 changegroups do not support ' + 'sparse file matchers') + + if ellipses and version in (b'01', b'02'): + raise error.Abort( + _('ellipsis nodes require at least cg3 on client and server, ' + 'but negotiated version %s') % version) + + # Requested files could include files not in the local store. So + # filter those out. + filematcher = matchmod.intersectmatchers(repo.narrowmatch(), + filematcher) + + fn = _packermap[version][0] + return fn(repo, filematcher, bundlecaps, ellipses=ellipses, + shallow=shallow, ellipsisroots=ellipsisroots, + fullnodes=fullnodes) def getunbundler(version, fh, alg, extras=None): return _packermap[version][1](fh, alg, extras=extras) @@ -950,8 +1458,9 @@ {'clcount': len(outgoing.missing) }) def makestream(repo, outgoing, version, source, fastpath=False, - bundlecaps=None): - bundler = getbundler(version, repo, bundlecaps=bundlecaps) + bundlecaps=None, filematcher=None): + bundler = getbundler(version, repo, bundlecaps=bundlecaps, + filematcher=filematcher) repo = repo.unfiltered() commonrevs = outgoing.common @@ -989,7 +1498,7 @@ revisions += len(fl) - o if f in needfiles: needs = needfiles[f] - for new in xrange(o, len(fl)): + for new in pycompat.xrange(o, len(fl)): n = fl.node(new) if n in needs: needs.remove(n)
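
Two implementation details of the changegroup rewrite above are worth illustrating. First, the delta-header formats become precompiled struct.Struct instances, so deltaheadersize is the cached .size attribute and pack/unpack skip re-parsing the format string on every delta. A small sketch of that together with the ">l" chunk framing used by _revisiondeltatochunks(); deltachunks is an illustrative name, and the "+ 4" detail (the on-the-wire length counts its own four header bytes) is inferred from the getchunk/chunkheader pair in this module.

import struct

# Precompiled cg2 delta header: node, p1, p2, basenode, linknode.
CG2_DELTA_HEADER = struct.Struct("20s20s20s20s20s")
assert CG2_DELTA_HEADER.size == 100      # replaces struct.calcsize()

def chunkheader(length):
    # Big-endian 32-bit length that includes these four bytes; a zero
    # length is the group terminator produced by closechunk().
    return struct.pack(">l", length + 4)

def closechunk():
    return struct.pack(">l", 0)

def deltachunks(meta, prefix, data):
    """Frame one delta the way _revisiondeltatochunks() does."""
    yield chunkheader(len(meta) + len(prefix) + len(data))
    yield meta
    if prefix:                           # diff header for full revisions
        yield prefix
    yield data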
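
Second, _handlerevisiondeltarequest() centralizes the delta-base policy that previously lived in cg2packer.deltaparent(). A condensed sketch of that decision, assuming a revlog-like store and passing the previous revision as a number (the real code holds prevnode and calls store.rev(prevnode)); choosebase is an illustrative name.

nullrev = -1

def choosebase(store, rev, prevrev):
    """Pick the delta base when the request allows any ancestor."""
    p1, p2 = store.parentrevs(rev)
    dp = store.deltaparent(rev)

    if dp == nullrev and store.storedeltachains:
        # Stored as a full snapshot but chains are allowed: delta
        # against prev, which the revlog caches, rather than paying to
        # reconstruct p1's raw text.
        base = prevrev
    elif dp == nullrev:
        # The revlog wants a full snapshot here; keep it that way.
        base = nullrev
    elif dp not in (p1, p2, prevrev):
        # The receiver may not have an arbitrary delta parent; prev is
        # safe because it was just sent.
        base = prevrev
    else:
        base = dp

    if base != nullrev and not store.candelta(base, rev):
        base = nullrev
    return base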
--- a/mercurial/changelog.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/changelog.py Mon Aug 20 09:48:08 2018 -0700 @@ -22,7 +22,6 @@ error, pycompat, revlog, - util, ) from .utils import ( dateutil, @@ -313,7 +312,7 @@ self.filteredrevs = frozenset() def tiprev(self): - for i in xrange(len(self) -1, -2, -1): + for i in pycompat.xrange(len(self) -1, -2, -1): if i not in self.filteredrevs: return i @@ -332,7 +331,7 @@ return revlog.revlog.__iter__(self) def filterediter(): - for i in xrange(len(self)): + for i in pycompat.xrange(len(self)): if i not in self.filteredrevs: yield i @@ -344,12 +343,6 @@ if i not in self.filteredrevs: yield i - @util.propertycache - def nodemap(self): - # XXX need filtering too - self.rev(self.node(0)) - return self._nodecache - def reachableroots(self, minroot, heads, roots, includepath=False): return self.index.reachableroots2(minroot, heads, roots, includepath) @@ -563,8 +556,8 @@ if revs is not None: if revs: assert revs[-1] + 1 == rev - revs = xrange(revs[0], rev + 1) + revs = pycompat.xrange(revs[0], rev + 1) else: - revs = xrange(rev, rev + 1) + revs = pycompat.xrange(rev, rev + 1) transaction.changes['revs'] = revs return node
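
The mechanical xrange -> pycompat.xrange substitutions here and throughout this changeset exist because Python 3 has no xrange builtin. Roughly the shim involved, as a sketch of the pattern in mercurial/pycompat.py (not shown in this diff):

import sys

if sys.version_info[0] >= 3:
    # Python 3: range is already lazy, so export it under the old name.
    xrange = range

# The rewritten tiprev() then works on either Python, walking from the
# tip down to the null revision while skipping filtered revs:
#     for i in pycompat.xrange(len(self) - 1, -2, -1): ...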
--- a/mercurial/cmdutil.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/cmdutil.py Mon Aug 20 09:48:08 2018 -0700 @@ -607,17 +607,13 @@ def _unshelvemsg(): return _helpmessage('hg unshelve --continue', 'hg unshelve --abort') -def _updatecleanmsg(dest=None): - warning = _('warning: this will discard uncommitted changes') - return 'hg update --clean %s (%s)' % (dest or '.', warning) - def _graftmsg(): # tweakdefaults requires `update` to have a rev hence the `.` - return _helpmessage('hg graft --continue', _updatecleanmsg()) + return _helpmessage('hg graft --continue', 'hg graft --abort') def _mergemsg(): # tweakdefaults requires `update` to have a rev hence the `.` - return _helpmessage('hg commit', _updatecleanmsg()) + return _helpmessage('hg commit', 'hg merge --abort') def _bisectmsg(): msg = _('To mark the changeset good: hg bisect --good\n' @@ -1755,7 +1751,7 @@ """ cl_count = len(repo) revs = [] - for j in xrange(0, last + 1): + for j in pycompat.xrange(0, last + 1): linkrev = filelog.linkrev(j) if linkrev < minrev: continue @@ -1889,9 +1885,6 @@ revs = _walkrevs(repo, opts) if not revs: return [] - if allfiles and len(revs) > 1: - raise error.Abort(_("multiple revisions not supported with " - "--all-files")) wanted = set() slowpath = match.anypats() or (not match.always() and opts.get('removed')) fncache = {} @@ -1902,7 +1895,7 @@ # wanted: a cache of filenames that were changed (ctx.files()) and that # match the file filtering conditions. - if match.always(): + if match.always() or allfiles: # No files, no patterns. Display all revs. wanted = revs elif not slowpath: @@ -1966,7 +1959,7 @@ rev = repo[rev].rev() ff = _followfilter(repo) stop = min(revs[0], revs[-1]) - for x in xrange(rev, stop - 1, -1): + for x in pycompat.xrange(rev, stop - 1, -1): if ff.match(x): wanted = wanted - [x] @@ -1985,7 +1978,7 @@ stopiteration = False for windowsize in increasingwindows(): nrevs = [] - for i in xrange(windowsize): + for i in pycompat.xrange(windowsize): rev = next(it, None) if rev is None: stopiteration = True @@ -2038,7 +2031,8 @@ cca(f) names.append(f) if ui.verbose or not exact: - ui.status(_('adding %s\n') % match.rel(f)) + ui.status(_('adding %s\n') % match.rel(f), + label='addremove.added') for subpath in sorted(wctx.substate): sub = wctx.sub(subpath) @@ -2136,7 +2130,8 @@ for f in forget: if ui.verbose or not match.exact(f) or interactive: - ui.status(_('removing %s\n') % match.rel(f)) + ui.status(_('removing %s\n') % match.rel(f), + label='addremove.removed') if not dryrun: rejected = wctx.forget(forget, prefix) @@ -2269,7 +2264,8 @@ for f in list: if ui.verbose or not m.exact(f): progress.increment() - ui.status(_('removing %s\n') % m.rel(f)) + ui.status(_('removing %s\n') % m.rel(f), + label='addremove.removed') progress.complete() if not dryrun: @@ -2428,7 +2424,7 @@ if len(old.parents()) > 1: # ctx.files() isn't reliable for merges, so fall back to the # slower repo.status() method - files = set([fn for st in repo.status(base, old)[:3] + files = set([fn for st in base.status(old)[:3] for fn in st]) else: files = set(old.files()) @@ -2556,8 +2552,10 @@ obsmetadata = None if opts.get('note'): obsmetadata = {'note': encoding.fromlocal(opts['note'])} + backup = ui.configbool('ui', 'history-editing-backup') scmutil.cleanupnodes(repo, mapping, 'amend', metadata=obsmetadata, - fixphase=True, targetphase=commitphase) + fixphase=True, targetphase=commitphase, + backup=backup) # Fixing the dirstate because localrepo.commitctx does not update # it. 
    # This is rather convenient because we did not need to update
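The amend hunk above now consults ui.history-editing-backup before handing nodes to scmutil.cleanupnodes(), so users can opt out of the strip-backup bundles written when amend rewrites a commit. An example hgrc (assuming the option's default stays enabled):

    [ui]
    # do not write backup bundles when history-editing commands rewrite commits
    history-editing-backup = False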
--- a/mercurial/color.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/color.py Mon Aug 20 09:48:08 2018 -0700 @@ -83,6 +83,8 @@ 'grep.filename': 'magenta', 'grep.user': 'magenta', 'grep.date': 'magenta', + 'addremove.added': 'green', + 'addremove.removed': 'red', 'bookmarks.active': 'green', 'branches.active': 'none', 'branches.closed': 'black bold', @@ -117,6 +119,7 @@ 'formatvariant.config.default': 'green', 'formatvariant.default': '', 'histedit.remaining': 'red bold', + 'ui.error': 'red', 'ui.prompt': 'yellow', 'log.changeset': 'yellow', 'patchbomb.finalsummary': '',
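The addremove.added and addremove.removed entries above are ordinary color labels, so the defaults can be overridden from a user's configuration like any other label, for example:

    [color]
    addremove.added = green bold
    addremove.removed = red bold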
--- a/mercurial/commands.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/commands.py Mon Aug 20 09:48:08 2018 -0700 @@ -35,6 +35,7 @@ error, exchange, extensions, + filemerge, formatter, graphmod, hbisect, @@ -2532,6 +2533,7 @@ """ opts = pycompat.byteskwargs(opts) diff = opts.get('all') or opts.get('diff') + all_files = opts.get('all_files') if diff and opts.get('all_files'): raise error.Abort(_('--diff and --all-files are mutually exclusive')) # TODO: remove "not opts.get('rev')" if --all-files -rMULTIREV gets working @@ -2606,16 +2608,16 @@ def difflinestates(a, b): sm = difflib.SequenceMatcher(None, a, b) for tag, alo, ahi, blo, bhi in sm.get_opcodes(): - if tag == 'insert': - for i in xrange(blo, bhi): + if tag == r'insert': + for i in pycompat.xrange(blo, bhi): yield ('+', b[i]) - elif tag == 'delete': - for i in xrange(alo, ahi): + elif tag == r'delete': + for i in pycompat.xrange(alo, ahi): yield ('-', a[i]) - elif tag == 'replace': - for i in xrange(alo, ahi): + elif tag == r'replace': + for i in pycompat.xrange(alo, ahi): yield ('-', a[i]) - for i in xrange(blo, bhi): + for i in pycompat.xrange(blo, bhi): yield ('+', b[i]) def display(fm, fn, ctx, pstates, states): @@ -2623,7 +2625,7 @@ if fm.isplain(): formatuser = ui.shortuser else: - formatuser = str + formatuser = pycompat.bytestr if ui.quiet: datefmt = '%Y-%m-%d' else: @@ -2648,20 +2650,22 @@ fm.data(node=fm.hexfunc(scmutil.binnode(ctx))) cols = [ - ('filename', fn, True), - ('rev', rev, not plaingrep), - ('linenumber', l.linenum, opts.get('line_number')), + ('filename', '%s', fn, True), + ('rev', '%d', rev, not plaingrep), + ('linenumber', '%d', l.linenum, opts.get('line_number')), ] if diff: - cols.append(('change', change, True)) + cols.append(('change', '%s', change, True)) cols.extend([ - ('user', formatuser(ctx.user()), opts.get('user')), - ('date', fm.formatdate(ctx.date(), datefmt), opts.get('date')), + ('user', '%s', formatuser(ctx.user()), opts.get('user')), + ('date', '%s', fm.formatdate(ctx.date(), datefmt), + opts.get('date')), ]) - lastcol = next(name for name, data, cond in reversed(cols) if cond) - for name, data, cond in cols: + lastcol = next( + name for name, fmt, data, cond in reversed(cols) if cond) + for name, fmt, data, cond in cols: field = fieldnamemap.get(name, name) - fm.condwrite(cond, field, '%s', data, label='grep.%s' % name) + fm.condwrite(cond, field, fmt, data, label='grep.%s' % name) if cond and name != lastcol: fm.plain(sep, label='grep.sep') if not opts.get('files_with_matches'): @@ -2756,7 +2760,7 @@ if pstates or states: r = display(fm, fn, ctx, pstates, states) found = found or r - if r and not diff: + if r and not diff and not all_files: skip[fn] = True if copy: skip[copy] = True @@ -4528,6 +4532,7 @@ """ opts = pycompat.byteskwargs(opts) + confirm = ui.configbool('commands', 'resolve.confirm') flaglist = 'all mark unmark list no_status'.split() all, mark, unmark, show, nostatus = \ [opts.get(o) for o in flaglist] @@ -4540,6 +4545,20 @@ raise error.Abort(_('no files or directories specified'), hint=('use --all to re-merge all unresolved files')) + if confirm: + if all: + if ui.promptchoice(_(b're-merge all unresolved files (yn)?' + b'$$ &Yes $$ &No')): + raise error.Abort(_('user quit')) + if mark and not pats: + if ui.promptchoice(_(b'mark all unresolved files as resolved (yn)?' + b'$$ &Yes $$ &No')): + raise error.Abort(_('user quit')) + if unmark and not pats: + if ui.promptchoice(_(b'mark all resolved files as unresolved (yn)?' 
+ b'$$ &Yes $$ &No')): + raise error.Abort(_('user quit')) + if show: ui.pager('resolve') fm = ui.formatter('resolve', opts) @@ -4594,6 +4613,12 @@ runconclude = False tocomplete = [] + hasconflictmarkers = [] + if mark: + markcheck = ui.config('commands', 'resolve.mark-check') + if markcheck not in ['warn', 'abort']: + # Treat all invalid / unrecognized values as 'none'. + markcheck = False for f in ms: if not m(f): continue @@ -4629,6 +4654,12 @@ continue if mark: + if markcheck: + with repo.wvfs(f) as fobj: + fdata = fobj.read() + if filemerge.hasconflictmarkers(fdata) and \ + ms[f] != mergemod.MERGE_RECORD_RESOLVED: + hasconflictmarkers.append(f) ms.mark(f, mergemod.MERGE_RECORD_RESOLVED) elif unmark: ms.mark(f, mergemod.MERGE_RECORD_UNRESOLVED) @@ -4663,6 +4694,13 @@ if inst.errno != errno.ENOENT: raise + if hasconflictmarkers: + ui.warn(_('warning: the following files still have conflict ' + 'markers:\n ') + '\n '.join(hasconflictmarkers) + '\n') + if markcheck == 'abort' and not all: + raise error.Abort(_('conflict markers detected'), + hint=_('use --all to mark anyway')) + for f in tocomplete: try: # resolve file
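The resolve hunks above are driven by two new knobs, commands.resolve.confirm and commands.resolve.mark-check (registered with their defaults in the configitems.py hunk below). With mark-check, 'warn' only reports files that still contain conflict markers, while 'abort' refuses to mark them. A plausible opt-in configuration:

    [commands]
    # prompt before re-merging or (un)marking all unresolved files
    resolve.confirm = True
    # refuse to mark files resolved while conflict markers remain
    resolve.mark-check = abort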
--- a/mercurial/commandserver.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/commandserver.py Mon Aug 20 09:48:08 2018 -0700 @@ -353,7 +353,7 @@ # handle exceptions that may be raised by command server. most of # known exceptions are caught by dispatch. except error.Abort as inst: - ui.warn(_('abort: %s\n') % inst) + ui.error(_('abort: %s\n') % inst) except IOError as inst: if inst.errno != errno.EPIPE: raise
--- a/mercurial/configitems.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/configitems.py Mon Aug 20 09:48:08 2018 -0700 @@ -190,6 +190,12 @@ coreconfigitem('commands', 'grep.all-files', default=False, ) +coreconfigitem('commands', 'resolve.confirm', + default=False, +) +coreconfigitem('commands', 'resolve.mark-check', + default='none', +) coreconfigitem('commands', 'show.aliasprefix', default=list, ) @@ -584,9 +590,15 @@ coreconfigitem('experimental', 'removeemptydirs', default=True, ) +coreconfigitem('experimental', 'revisions.prefixhexnode', + default=False, +) coreconfigitem('experimental', 'revlogv2', default=None, ) +coreconfigitem('experimental', 'revisions.disambiguatewithin', + default=None, +) coreconfigitem('experimental', 'single-head-per-branch', default=False, ) @@ -759,6 +771,9 @@ coreconfigitem('merge', 'preferancestor', default=lambda: ['*'], ) +coreconfigitem('merge', 'strict-capability-check', + default=False, +) coreconfigitem('merge-tools', '.*', default=None, generic=True,
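The experimental.revisions.* items registered above pair with the AmbiguousPrefixLookupError added in error.py below: disambiguatewithin takes a revset within which short node prefixes must be unique, and prefixhexnode marks such shortened prefixes with a leading 'x'. A hypothetical experiment (both options are experimental and their semantics may shift):

    [experimental]
    # only require short hashes to be unambiguous among non-public changesets
    revisions.disambiguatewithin = not public()
    revisions.prefixhexnode = yes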
--- a/mercurial/context.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/context.py Mon Aug 20 09:48:08 2018 -0700 @@ -372,6 +372,10 @@ for rfiles, sfiles in zip(r, s): rfiles.extend("%s/%s" % (subpath, f) for f in sfiles) + narrowmatch = self._repo.narrowmatch() + if not narrowmatch.always(): + for l in r: + l[:] = list(filter(narrowmatch, l)) for l in r: l.sort() @@ -438,7 +442,6 @@ "unsupported changeid '%s' of type %s" % (changeid, type(changeid))) - # lookup failed except (error.FilteredIndexError, error.FilteredLookupError): raise error.FilteredRepoLookupError(_("filtered revision '%s'") % pycompat.bytestr(changeid)) @@ -590,12 +593,6 @@ short(n) for n in sorted(cahs) if n != anc)) return changectx(self._repo, anc) - def descendant(self, other): - msg = (b'ctx.descendant(other) is deprecated, ' - b'use ctx.isancestorof(other)') - self._repo.ui.deprecwarn(msg, b'4.7') - return self.isancestorof(other) - def isancestorof(self, other): """True if this changeset is an ancestor of other""" return self._repo.changelog.isancestorrev(self._rev, other._rev) @@ -1903,9 +1900,9 @@ # Test that each new directory to be created to write this path from p2 # is not a file in p1. components = path.split('/') - for i in xrange(len(components)): + for i in pycompat.xrange(len(components)): component = "/".join(components[0:i]) - if component in self.p1(): + if component in self.p1() and self._cache[component]['exists']: fail(path, component) # Test the other direction -- that this path from p2 isn't a directory @@ -1929,8 +1926,13 @@ flags=flags) def setflags(self, path, l, x): + flag = '' + if l: + flag = 'l' + elif x: + flag = 'x' self._markdirty(path, exists=True, date=dateutil.makedate(), - flags=(l and 'l' or '') + (x and 'x' or '')) + flags=flag) def remove(self, path): self._markdirty(path, exists=False) @@ -2037,6 +2039,13 @@ return keys def _markdirty(self, path, exists, data=None, date=None, flags=''): + # data not provided, let's see if we already have some; if not, let's + # grab it from our underlying context, so that we always have data if + # the file is marked as existing. + if exists and data is None: + oldentry = self._cache.get(path) or {} + data = oldentry.get('data') or self._wrappedctx[path].data() + self._cache[path] = { 'exists': exists, 'data': data,
--- a/mercurial/dagop.py	Sun Aug 19 13:27:02 2018 +0900
+++ b/mercurial/dagop.py	Mon Aug 20 09:48:08 2018 -0700
@@ -195,7 +195,7 @@
     """Build map of 'rev -> child revs', offset from startrev"""
     cl = repo.changelog
     nullrev = node.nullrev
-    descmap = [[] for _rev in xrange(startrev, len(cl))]
+    descmap = [[] for _rev in pycompat.xrange(startrev, len(cl))]
     for currev in cl.revs(startrev + 1):
         p1rev, p2rev = cl.parentrevs(currev)
         if p1rev >= startrev:
@@ -435,7 +435,7 @@
     for idx, (parent, blocks) in enumerate(pblocks):
         for (a1, a2, b1, b2), _t in blocks:
             if a2 - a1 >= b2 - b1:
-                for bk in xrange(b1, b2):
+                for bk in pycompat.xrange(b1, b2):
                     if child.fctxs[bk] == childfctx:
                         ak = min(a1 + (bk - b1), a2 - 1)
                         child.fctxs[bk] = parent.fctxs[ak]
@@ -448,7 +448,7 @@
     # line.
     for parent, blocks in remaining:
         for a1, a2, b1, b2 in blocks:
-            for bk in xrange(b1, b2):
+            for bk in pycompat.xrange(b1, b2):
                 if child.fctxs[bk] == childfctx:
                     ak = min(a1 + (bk - b1), a2 - 1)
                     child.fctxs[bk] = parent.fctxs[ak]
@@ -715,3 +715,63 @@
     for g in groups:
         for r in g[0]:
             yield r
+
+def headrevs(revs, parentsfn):
+    """Resolve the set of heads from a set of revisions.
+
+    Receives an iterable of revision numbers and a callable that receives a
+    revision number and returns an iterable of parent revision numbers,
+    possibly including nullrev.
+
+    Returns a set of revision numbers that are DAG heads within the passed
+    subset.
+
+    ``nullrev`` is never included in the returned set, even if it is provided
+    in the input set.
+    """
+    headrevs = set(revs)
+
+    for rev in revs:
+        for prev in parentsfn(rev):
+            headrevs.discard(prev)
+
+    headrevs.discard(node.nullrev)
+
+    return headrevs
+
+def linearize(revs, parentsfn):
+    """Linearize and topologically sort a list of revisions.
+
+    The linearization process tries to create long runs of revs where a child
+    rev comes immediately after its first parent. This is done by visiting the
+    heads of the revs in inverse topological order, and for each visited rev,
+    visiting its second parent, then its first parent, then adding the rev
+    itself to the output list.
+
+    Returns a list of revision numbers.
+    """
+    visit = list(sorted(headrevs(revs, parentsfn), reverse=True))
+    finished = set()
+    result = []
+
+    while visit:
+        rev = visit.pop()
+        if rev < 0:
+            rev = -rev - 1
+
+            if rev not in finished:
+                result.append(rev)
+                finished.add(rev)
+
+        else:
+            visit.append(-rev - 1)
+
+            for prev in parentsfn(rev):
+                if prev == node.nullrev or prev not in revs or prev in finished:
+                    continue
+
+                visit.append(prev)
+
+    assert len(result) == len(revs)
+
+    return result
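headrevs() and linearize() above are pure functions of revision numbers plus a parents callback, so they can be exercised without a repository. A toy illustration (the diamond DAG and asserts are mine, not from the patch):

    from mercurial import dagop, node

    # diamond: 0 is the root, 1 and 2 branch off it, 3 merges them
    parents = {0: [node.nullrev], 1: [0], 2: [0], 3: [1, 2]}
    revs = {0, 1, 2, 3}

    assert dagop.headrevs(revs, parents.__getitem__) == {3}
    # the second parent 2 is emitted first, so 3 directly follows its
    # first parent 1, giving the "long run" the docstring describes
    assert dagop.linearize(revs, parents.__getitem__) == [0, 2, 1, 3]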
--- a/mercurial/dagparser.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/dagparser.py Mon Aug 20 09:48:08 2018 -0700 @@ -222,7 +222,7 @@ elif c == '+': c, digs = nextrun(nextch(), pycompat.bytestr(string.digits)) n = int(digs) - for i in xrange(0, n): + for i in pycompat.xrange(0, n): yield 'n', (r, [p1]) p1 = r r += 1
--- a/mercurial/dagutil.py Sun Aug 19 13:27:02 2018 +0900 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,287 +0,0 @@ -# dagutil.py - dag utilities for mercurial -# -# Copyright 2010 Benoit Boissinot <bboissin@gmail.com> -# and Peter Arrenbrecht <peter@arrenbrecht.ch> -# -# This software may be used and distributed according to the terms of the -# GNU General Public License version 2 or any later version. - -from __future__ import absolute_import - -from .i18n import _ -from .node import nullrev - -class basedag(object): - '''generic interface for DAGs - - terms: - "ix" (short for index) identifies a nodes internally, - "id" identifies one externally. - - All params are ixs unless explicitly suffixed otherwise. - Pluralized params are lists or sets. - ''' - - def __init__(self): - self._inverse = None - - def nodeset(self): - '''set of all node ixs''' - raise NotImplementedError - - def heads(self): - '''list of head ixs''' - raise NotImplementedError - - def parents(self, ix): - '''list of parents ixs of ix''' - raise NotImplementedError - - def inverse(self): - '''inverse DAG, where parents becomes children, etc.''' - raise NotImplementedError - - def ancestorset(self, starts, stops=None): - ''' - set of all ancestors of starts (incl), but stop walk at stops (excl) - ''' - raise NotImplementedError - - def descendantset(self, starts, stops=None): - ''' - set of all descendants of starts (incl), but stop walk at stops (excl) - ''' - return self.inverse().ancestorset(starts, stops) - - def headsetofconnecteds(self, ixs): - ''' - subset of connected list of ixs so that no node has a descendant in it - - By "connected list" we mean that if an ancestor and a descendant are in - the list, then so is at least one path connecting them. - ''' - raise NotImplementedError - - def externalize(self, ix): - '''return a node id''' - return self._externalize(ix) - - def externalizeall(self, ixs): - '''return a list of (or set if given a set) of node ids''' - ids = self._externalizeall(ixs) - if isinstance(ixs, set): - return set(ids) - return list(ids) - - def internalize(self, id): - '''return a node ix''' - return self._internalize(id) - - def internalizeall(self, ids, filterunknown=False): - '''return a list of (or set if given a set) of node ixs''' - ixs = self._internalizeall(ids, filterunknown) - if isinstance(ids, set): - return set(ixs) - return list(ixs) - - -class genericdag(basedag): - '''generic implementations for DAGs''' - - def ancestorset(self, starts, stops=None): - if stops: - stops = set(stops) - else: - stops = set() - seen = set() - pending = list(starts) - while pending: - n = pending.pop() - if n not in seen and n not in stops: - seen.add(n) - pending.extend(self.parents(n)) - return seen - - def headsetofconnecteds(self, ixs): - hds = set(ixs) - if not hds: - return hds - for n in ixs: - for p in self.parents(n): - hds.discard(p) - assert hds - return hds - - -class revlogbaseddag(basedag): - '''generic dag interface to a revlog''' - - def __init__(self, revlog, nodeset): - basedag.__init__(self) - self._revlog = revlog - self._heads = None - self._nodeset = nodeset - - def nodeset(self): - return self._nodeset - - def heads(self): - if self._heads is None: - self._heads = self._getheads() - return self._heads - - def _externalize(self, ix): - return self._revlog.index[ix][7] - def _externalizeall(self, ixs): - idx = self._revlog.index - return [idx[i][7] for i in ixs] - - def _internalize(self, id): - ix = self._revlog.rev(id) - if ix == nullrev: - raise LookupError(id, 
self._revlog.indexfile, _('nullid')) - return ix - def _internalizeall(self, ids, filterunknown): - rl = self._revlog - if filterunknown: - return [r for r in map(rl.nodemap.get, ids) - if (r is not None - and r != nullrev - and r not in rl.filteredrevs)] - return [self._internalize(i) for i in ids] - - -class revlogdag(revlogbaseddag): - '''dag interface to a revlog''' - - def __init__(self, revlog, localsubset=None): - revlogbaseddag.__init__(self, revlog, set(revlog)) - self._heads = localsubset - - def _getheads(self): - return [r for r in self._revlog.headrevs() if r != nullrev] - - def parents(self, ix): - rlog = self._revlog - idx = rlog.index - revdata = idx[ix] - prev = revdata[5] - if prev != nullrev: - prev2 = revdata[6] - if prev2 == nullrev: - return [prev] - return [prev, prev2] - prev2 = revdata[6] - if prev2 != nullrev: - return [prev2] - return [] - - def inverse(self): - if self._inverse is None: - self._inverse = inverserevlogdag(self) - return self._inverse - - def ancestorset(self, starts, stops=None): - rlog = self._revlog - idx = rlog.index - if stops: - stops = set(stops) - else: - stops = set() - seen = set() - pending = list(starts) - while pending: - rev = pending.pop() - if rev not in seen and rev not in stops: - seen.add(rev) - revdata = idx[rev] - for i in [5, 6]: - prev = revdata[i] - if prev != nullrev: - pending.append(prev) - return seen - - def headsetofconnecteds(self, ixs): - if not ixs: - return set() - rlog = self._revlog - idx = rlog.index - headrevs = set(ixs) - for rev in ixs: - revdata = idx[rev] - for i in [5, 6]: - prev = revdata[i] - if prev != nullrev: - headrevs.discard(prev) - assert headrevs - return headrevs - - def linearize(self, ixs): - '''linearize and topologically sort a list of revisions - - The linearization process tries to create long runs of revs where - a child rev comes immediately after its first parent. This is done by - visiting the heads of the given revs in inverse topological order, - and for each visited rev, visiting its second parent, then its first - parent, then adding the rev itself to the output list. - ''' - sorted = [] - visit = list(self.headsetofconnecteds(ixs)) - visit.sort(reverse=True) - finished = set() - - while visit: - cur = visit.pop() - if cur < 0: - cur = -cur - 1 - if cur not in finished: - sorted.append(cur) - finished.add(cur) - else: - visit.append(-cur - 1) - visit += [p for p in self.parents(cur) - if p in ixs and p not in finished] - assert len(sorted) == len(ixs) - return sorted - - -class inverserevlogdag(revlogbaseddag, genericdag): - '''inverse of an existing revlog dag; see revlogdag.inverse()''' - - def __init__(self, orig): - revlogbaseddag.__init__(self, orig._revlog, orig._nodeset) - self._orig = orig - self._children = {} - self._roots = [] - self._walkfrom = len(self._revlog) - 1 - - def _walkto(self, walkto): - rev = self._walkfrom - cs = self._children - roots = self._roots - idx = self._revlog.index - while rev >= walkto: - data = idx[rev] - isroot = True - for prev in [data[5], data[6]]: # parent revs - if prev != nullrev: - cs.setdefault(prev, []).append(rev) - isroot = False - if isroot: - roots.append(rev) - rev -= 1 - self._walkfrom = rev - - def _getheads(self): - self._walkto(nullrev) - return self._roots - - def parents(self, ix): - if ix is None: - return [] - if ix <= self._walkfrom: - self._walkto(ix) - return self._children.get(ix, []) - - def inverse(self): - return self._orig
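With dagutil.py deleted, its revlog-bound helpers map onto the pure dagop functions added earlier in this changeset; the parents callback is simply the changelog's parentrevs(). A sketch of the migration at a hypothetical call site:

    # before: dag = dagutil.revlogdag(repo.changelog)
    #         ordered = dag.linearize(revs)
    # after:
    ordered = dagop.linearize(set(revs), repo.changelog.parentrevs)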
--- a/mercurial/debugcommands.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/debugcommands.py Mon Aug 20 09:48:08 2018 -0700 @@ -42,13 +42,12 @@ color, context, dagparser, - dagutil, encoding, error, exchange, extensions, filemerge, - fileset, + filesetlang, formatter, hg, httppeer, @@ -177,7 +176,8 @@ if mergeable_file: linesperrev = 2 # make a file with k lines per rev - initialmergedlines = ['%d' % i for i in xrange(0, total * linesperrev)] + initialmergedlines = ['%d' % i + for i in pycompat.xrange(0, total * linesperrev)] initialmergedlines.append("") tags = [] @@ -790,9 +790,10 @@ if not opts.get('nonheads'): ui.write(("unpruned common: %s\n") % " ".join(sorted(short(n) for n in common))) - dag = dagutil.revlogdag(repo.changelog) - all = dag.ancestorset(dag.internalizeall(common)) - common = dag.externalizeall(dag.headsetofconnecteds(all)) + + clnode = repo.changelog.node + common = repo.revs('heads(::%ln)', common) + common = {clnode(r) for r in common} else: nodes = None if pushedrevs: @@ -887,15 +888,45 @@ @command('debugfileset', [('r', 'rev', '', _('apply the filespec on this revision'), _('REV')), ('', 'all-files', False, - _('test files from all revisions and working directory'))], - _('[-r REV] [--all-files] FILESPEC')) + _('test files from all revisions and working directory')), + ('s', 'show-matcher', None, + _('print internal representation of matcher')), + ('p', 'show-stage', [], + _('print parsed tree at the given stage'), _('NAME'))], + _('[-r REV] [--all-files] [OPTION]... FILESPEC')) def debugfileset(ui, repo, expr, **opts): '''parse and apply a fileset specification''' + from . import fileset + fileset.symbols # force import of fileset so we have predicates to optimize opts = pycompat.byteskwargs(opts) ctx = scmutil.revsingle(repo, opts.get('rev'), None) - if ui.verbose: - tree = fileset.parse(expr) - ui.note(fileset.prettyformat(tree), "\n") + + stages = [ + ('parsed', pycompat.identity), + ('analyzed', filesetlang.analyze), + ('optimized', filesetlang.optimize), + ] + stagenames = set(n for n, f in stages) + + showalways = set() + if ui.verbose and not opts['show_stage']: + # show parsed tree by --verbose (deprecated) + showalways.add('parsed') + if opts['show_stage'] == ['all']: + showalways.update(stagenames) + else: + for n in opts['show_stage']: + if n not in stagenames: + raise error.Abort(_('invalid stage name: %s') % n) + showalways.update(opts['show_stage']) + + tree = filesetlang.parse(expr) + for n, f in stages: + tree = f(tree) + if n in showalways: + if opts['show_stage'] or n != 'parsed': + ui.write(("* %s:\n") % n) + ui.write(filesetlang.prettyformat(tree), "\n") files = set() if opts['all_files']: @@ -914,14 +945,15 @@ files.update(ctx.substate) m = ctx.matchfileset(expr) + if opts['show_matcher'] or (opts['show_matcher'] is None and ui.verbose): + ui.write(('* matcher:\n'), stringutil.prettyrepr(m), '\n') for f in sorted(files): if not m(f): continue ui.write("%s\n" % f) @command('debugformat', - [] + cmdutil.formatteropts, - _('')) + [] + cmdutil.formatteropts) def debugformat(ui, repo, **opts): """display format information about the current repository @@ -1446,6 +1478,53 @@ return held +@command('debugmanifestfulltextcache', [ + ('', 'clear', False, _('clear the cache')), + ('a', 'add', '', _('add the given manifest node to the cache'), + _('NODE')) + ], '') +def debugmanifestfulltextcache(ui, repo, add=None, **opts): + """show, clear or amend the contents of the manifest fulltext cache""" + with repo.lock(): + r = repo.manifestlog._revlog + 
try: + cache = r._fulltextcache + except AttributeError: + ui.warn(_( + "Current revlog implementation doesn't appear to have a " + 'manifest fulltext cache\n')) + return + + if opts.get(r'clear'): + cache.clear() + + if add: + try: + manifest = repo.manifestlog[r.lookup(add)] + except error.LookupError as e: + raise error.Abort(e, hint="Check your manifest node id") + manifest.read() # stores revisision in cache too + + if not len(cache): + ui.write(_('Cache empty')) + else: + ui.write( + _('Cache contains %d manifest entries, in order of most to ' + 'least recent:\n') % (len(cache),)) + totalsize = 0 + for nodeid in cache: + # Use cache.get to not update the LRU order + data = cache.get(nodeid) + size = len(data) + totalsize += size + 24 # 20 bytes nodeid, 4 bytes size + ui.write(_('id: %s, size %s\n') % ( + hex(nodeid), util.bytecount(size))) + ondisk = cache._opener.stat('manifestfulltextcache').st_size + ui.write( + _('Total cache data size %s, on-disk %s\n') % ( + util.bytecount(totalsize), util.bytecount(ondisk)) + ) + @command('debugmergestate', [], '') def debugmergestate(ui, repo, *args): """print merge state @@ -1971,7 +2050,7 @@ ts = 0 heads = set() - for rev in xrange(numrevs): + for rev in pycompat.xrange(numrevs): dbase = r.deltaparent(rev) if dbase == -1: dbase = rev @@ -2006,20 +2085,43 @@ if not flags: flags = ['(none)'] + ### tracks merge vs single parent nummerges = 0 + + ### tracks ways the "delta" are build + # nodelta + numempty = 0 + numemptytext = 0 + numemptydelta = 0 + # full file content numfull = 0 + # intermediate snapshot against a prior snapshot + numsemi = 0 + # snapshot count per depth + numsnapdepth = collections.defaultdict(lambda: 0) + # delta against previous revision numprev = 0 + # delta against first or second parent (not prev) nump1 = 0 nump2 = 0 + # delta against neither prev nor parents numother = 0 + # delta against prev that are also first or second parent + # (details of `numprev`) nump1prev = 0 nump2prev = 0 + + # data about delta chain of each revs chainlengths = [] chainbases = [] chainspans = [] + # data about each revision datasize = [None, 0, 0] fullsize = [None, 0, 0] + semisize = [None, 0, 0] + # snapshot count per depth + snapsizedepth = collections.defaultdict(lambda: [None, 0, 0]) deltasize = [None, 0, 0] chunktypecounts = {} chunktypesizes = {} @@ -2032,7 +2134,7 @@ l[2] += size numrevs = len(r) - for rev in xrange(numrevs): + for rev in pycompat.xrange(numrevs): p1, p2 = r.parentrevs(rev) delta = r.deltaparent(rev) if format > 0: @@ -2044,30 +2146,49 @@ chainlengths.append(0) chainbases.append(r.start(rev)) chainspans.append(size) - numfull += 1 - addsize(size, fullsize) + if size == 0: + numempty += 1 + numemptytext += 1 + else: + numfull += 1 + numsnapdepth[0] += 1 + addsize(size, fullsize) + addsize(size, snapsizedepth[0]) else: chainlengths.append(chainlengths[delta] + 1) baseaddr = chainbases[delta] revaddr = r.start(rev) chainbases.append(baseaddr) chainspans.append((revaddr - baseaddr) + size) - addsize(size, deltasize) - if delta == rev - 1: - numprev += 1 - if delta == p1: - nump1prev += 1 + if size == 0: + numempty += 1 + numemptydelta += 1 + elif r.issnapshot(rev): + addsize(size, semisize) + numsemi += 1 + depth = r.snapshotdepth(rev) + numsnapdepth[depth] += 1 + addsize(size, snapsizedepth[depth]) + else: + addsize(size, deltasize) + if delta == rev - 1: + numprev += 1 + if delta == p1: + nump1prev += 1 + elif delta == p2: + nump2prev += 1 + elif delta == p1: + nump1 += 1 elif delta == p2: - nump2prev += 1 - elif 
delta == p1: - nump1 += 1 - elif delta == p2: - nump2 += 1 - elif delta != nullrev: - numother += 1 + nump2 += 1 + elif delta != nullrev: + numother += 1 # Obtain data on the raw chunks in the revlog. - segment = r._getsegmentforrevs(rev, rev)[1] + if util.safehasattr(r, '_getsegmentforrevs'): + segment = r._getsegmentforrevs(rev, rev)[1] + else: + segment = r._revlog._getsegmentforrevs(rev, rev)[1] if segment: chunktype = bytes(segment[0:1]) else: @@ -2081,20 +2202,28 @@ chunktypesizes[chunktype] += size # Adjust size min value for empty cases - for size in (datasize, fullsize, deltasize): + for size in (datasize, fullsize, semisize, deltasize): if size[0] is None: size[0] = 0 - numdeltas = numrevs - numfull + numdeltas = numrevs - numfull - numempty - numsemi numoprev = numprev - nump1prev - nump2prev totalrawsize = datasize[2] datasize[2] /= numrevs fulltotal = fullsize[2] fullsize[2] /= numfull + semitotal = semisize[2] + snaptotal = {} + if 0 < numsemi: + semisize[2] /= numsemi + for depth in snapsizedepth: + snaptotal[depth] = snapsizedepth[depth][2] + snapsizedepth[depth][2] /= numsnapdepth[depth] + deltatotal = deltasize[2] - if numrevs - numfull > 0: - deltasize[2] /= numrevs - numfull - totalsize = fulltotal + deltatotal + if numdeltas > 0: + deltasize[2] /= numdeltas + totalsize = fulltotal + semitotal + deltatotal avgchainlen = sum(chainlengths) / numrevs maxchainlen = max(chainlengths) maxchainspan = max(chainspans) @@ -2126,10 +2255,22 @@ ui.write((' merges : ') + fmt % pcfmt(nummerges, numrevs)) ui.write((' normal : ') + fmt % pcfmt(numrevs - nummerges, numrevs)) ui.write(('revisions : ') + fmt2 % numrevs) - ui.write((' full : ') + fmt % pcfmt(numfull, numrevs)) + ui.write((' empty : ') + fmt % pcfmt(numempty, numrevs)) + ui.write((' text : ') + + fmt % pcfmt(numemptytext, numemptytext + numemptydelta)) + ui.write((' delta : ') + + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta)) + ui.write((' snapshot : ') + fmt % pcfmt(numfull + numsemi, numrevs)) + for depth in sorted(numsnapdepth): + ui.write((' lvl-%-3d : ' % depth) + + fmt % pcfmt(numsnapdepth[depth], numrevs)) ui.write((' deltas : ') + fmt % pcfmt(numdeltas, numrevs)) ui.write(('revision size : ') + fmt2 % totalsize) - ui.write((' full : ') + fmt % pcfmt(fulltotal, totalsize)) + ui.write((' snapshot : ') + + fmt % pcfmt(fulltotal + semitotal, totalsize)) + for depth in sorted(numsnapdepth): + ui.write((' lvl-%-3d : ' % depth) + + fmt % pcfmt(snaptotal[depth], totalsize)) ui.write((' deltas : ') + fmt % pcfmt(deltatotal, totalsize)) def fmtchunktype(chunktype): @@ -2163,6 +2304,13 @@ % tuple(datasize)) ui.write(('full revision size (min/max/avg) : %d / %d / %d\n') % tuple(fullsize)) + ui.write(('inter-snapshot size (min/max/avg) : %d / %d / %d\n') + % tuple(semisize)) + for depth in sorted(snapsizedepth): + if depth == 0: + continue + ui.write((' level-%-3d (min/max/avg) : %d / %d / %d\n') + % ((depth,) + tuple(snapsizedepth[depth]))) ui.write(('delta size (min/max/avg) : %d / %d / %d\n') % tuple(deltasize)) @@ -2642,7 +2790,7 @@ if line.startswith(b'#'): continue - if not line.startswith(' '): + if not line.startswith(b' '): # New block. Flush previous one. if activeaction: yield activeaction, blocklines @@ -3122,13 +3270,14 @@ # urllib.Request insists on using has_data() as a proxy for # determining the request method. Override that to use our # explicitly requested method. 
- req.get_method = lambda: method + req.get_method = lambda: pycompat.sysstr(method) try: res = opener.open(req) body = res.read() except util.urlerr.urlerror as e: - e.read() + # read() method must be called, but only exists in Python 2 + getattr(e, 'read', lambda: None)() continue if res.headers.get('Content-Type') == 'application/mercurial-cbor':
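The reworked debugfileset above accepts repeatable --show-stage names from its stages table (parsed, analyzed, optimized), with 'all' expanding to every stage, and --show-matcher to dump the compiled matcher. A sample invocation (the expression is chosen arbitrarily):

    $ hg debugfileset --show-stage all 'binary() and size(">100k")'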
--- a/mercurial/diffhelper.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/diffhelper.py Mon Aug 20 09:48:08 2018 -0700 @@ -11,6 +11,7 @@ from . import ( error, + pycompat, ) def addlines(fp, hunk, lena, lenb, a, b): @@ -26,7 +27,7 @@ num = max(todoa, todob) if num == 0: break - for i in xrange(num): + for i in pycompat.xrange(num): s = fp.readline() if not s: raise error.ParseError(_('incomplete hunk')) @@ -71,7 +72,7 @@ blen = len(b) if alen > blen - bstart or bstart < 0: return False - for i in xrange(alen): + for i in pycompat.xrange(alen): if a[i][1:] != b[i + bstart]: return False return True
--- a/mercurial/dirstate.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/dirstate.py Mon Aug 20 09:48:08 2018 -0700 @@ -893,8 +893,11 @@ wadd = work.append while work: nd = work.pop() - if not match.visitdir(nd): + visitentries = match.visitchildrenset(nd) + if not visitentries: continue + if visitentries == 'this' or visitentries == 'all': + visitentries = None skip = None if nd == '.': nd = '' @@ -909,6 +912,13 @@ continue raise for f, kind, st in entries: + # If we needed to inspect any files, visitentries would have + # been 'this' or 'all', and we would have set it to None + # above. If we have visitentries populated here, we don't + # care about any files in this directory, so no need to + # check the type of `f`. + if visitentries and f not in visitentries: + continue if normalizefile: # even though f might be a directory, we're only # interested in comparing it to files currently in the
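The walk loop above switches from the boolean match.visitdir(nd) to match.visitchildrenset(nd), whose richer return value lets whole directories be pruned and uninteresting entries skipped. Restating the contract the loop relies on as a condensed helper (hypothetical, not in the patch):

    def prunedentries(names, visitentries):
        # visitentries is the result of match.visitchildrenset(nd)
        if not visitentries:                  # falsy: skip the directory entirely
            return []
        if visitentries in ('this', 'all'):
            return list(names)                # every entry may matter
        return [n for n in names if n in visitentries]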
--- a/mercurial/dirstateguard.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/dirstateguard.py Mon Aug 20 09:48:08 2018 -0700 @@ -11,6 +11,7 @@ from . import ( error, + narrowspec, util, ) @@ -33,7 +34,10 @@ self._active = False self._closed = False self._backupname = 'dirstate.backup.%s.%d' % (name, id(self)) + self._narrowspecbackupname = ('narrowspec.backup.%s.%d' % + (name, id(self))) repo.dirstate.savebackup(repo.currenttransaction(), self._backupname) + narrowspec.savebackup(repo, self._narrowspecbackupname) self._active = True def __del__(self): @@ -52,10 +56,12 @@ self._repo.dirstate.clearbackup(self._repo.currenttransaction(), self._backupname) + narrowspec.clearbackup(self._repo, self._narrowspecbackupname) self._active = False self._closed = True def _abort(self): + narrowspec.restorebackup(self._repo, self._narrowspecbackupname) self._repo.dirstate.restorebackup(self._repo.currenttransaction(), self._backupname) self._active = False
--- a/mercurial/dispatch.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/dispatch.py Mon Aug 20 09:48:08 2018 -0700 @@ -212,9 +212,9 @@ try: ret = _runcatch(req) or 0 except error.ProgrammingError as inst: - req.ui.warn(_('** ProgrammingError: %s\n') % inst) + req.ui.error(_('** ProgrammingError: %s\n') % inst) if inst.hint: - req.ui.warn(_('** (%s)\n') % inst.hint) + req.ui.error(_('** (%s)\n') % inst.hint) raise except KeyboardInterrupt as inst: try: @@ -222,7 +222,7 @@ msg = _("killed!\n") else: msg = _("interrupted!\n") - req.ui.warn(msg) + req.ui.error(msg) except error.SignalInterrupt: # maybe pager would quit without consuming all the output, and # SIGPIPE was raised. we cannot print anything in this case. @@ -370,9 +370,8 @@ ui.warn(_("hg %s: %s\n") % (inst.args[0], msgbytes)) commands.help_(ui, inst.args[0], full=False, command=True) else: - ui.pager('help') ui.warn(_("hg: %s\n") % inst.args[1]) - commands.help_(ui, 'shortlist') + ui.warn(_("(use 'hg help -v' for a list of global options)\n")) except error.ParseError as inst: _formatparse(ui.warn, inst) return -1 @@ -394,9 +393,8 @@ _reportsimilar(ui.warn, sim) suggested = True if not suggested: - ui.pager('help') ui.warn(nocmdmsg) - commands.help_(ui, 'shortlist') + ui.warn(_("(use 'hg help' for a list of commands)\n")) except IOError: raise except KeyboardInterrupt:
--- a/mercurial/encoding.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/encoding.py Mon Aug 20 09:48:08 2018 -0700 @@ -251,7 +251,7 @@ def getcols(s, start, c): '''Use colwidth to find a c-column substring of s starting at byte index start''' - for x in xrange(start + c, len(s)): + for x in pycompat.xrange(start + c, len(s)): t = s[start:x] if colwidth(t) == c: return t @@ -346,7 +346,7 @@ else: uslice = lambda i: u[:-i] concat = lambda s: s + ellipsis - for i in xrange(1, len(u)): + for i in pycompat.xrange(1, len(u)): usub = uslice(i) if ucolwidth(usub) <= width: return concat(usub.encode(_sysstr(encoding)))
--- a/mercurial/error.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/error.py Mon Aug 20 09:48:08 2018 -0700 @@ -58,6 +58,9 @@ def __str__(self): return RevlogError.__str__(self) +class AmbiguousPrefixLookupError(LookupError): + pass + class FilteredLookupError(LookupError): pass
--- a/mercurial/exchange.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/exchange.py Mon Aug 20 09:48:08 2018 -0700 @@ -15,6 +15,7 @@ bin, hex, nullid, + nullrev, ) from .thirdparty import ( attr, @@ -27,10 +28,12 @@ error, lock as lockmod, logexchange, + narrowspec, obsolete, phases, pushkey, pycompat, + repository, scmutil, sslutil, streamclone, @@ -44,6 +47,8 @@ urlerr = util.urlerr urlreq = util.urlreq +_NARROWACL_SECTION = 'narrowhgacl' + # Maps bundle version human names to changegroup versions. _bundlespeccgversions = {'v1': '01', 'v2': '02', @@ -1427,7 +1432,7 @@ old_heads = unficl.heads() clstart = len(unficl) _pullbundle2(pullop) - if changegroup.NARROW_REQUIREMENT in repo.requirements: + if repository.NARROW_REQUIREMENT in repo.requirements: # XXX narrow clones filter the heads on the server side during # XXX getbundle and result in partial replies as well. # XXX Disable pull bundles in this case as band aid to avoid @@ -1830,6 +1835,176 @@ pullop.repo.invalidatevolatilesets() return tr +def applynarrowacl(repo, kwargs): + """Apply narrow fetch access control. + + This massages the named arguments for getbundle wire protocol commands + so requested data is filtered through access control rules. + """ + ui = repo.ui + # TODO this assumes existence of HTTP and is a layering violation. + username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username()) + user_includes = ui.configlist( + _NARROWACL_SECTION, username + '.includes', + ui.configlist(_NARROWACL_SECTION, 'default.includes')) + user_excludes = ui.configlist( + _NARROWACL_SECTION, username + '.excludes', + ui.configlist(_NARROWACL_SECTION, 'default.excludes')) + if not user_includes: + raise error.Abort(_("{} configuration for user {} is empty") + .format(_NARROWACL_SECTION, username)) + + user_includes = [ + 'path:.' if p == '*' else 'path:' + p for p in user_includes] + user_excludes = [ + 'path:.' if p == '*' else 'path:' + p for p in user_excludes] + + req_includes = set(kwargs.get(r'includepats', [])) + req_excludes = set(kwargs.get(r'excludepats', [])) + + req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns( + req_includes, req_excludes, user_includes, user_excludes) + + if invalid_includes: + raise error.Abort( + _("The following includes are not accessible for {}: {}") + .format(username, invalid_includes)) + + new_args = {} + new_args.update(kwargs) + new_args[r'narrow'] = True + new_args[r'includepats'] = req_includes + if req_excludes: + new_args[r'excludepats'] = req_excludes + + return new_args + +def _computeellipsis(repo, common, heads, known, match, depth=None): + """Compute the shape of a narrowed DAG. + + Args: + repo: The repository we're transferring. + common: The roots of the DAG range we're transferring. + May be just [nullid], which means all ancestors of heads. + heads: The heads of the DAG range we're transferring. + match: The narrowmatcher that allows us to identify relevant changes. + depth: If not None, only consider nodes to be full nodes if they are at + most depth changesets away from one of heads. + + Returns: + A tuple of (visitnodes, relevant_nodes, ellipsisroots) where: + + visitnodes: The list of nodes (either full or ellipsis) which + need to be sent to the client. + relevant_nodes: The set of changelog nodes which change a file inside + the narrowspec. The client needs these as non-ellipsis nodes. + ellipsisroots: A dict of {rev: parents} that is used in + narrowchangegroup to produce ellipsis nodes with the + correct parents. 
+ """ + cl = repo.changelog + mfl = repo.manifestlog + + clrev = cl.rev + + commonrevs = {clrev(n) for n in common} | {nullrev} + headsrevs = {clrev(n) for n in heads} + + if depth: + revdepth = {h: 0 for h in headsrevs} + + ellipsisheads = collections.defaultdict(set) + ellipsisroots = collections.defaultdict(set) + + def addroot(head, curchange): + """Add a root to an ellipsis head, splitting heads with 3 roots.""" + ellipsisroots[head].add(curchange) + # Recursively split ellipsis heads with 3 roots by finding the + # roots' youngest common descendant which is an elided merge commit. + # That descendant takes 2 of the 3 roots as its own, and becomes a + # root of the head. + while len(ellipsisroots[head]) > 2: + child, roots = splithead(head) + splitroots(head, child, roots) + head = child # Recurse in case we just added a 3rd root + + def splitroots(head, child, roots): + ellipsisroots[head].difference_update(roots) + ellipsisroots[head].add(child) + ellipsisroots[child].update(roots) + ellipsisroots[child].discard(child) + + def splithead(head): + r1, r2, r3 = sorted(ellipsisroots[head]) + for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)): + mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)', + nr1, head, nr2, head) + for j in mid: + if j == nr2: + return nr2, (nr1, nr2) + if j not in ellipsisroots or len(ellipsisroots[j]) < 2: + return j, (nr1, nr2) + raise error.Abort(_('Failed to split up ellipsis node! head: %d, ' + 'roots: %d %d %d') % (head, r1, r2, r3)) + + missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs)) + visit = reversed(missing) + relevant_nodes = set() + visitnodes = [cl.node(m) for m in missing] + required = set(headsrevs) | known + for rev in visit: + clrev = cl.changelogrevision(rev) + ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev] + if depth is not None: + curdepth = revdepth[rev] + for p in ps: + revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1)) + needed = False + shallow_enough = depth is None or revdepth[rev] <= depth + if shallow_enough: + curmf = mfl[clrev.manifest].read() + if ps: + # We choose to not trust the changed files list in + # changesets because it's not always correct. TODO: could + # we trust it for the non-merge case? + p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read() + needed = bool(curmf.diff(p1mf, match)) + if not needed and len(ps) > 1: + # For merge changes, the list of changed files is not + # helpful, since we need to emit the merge if a file + # in the narrow spec has changed on either side of the + # merge. As a result, we do a manifest diff to check. + p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read() + needed = bool(curmf.diff(p2mf, match)) + else: + # For a root node, we need to include the node if any + # files in the node match the narrowspec. 
+ needed = any(curmf.walk(match)) + + if needed: + for head in ellipsisheads[rev]: + addroot(head, rev) + for p in ps: + required.add(p) + relevant_nodes.add(cl.node(rev)) + else: + if not ps: + ps = [nullrev] + if rev in required: + for head in ellipsisheads[rev]: + addroot(head, rev) + for p in ps: + ellipsisheads[p].add(rev) + else: + for p in ps: + ellipsisheads[p] |= ellipsisheads[rev] + + # add common changesets as roots of their reachable ellipsis heads + for c in commonrevs: + for head in ellipsisheads[c]: + addroot(head, c) + return visitnodes, relevant_nodes, ellipsisroots + def caps20to10(repo, role): """return a set with appropriate options to use bundle20 during getbundle""" caps = {'HG20'} @@ -1924,30 +2099,52 @@ def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None, b2caps=None, heads=None, common=None, **kwargs): """add a changegroup part to the requested bundle""" - cgstream = None - if kwargs.get(r'cg', True): - # build changegroup bundle here. - version = '01' - cgversions = b2caps.get('changegroup') - if cgversions: # 3.1 and 3.2 ship with an empty value - cgversions = [v for v in cgversions - if v in changegroup.supportedoutgoingversions(repo)] - if not cgversions: - raise ValueError(_('no common changegroup version')) - version = max(cgversions) - outgoing = _computeoutgoing(repo, heads, common) - if outgoing.missing: - cgstream = changegroup.makestream(repo, outgoing, version, source, - bundlecaps=bundlecaps) + if not kwargs.get(r'cg', True): + return + + version = '01' + cgversions = b2caps.get('changegroup') + if cgversions: # 3.1 and 3.2 ship with an empty value + cgversions = [v for v in cgversions + if v in changegroup.supportedoutgoingversions(repo)] + if not cgversions: + raise ValueError(_('no common changegroup version')) + version = max(cgversions) + + outgoing = _computeoutgoing(repo, heads, common) + if not outgoing.missing: + return - if cgstream: - part = bundler.newpart('changegroup', data=cgstream) - if cgversions: - part.addparam('version', version) - part.addparam('nbchanges', '%d' % len(outgoing.missing), - mandatory=False) - if 'treemanifest' in repo.requirements: - part.addparam('treemanifest', '1') + if kwargs.get(r'narrow', False): + include = sorted(filter(bool, kwargs.get(r'includepats', []))) + exclude = sorted(filter(bool, kwargs.get(r'excludepats', []))) + filematcher = narrowspec.match(repo.root, include=include, + exclude=exclude) + else: + filematcher = None + + cgstream = changegroup.makestream(repo, outgoing, version, source, + bundlecaps=bundlecaps, + filematcher=filematcher) + + part = bundler.newpart('changegroup', data=cgstream) + if cgversions: + part.addparam('version', version) + + part.addparam('nbchanges', '%d' % len(outgoing.missing), + mandatory=False) + + if 'treemanifest' in repo.requirements: + part.addparam('treemanifest', '1') + + if kwargs.get(r'narrow', False) and (include or exclude): + narrowspecpart = bundler.newpart('narrow:spec') + if include: + narrowspecpart.addparam( + 'include', '\n'.join(include), mandatory=True) + if exclude: + narrowspecpart.addparam( + 'exclude', '\n'.join(exclude), mandatory=True) @getbundle2partsgenerator('bookmarks') def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None, @@ -2069,8 +2266,13 @@ # Don't send unless: # - changeset are being exchanged, # - the client supports it. - if not (kwargs.get(r'cg', True)) or 'rev-branch-cache' not in b2caps: + # - narrow bundle isn't in play (not currently compatible). 
+ if (not kwargs.get(r'cg', True) + or 'rev-branch-cache' not in b2caps + or kwargs.get(r'narrow', False) + or repo.ui.has_section(_NARROWACL_SECTION)): return + outgoing = _computeoutgoing(repo, heads, common) bundle2.addpartrevbranchcache(repo, bundler, outgoing)
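applynarrowacl() above reads per-user narrow access rules from the new narrowhgacl section, falling back to the default.* entries, with '*' translating to 'path:.'. A server-side configuration might look like this (user and path names are invented):

    [narrowhgacl]
    default.includes = *
    alice.includes = src/libs
    alice.excludes = src/libs/private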
--- a/mercurial/extensions.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/extensions.py Mon Aug 20 09:48:08 2018 -0700 @@ -124,7 +124,7 @@ # note: this ui.debug happens before --debug is processed, # Use --config ui.debug=1 to see them. if ui.configbool('devel', 'debug.extensions'): - ui.debug('could not import %s (%s): trying %s\n' + ui.debug('debug.extensions: - could not import %s (%s): trying %s\n' % (failed, stringutil.forcebytestr(err), next)) if ui.debugflag: ui.traceback() @@ -166,7 +166,7 @@ _rejectunicode(t, o._table) _validatecmdtable(ui, getattr(mod, 'cmdtable', {})) -def load(ui, name, path): +def load(ui, name, path, log=lambda *a: None): if name.startswith('hgext.') or name.startswith('hgext/'): shortname = name[6:] else: @@ -175,8 +175,11 @@ return None if shortname in _extensions: return _extensions[shortname] + log(' - loading extension: %r\n', shortname) _extensions[shortname] = None - mod = _importext(name, path, bind(_reportimporterror, ui)) + with util.timedcm() as stats: + mod = _importext(name, path, bind(_reportimporterror, ui)) + log(' > %r extension loaded in %s\n', shortname, stats) # Before we do anything with the extension, check against minimum stated # compatibility. This gives extension authors a mechanism to have their @@ -187,12 +190,16 @@ ui.warn(_('(third party extension %s requires version %s or newer ' 'of Mercurial; disabling)\n') % (shortname, minver)) return + log(' - validating extension tables: %r\n', shortname) _validatetables(ui, mod) _extensions[shortname] = mod _order.append(shortname) - for fn in _aftercallbacks.get(shortname, []): - fn(loaded=True) + log(' - invoking registered callbacks: %r\n', shortname) + with util.timedcm() as stats: + for fn in _aftercallbacks.get(shortname, []): + fn(loaded=True) + log(' > callbacks completed in %s\n', stats) return mod def _runuisetup(name, ui): @@ -225,28 +232,41 @@ return True def loadall(ui, whitelist=None): + if ui.configbool('devel', 'debug.extensions'): + log = lambda msg, *values: ui.debug('debug.extensions: ', + msg % values, label='debug.extensions') + else: + log = lambda *a, **kw: None result = ui.configitems("extensions") if whitelist is not None: result = [(k, v) for (k, v) in result if k in whitelist] newindex = len(_order) - for (name, path) in result: - if path: - if path[0:1] == '!': - _disabledextensions[name] = path[1:] - continue - try: - load(ui, name, path) - except Exception as inst: - msg = stringutil.forcebytestr(inst) + log('loading %sextensions\n', 'additional ' if newindex else '') + log('- processing %d entries\n', len(result)) + with util.timedcm() as stats: + for (name, path) in result: if path: - ui.warn(_("*** failed to import extension %s from %s: %s\n") - % (name, path, msg)) - else: - ui.warn(_("*** failed to import extension %s: %s\n") - % (name, msg)) - if isinstance(inst, error.Hint) and inst.hint: - ui.warn(_("*** (%s)\n") % inst.hint) - ui.traceback() + if path[0:1] == '!': + if name not in _disabledextensions: + log(' - skipping disabled extension: %r\n', name) + _disabledextensions[name] = path[1:] + continue + try: + load(ui, name, path, log) + except Exception as inst: + msg = stringutil.forcebytestr(inst) + if path: + ui.warn(_("*** failed to import extension %s from %s: %s\n") + % (name, path, msg)) + else: + ui.warn(_("*** failed to import extension %s: %s\n") + % (name, msg)) + if isinstance(inst, error.Hint) and inst.hint: + ui.warn(_("*** (%s)\n") % inst.hint) + ui.traceback() + + log('> loaded %d extensions, total time %s\n', + len(_order) - 
newindex, stats) # list of (objname, loadermod, loadername) tuple: # - objname is the name of an object in extension module, # from which extra information is loaded @@ -258,29 +278,47 @@ earlyextraloaders = [ ('configtable', configitems, 'loadconfigtable'), ] + + log('- loading configtable attributes\n') _loadextra(ui, newindex, earlyextraloaders) broken = set() + log('- executing uisetup hooks\n') for name in _order[newindex:]: - if not _runuisetup(name, ui): - broken.add(name) + log(' - running uisetup for %r\n', name) + with util.timedcm() as stats: + if not _runuisetup(name, ui): + log(' - the %r extension uisetup failed\n', name) + broken.add(name) + log(' > uisetup for %r took %s\n', name, stats) + log('- executing extsetup hooks\n') for name in _order[newindex:]: if name in broken: continue - if not _runextsetup(name, ui): - broken.add(name) + log(' - running extsetup for %r\n', name) + with util.timedcm() as stats: + if not _runextsetup(name, ui): + log(' - the %r extension extsetup failed\n', name) + broken.add(name) + log(' > extsetup for %r took %s\n', name, stats) for name in broken: + log(' - disabling broken %r extension\n', name) _extensions[name] = None # Call aftercallbacks that were never met. - for shortname in _aftercallbacks: - if shortname in _extensions: - continue + log('- executing remaining aftercallbacks\n') + with util.timedcm() as stats: + for shortname in _aftercallbacks: + if shortname in _extensions: + continue - for fn in _aftercallbacks[shortname]: - fn(loaded=False) + for fn in _aftercallbacks[shortname]: + log(' - extension %r not loaded, notify callbacks\n', + shortname) + fn(loaded=False) + log('> remaining aftercallbacks completed in %s\n', stats) # loadall() is called multiple times and lingering _aftercallbacks # entries could result in double execution. See issue4646. @@ -304,6 +342,7 @@ # - loadermod is the module where loader is placed # - loadername is the name of the function, # which takes (ui, extensionname, extraobj) arguments + log('- loading extension registration objects\n') extraloaders = [ ('cmdtable', commands, 'loadcmdtable'), ('colortable', color, 'loadcolortable'), @@ -314,7 +353,10 @@ ('templatefunc', templatefuncs, 'loadfunction'), ('templatekeyword', templatekw, 'loadkeyword'), ] - _loadextra(ui, newindex, extraloaders) + with util.timedcm() as stats: + _loadextra(ui, newindex, extraloaders) + log('> extension registration object loading took %s\n', stats) + log('extension loading complete\n') def _loadextra(ui, newindex, extraloaders): for name in _order[newindex:]:
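The new extension-loading trace is emitted through ui.debug() and gated on devel.debug.extensions; because extension loading happens before --debug is parsed (as the comment at the top of this file notes), both switches must be forced via --config to see it:

    $ hg --config devel.debug.extensions=yes --config ui.debug=1 version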
--- a/mercurial/filemerge.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/filemerge.py Mon Aug 20 09:48:08 2018 -0700 @@ -137,6 +137,14 @@ return procutil.findexe(util.expandpath(exe)) def _picktool(repo, ui, path, binary, symlink, changedelete): + strictcheck = ui.configbool('merge', 'strict-capability-check') + + def hascapability(tool, capability, strict=False): + if strict and tool in internals: + if internals[tool].capabilities.get(capability): + return True + return _toolbool(ui, tool, capability) + def supportscd(tool): return tool in internals and internals[tool].mergetype == nomerge @@ -149,9 +157,9 @@ ui.warn(_("couldn't find merge tool %s\n") % tmsg) else: # configured but non-existing tools are more silent ui.note(_("couldn't find merge tool %s\n") % tmsg) - elif symlink and not _toolbool(ui, tool, "symlink"): + elif symlink and not hascapability(tool, "symlink", strictcheck): ui.warn(_("tool %s can't handle symlinks\n") % tmsg) - elif binary and not _toolbool(ui, tool, "binary"): + elif binary and not hascapability(tool, "binary", strictcheck): ui.warn(_("tool %s can't handle binary\n") % tmsg) elif changedelete and not supportscd(tool): # the nomerge tools are the only tools that support change/delete @@ -186,9 +194,19 @@ return (hgmerge, hgmerge) # then patterns + + # whether binary capability should be checked strictly + binarycap = binary and strictcheck + for pat, tool in ui.configitems("merge-patterns"): mf = match.match(repo.root, '', [pat]) - if mf(path) and check(tool, pat, symlink, False, changedelete): + if mf(path) and check(tool, pat, symlink, binarycap, changedelete): + if binary and not hascapability(tool, "binary", strict=True): + ui.warn(_("warning: check merge-patterns configurations," + " if %r for binary file %r is unintentional\n" + "(see 'hg help merge-tools'" + " for binary files capability)\n") + % (pycompat.bytestr(tool), pycompat.bytestr(path))) toolpath = _findtool(ui, tool) return (tool, _quotetoolpath(toolpath)) @@ -208,9 +226,10 @@ if uimerge: # external tools defined in uimerge won't be able to handle # change/delete conflicts - if uimerge not in names and not changedelete: - return (uimerge, uimerge) - tools.insert(0, (None, uimerge)) # highest priority + if check(uimerge, path, symlink, binary, changedelete): + if uimerge not in names and not changedelete: + return (uimerge, uimerge) + tools.insert(0, (None, uimerge)) # highest priority tools.append((None, "hgmerge")) # the old default, if found for p, t in tools: if check(t, None, symlink, binary, changedelete): @@ -469,7 +488,7 @@ success, status = tagmerge.merge(repo, fcd, fco, fca) return success, status, False -@internaltool('dump', fullmerge) +@internaltool('dump', fullmerge, binary=True, symlink=True) def _idump(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None): """ Creates three versions of the files to merge, containing the @@ -495,7 +514,7 @@ repo.wwrite(fd + ".base", fca.data(), fca.flags()) return False, 1, False -@internaltool('forcedump', mergeonly) +@internaltool('forcedump', mergeonly, binary=True, symlink=True) def _forcedump(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None): """ @@ -916,14 +935,17 @@ _haltmerge() # default action is 'continue', in which case we neither prompt nor halt +def hasconflictmarkers(data): + return bool(re.search("^(<<<<<<< .*|=======|>>>>>>> .*)$", data, + re.MULTILINE)) + def _check(repo, r, ui, tool, fcd, files): fd = fcd.path() unused, unused, unused, back = files if not r and (_toolbool(ui, tool, "checkconflicts") 
or 'conflicts' in _toollist(ui, tool, "check")): - if re.search("^(<<<<<<< .*|=======|>>>>>>> .*)$", fcd.data(), - re.MULTILINE): + if hasconflictmarkers(fcd.data()): r = 1 checked = False @@ -967,6 +989,12 @@ internals['internal:' + name] = func internalsdoc[fullname] = func + capabilities = sorted([k for k, v in func.capabilities.items() if v]) + if capabilities: + capdesc = _("(actual capabilities: %s)") % ', '.join(capabilities) + func.__doc__ = (func.__doc__ + + pycompat.sysstr("\n\n %s" % capdesc)) + # load built-in merge tools explicitly to setup internalsdoc loadinternalmerge(None, None, internaltool)
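The hunk above also factors the conflict-marker scan out into a reusable hasconflictmarkers() helper. Below is a minimal standalone sketch of what that check does, reusing the same regular expression; the sample strings are made up for illustration:

    import re

    def hasconflictmarkers(data):
        # a line that is exactly "=======" or that starts with
        # "<<<<<<< " or ">>>>>>> " indicates an unresolved conflict
        return bool(re.search("^(<<<<<<< .*|=======|>>>>>>> .*)$", data,
                              re.MULTILINE))

    assert hasconflictmarkers("a\n<<<<<<< working copy\nb\n=======\n"
                              "c\n>>>>>>> merge rev\n")
    assert not hasconflictmarkers("no markers in this content\n")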
--- a/mercurial/fileset.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/fileset.py Mon Aug 20 09:48:08 2018 -0700 @@ -13,9 +13,9 @@ from .i18n import _ from . import ( error, + filesetlang, match as matchmod, merge, - parser, pycompat, registrar, scmutil, @@ -25,126 +25,28 @@ stringutil, ) -elements = { - # token-type: binding-strength, primary, prefix, infix, suffix - "(": (20, None, ("group", 1, ")"), ("func", 1, ")"), None), - ":": (15, None, None, ("kindpat", 15), None), - "-": (5, None, ("negate", 19), ("minus", 5), None), - "not": (10, None, ("not", 10), None, None), - "!": (10, None, ("not", 10), None, None), - "and": (5, None, None, ("and", 5), None), - "&": (5, None, None, ("and", 5), None), - "or": (4, None, None, ("or", 4), None), - "|": (4, None, None, ("or", 4), None), - "+": (4, None, None, ("or", 4), None), - ",": (2, None, None, ("list", 2), None), - ")": (0, None, None, None, None), - "symbol": (0, "symbol", None, None, None), - "string": (0, "string", None, None, None), - "end": (0, None, None, None, None), -} - -keywords = {'and', 'or', 'not'} - -globchars = ".*{}[]?/\\_" +# common weight constants +_WEIGHT_CHECK_FILENAME = filesetlang.WEIGHT_CHECK_FILENAME +_WEIGHT_READ_CONTENTS = filesetlang.WEIGHT_READ_CONTENTS +_WEIGHT_STATUS = filesetlang.WEIGHT_STATUS +_WEIGHT_STATUS_THOROUGH = filesetlang.WEIGHT_STATUS_THOROUGH -def tokenize(program): - pos, l = 0, len(program) - program = pycompat.bytestr(program) - while pos < l: - c = program[pos] - if c.isspace(): # skip inter-token whitespace - pass - elif c in "(),-:|&+!": # handle simple operators - yield (c, None, pos) - elif (c in '"\'' or c == 'r' and - program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings - if c == 'r': - pos += 1 - c = program[pos] - decode = lambda x: x - else: - decode = parser.unescapestr - pos += 1 - s = pos - while pos < l: # find closing quote - d = program[pos] - if d == '\\': # skip over escaped characters - pos += 2 - continue - if d == c: - yield ('string', decode(program[s:pos]), s) - break - pos += 1 - else: - raise error.ParseError(_("unterminated string"), s) - elif c.isalnum() or c in globchars or ord(c) > 127: - # gather up a symbol/keyword - s = pos - pos += 1 - while pos < l: # find end of symbol - d = program[pos] - if not (d.isalnum() or d in globchars or ord(d) > 127): - break - pos += 1 - sym = program[s:pos] - if sym in keywords: # operator keywords - yield (sym, None, s) - else: - yield ('symbol', sym, s) - pos -= 1 - else: - raise error.ParseError(_("syntax error"), pos) - pos += 1 - yield ('end', None, pos) - -def parse(expr): - p = parser.parser(elements) - tree, pos = p.parse(tokenize(expr)) - if pos != len(expr): - raise error.ParseError(_("invalid token"), pos) - return tree - -def getsymbol(x): - if x and x[0] == 'symbol': - return x[1] - raise error.ParseError(_('not a symbol')) - -def getstring(x, err): - if x and (x[0] == 'string' or x[0] == 'symbol'): - return x[1] - raise error.ParseError(err) - -def _getkindpat(x, y, allkinds, err): - kind = getsymbol(x) - pat = getstring(y, err) - if kind not in allkinds: - raise error.ParseError(_("invalid pattern kind: %s") % kind) - return '%s:%s' % (kind, pat) - -def getpattern(x, allkinds, err): - if x and x[0] == 'kindpat': - return _getkindpat(x[1], x[2], allkinds, err) - return getstring(x, err) - -def getlist(x): - if not x: - return [] - if x[0] == 'list': - return getlist(x[1]) + [x[2]] - return [x] - -def getargs(x, min, max, err): - l = getlist(x) - if len(l) < min or len(l) > max: - raise 
error.ParseError(err) - return l +# helpers for processing parsed tree +getsymbol = filesetlang.getsymbol +getstring = filesetlang.getstring +_getkindpat = filesetlang.getkindpat +getpattern = filesetlang.getpattern +getargs = filesetlang.getargs def getmatch(mctx, x): if not x: raise error.ParseError(_("missing argument")) return methods[x[0]](mctx, *x[1:]) +def getmatchwithstatus(mctx, x, hint): + keys = set(getstring(hint, 'status hint must be a string').split()) + return getmatch(mctx.withstatus(keys), x) + def stringmatch(mctx, x): return mctx.matcher([x]) @@ -152,15 +54,20 @@ return stringmatch(mctx, _getkindpat(x, y, matchmod.allpatternkinds, _("pattern must be a string"))) +def patternsmatch(mctx, *xs): + allkinds = matchmod.allpatternkinds + patterns = [getpattern(x, allkinds, _("pattern must be a string")) + for x in xs] + return mctx.matcher(patterns) + def andmatch(mctx, x, y): xm = getmatch(mctx, x) - ym = getmatch(mctx, y) + ym = getmatch(mctx.narrowed(xm), y) return matchmod.intersectmatchers(xm, ym) -def ormatch(mctx, x, y): - xm = getmatch(mctx, x) - ym = getmatch(mctx, y) - return matchmod.unionmatcher([xm, ym]) +def ormatch(mctx, *xs): + ms = [getmatch(mctx, x) for x in xs] + return matchmod.unionmatcher(ms) def notmatch(mctx, x): m = getmatch(mctx, x) @@ -168,15 +75,12 @@ def minusmatch(mctx, x, y): xm = getmatch(mctx, x) - ym = getmatch(mctx, y) + ym = getmatch(mctx.narrowed(xm), y) return matchmod.differencematcher(xm, ym) -def negatematch(mctx, x): - raise error.ParseError(_("can't use negate operator in this context")) - -def listmatch(mctx, x, y): +def listmatch(mctx, *xs): raise error.ParseError(_("can't use a list in this context"), - hint=_('see hg help "filesets.x or y"')) + hint=_('see \'hg help "filesets.x or y"\'')) def func(mctx, a, b): funcname = getsymbol(a) @@ -193,14 +97,11 @@ # with: # mctx - current matchctx instance # x - argument in tree form -symbols = {} +symbols = filesetlang.symbols -# filesets using matchctx.status() -_statuscallers = set() +predicate = registrar.filesetpredicate(symbols) -predicate = registrar.filesetpredicate() - -@predicate('modified()', callstatus=True) +@predicate('modified()', callstatus=True, weight=_WEIGHT_STATUS) def modified(mctx, x): """File that is modified according to :hg:`status`. """ @@ -209,7 +110,7 @@ s = set(mctx.status().modified) return mctx.predicate(s.__contains__, predrepr='modified') -@predicate('added()', callstatus=True) +@predicate('added()', callstatus=True, weight=_WEIGHT_STATUS) def added(mctx, x): """File that is added according to :hg:`status`. """ @@ -218,7 +119,7 @@ s = set(mctx.status().added) return mctx.predicate(s.__contains__, predrepr='added') -@predicate('removed()', callstatus=True) +@predicate('removed()', callstatus=True, weight=_WEIGHT_STATUS) def removed(mctx, x): """File that is removed according to :hg:`status`. """ @@ -227,7 +128,7 @@ s = set(mctx.status().removed) return mctx.predicate(s.__contains__, predrepr='removed') -@predicate('deleted()', callstatus=True) +@predicate('deleted()', callstatus=True, weight=_WEIGHT_STATUS) def deleted(mctx, x): """Alias for ``missing()``. """ @@ -236,7 +137,7 @@ s = set(mctx.status().deleted) return mctx.predicate(s.__contains__, predrepr='deleted') -@predicate('missing()', callstatus=True) +@predicate('missing()', callstatus=True, weight=_WEIGHT_STATUS) def missing(mctx, x): """File that is missing according to :hg:`status`. 
""" @@ -245,7 +146,7 @@ s = set(mctx.status().deleted) return mctx.predicate(s.__contains__, predrepr='deleted') -@predicate('unknown()', callstatus=True) +@predicate('unknown()', callstatus=True, weight=_WEIGHT_STATUS_THOROUGH) def unknown(mctx, x): """File that is unknown according to :hg:`status`.""" # i18n: "unknown" is a keyword @@ -253,7 +154,7 @@ s = set(mctx.status().unknown) return mctx.predicate(s.__contains__, predrepr='unknown') -@predicate('ignored()', callstatus=True) +@predicate('ignored()', callstatus=True, weight=_WEIGHT_STATUS_THOROUGH) def ignored(mctx, x): """File that is ignored according to :hg:`status`.""" # i18n: "ignored" is a keyword @@ -261,7 +162,7 @@ s = set(mctx.status().ignored) return mctx.predicate(s.__contains__, predrepr='ignored') -@predicate('clean()', callstatus=True) +@predicate('clean()', callstatus=True, weight=_WEIGHT_STATUS) def clean(mctx, x): """File that is clean according to :hg:`status`. """ @@ -277,7 +178,7 @@ getargs(x, 0, 0, _("tracked takes no arguments")) return mctx.predicate(mctx.ctx.__contains__, predrepr='tracked') -@predicate('binary()') +@predicate('binary()', weight=_WEIGHT_READ_CONTENTS) def binary(mctx, x): """File that appears to be binary (contains NUL bytes). """ @@ -304,7 +205,7 @@ ctx = mctx.ctx return mctx.predicate(lambda f: ctx.flags(f) == 'l', predrepr='symlink') -@predicate('resolved()') +@predicate('resolved()', weight=_WEIGHT_STATUS) def resolved(mctx, x): """File that is marked resolved according to :hg:`resolve -l`. """ @@ -316,7 +217,7 @@ return mctx.predicate(lambda f: f in ms and ms[f] == 'r', predrepr='resolved') -@predicate('unresolved()') +@predicate('unresolved()', weight=_WEIGHT_STATUS) def unresolved(mctx, x): """File that is marked unresolved according to :hg:`resolve -l`. """ @@ -328,7 +229,7 @@ return mctx.predicate(lambda f: f in ms and ms[f] == 'u', predrepr='unresolved') -@predicate('hgignore()') +@predicate('hgignore()', weight=_WEIGHT_STATUS) def hgignore(mctx, x): """File that matches the active .hgignore pattern. """ @@ -336,7 +237,7 @@ getargs(x, 0, 0, _("hgignore takes no arguments")) return mctx.ctx.repo().dirstate._ignore -@predicate('portable()') +@predicate('portable()', weight=_WEIGHT_CHECK_FILENAME) def portable(mctx, x): """File that has a portable name. (This doesn't include filenames with case collisions.) @@ -346,7 +247,7 @@ return mctx.predicate(lambda f: util.checkwinfilename(f) is None, predrepr='portable') -@predicate('grep(regex)') +@predicate('grep(regex)', weight=_WEIGHT_READ_CONTENTS) def grep(mctx, x): """File contains the given regular expression. """ @@ -400,7 +301,7 @@ b = _sizetomax(expr) return lambda x: x >= a and x <= b -@predicate('size(expression)') +@predicate('size(expression)', weight=_WEIGHT_STATUS) def size(mctx, x): """File size matches the given expression. Examples: @@ -415,7 +316,7 @@ return mctx.fpredicate(lambda fctx: m(fctx.size()), predrepr=('size(%r)', expr), cache=True) -@predicate('encoding(name)') +@predicate('encoding(name)', weight=_WEIGHT_READ_CONTENTS) def encoding(mctx, x): """File can be successfully decoded with the given character encoding. May not be useful for encodings other than ASCII and @@ -437,7 +338,7 @@ return mctx.fpredicate(encp, predrepr=('encoding(%r)', enc), cache=True) -@predicate('eol(style)') +@predicate('eol(style)', weight=_WEIGHT_READ_CONTENTS) def eol(mctx, x): """File contains newlines of the given style (dos, unix, mac). 
Binary files are excluded, files with mixed line endings match multiple @@ -471,7 +372,7 @@ return p and p[0].path() != fctx.path() return mctx.fpredicate(copiedp, predrepr='copied', cache=True) -@predicate('revs(revs, pattern)') +@predicate('revs(revs, pattern)', weight=_WEIGHT_STATUS) def revs(mctx, x): """Evaluate set in the specified revisions. If the revset match multiple revs, this will return file matching pattern in any of the revision. @@ -486,14 +387,15 @@ matchers = [] for r in revs: ctx = repo[r] - matchers.append(getmatch(mctx.switch(ctx, _buildstatus(ctx, x)), x)) + mc = mctx.switch(ctx.p1(), ctx) + matchers.append(getmatch(mc, x)) if not matchers: return mctx.never() if len(matchers) == 1: return matchers[0] return matchmod.unionmatcher(matchers) -@predicate('status(base, rev, pattern)') +@predicate('status(base, rev, pattern)', weight=_WEIGHT_STATUS) def status(mctx, x): """Evaluate predicate using status change between ``base`` and ``rev``. Examples: @@ -513,7 +415,8 @@ if not revspec: raise error.ParseError(reverr) basectx, ctx = scmutil.revpair(repo, [baserevspec, revspec]) - return getmatch(mctx.switch(ctx, _buildstatus(ctx, x, basectx=basectx)), x) + mc = mctx.switch(basectx, ctx) + return getmatch(mc, x) @predicate('subrepo([pattern])') def subrepo(mctx, x): @@ -539,24 +442,52 @@ return mctx.predicate(sstate.__contains__, predrepr='subrepo') methods = { + 'withstatus': getmatchwithstatus, 'string': stringmatch, 'symbol': stringmatch, 'kindpat': kindpatmatch, + 'patterns': patternsmatch, 'and': andmatch, 'or': ormatch, 'minus': minusmatch, - 'negate': negatematch, 'list': listmatch, - 'group': getmatch, 'not': notmatch, 'func': func, } class matchctx(object): - def __init__(self, ctx, status=None, badfn=None): + def __init__(self, basectx, ctx, badfn=None): + self._basectx = basectx self.ctx = ctx - self._status = status self._badfn = badfn + self._match = None + self._status = None + + def narrowed(self, match): + """Create matchctx for a sub-tree narrowed by the given matcher""" + mctx = matchctx(self._basectx, self.ctx, self._badfn) + mctx._match = match + # leave wider status which we don't have to care + mctx._status = self._status + return mctx + + def switch(self, basectx, ctx): + mctx = matchctx(basectx, ctx, self._badfn) + mctx._match = self._match + return mctx + + def withstatus(self, keys): + """Create matchctx which has precomputed status specified by the keys""" + mctx = matchctx(self._basectx, self.ctx, self._badfn) + mctx._match = self._match + mctx._buildstatus(keys) + return mctx + + def _buildstatus(self, keys): + self._status = self._basectx.status(self.ctx, self._match, + listignored='ignored' in keys, + listclean='clean' in keys, + listunknown='unknown' in keys) def status(self): return self._status @@ -612,62 +543,20 @@ return matchmod.nevermatcher(repo.root, repo.getcwd(), badfn=self._badfn) - def switch(self, ctx, status=None): - return matchctx(ctx, status, self._badfn) - -# filesets using matchctx.switch() -_switchcallers = [ - 'revs', - 'status', -] - -def _intree(funcs, tree): - if isinstance(tree, tuple): - if tree[0] == 'func' and tree[1][0] == 'symbol': - if tree[1][1] in funcs: - return True - if tree[1][1] in _switchcallers: - # arguments won't be evaluated in the current context - return False - for s in tree[1:]: - if _intree(funcs, s): - return True - return False - def match(ctx, expr, badfn=None): """Create a matcher for a single fileset expression""" - tree = parse(expr) - mctx = matchctx(ctx, _buildstatus(ctx, tree), 
badfn=badfn) + tree = filesetlang.parse(expr) + tree = filesetlang.analyze(tree) + tree = filesetlang.optimize(tree) + mctx = matchctx(ctx.p1(), ctx, badfn=badfn) return getmatch(mctx, tree) -def _buildstatus(ctx, tree, basectx=None): - # do we need status info? - - if _intree(_statuscallers, tree): - unknown = _intree(['unknown'], tree) - ignored = _intree(['ignored'], tree) - - r = ctx.repo() - if basectx is None: - basectx = ctx.p1() - return r.status(basectx, ctx, - unknown=unknown, ignored=ignored, clean=True) - else: - return None - -def prettyformat(tree): - return parser.prettyformat(tree, ('string', 'symbol')) def loadpredicate(ui, extname, registrarobj): """Load fileset predicates from specified registrarobj """ for name, func in registrarobj._table.iteritems(): symbols[name] = func - if func._callstatus: - _statuscallers.add(name) - -# load built-in predicates explicitly to setup _statuscallers -loadpredicate(None, None, predicate) # tell hggettext to extract docstrings from these functions: i18nfunctions = symbols.values()
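With this rewrite, fileset.match() always runs the expression through the parse/analyze/optimize pipeline from the new filesetlang module before evaluating it. A rough sketch of that flow (the expression is an arbitrary example; evaluating the tree needs a real changectx, so that step is only shown as a comment):

    from mercurial import fileset, filesetlang
    # importing mercurial.fileset registers the built-in predicates
    # (and their weights) into filesetlang.symbols

    tree = filesetlang.parse(b'binary() and modified()')
    tree = filesetlang.analyze(tree)   # resolve pseudo ops, add status hints
    tree = filesetlang.optimize(tree)  # reorder subtrees by predicate weight
    print(filesetlang.prettyformat(tree))
    # the cheap status check (modified, weight 10) now sorts before the
    # content read (binary, weight 30), and the status-using subtree is
    # wrapped in a 'withstatus' hint node; evaluation would then be roughly:
    #     getmatch(matchctx(ctx.p1(), ctx, badfn=badfn), tree)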
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/filesetlang.py Mon Aug 20 09:48:08 2018 -0700 @@ -0,0 +1,330 @@ +# filesetlang.py - parser, tokenizer and utility for file set language +# +# Copyright 2010 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from __future__ import absolute_import + +from .i18n import _ +from . import ( + error, + parser, + pycompat, +) + +# common weight constants for static optimization +# (see registrar.filesetpredicate for details) +WEIGHT_CHECK_FILENAME = 0.5 +WEIGHT_READ_CONTENTS = 30 +WEIGHT_STATUS = 10 +WEIGHT_STATUS_THOROUGH = 50 + +elements = { + # token-type: binding-strength, primary, prefix, infix, suffix + "(": (20, None, ("group", 1, ")"), ("func", 1, ")"), None), + ":": (15, None, None, ("kindpat", 15), None), + "-": (5, None, ("negate", 19), ("minus", 5), None), + "not": (10, None, ("not", 10), None, None), + "!": (10, None, ("not", 10), None, None), + "and": (5, None, None, ("and", 5), None), + "&": (5, None, None, ("and", 5), None), + "or": (4, None, None, ("or", 4), None), + "|": (4, None, None, ("or", 4), None), + "+": (4, None, None, ("or", 4), None), + ",": (2, None, None, ("list", 2), None), + ")": (0, None, None, None, None), + "symbol": (0, "symbol", None, None, None), + "string": (0, "string", None, None, None), + "end": (0, None, None, None, None), +} + +keywords = {'and', 'or', 'not'} + +symbols = {} + +globchars = ".*{}[]?/\\_" + +def tokenize(program): + pos, l = 0, len(program) + program = pycompat.bytestr(program) + while pos < l: + c = program[pos] + if c.isspace(): # skip inter-token whitespace + pass + elif c in "(),-:|&+!": # handle simple operators + yield (c, None, pos) + elif (c in '"\'' or c == 'r' and + program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings + if c == 'r': + pos += 1 + c = program[pos] + decode = lambda x: x + else: + decode = parser.unescapestr + pos += 1 + s = pos + while pos < l: # find closing quote + d = program[pos] + if d == '\\': # skip over escaped characters + pos += 2 + continue + if d == c: + yield ('string', decode(program[s:pos]), s) + break + pos += 1 + else: + raise error.ParseError(_("unterminated string"), s) + elif c.isalnum() or c in globchars or ord(c) > 127: + # gather up a symbol/keyword + s = pos + pos += 1 + while pos < l: # find end of symbol + d = program[pos] + if not (d.isalnum() or d in globchars or ord(d) > 127): + break + pos += 1 + sym = program[s:pos] + if sym in keywords: # operator keywords + yield (sym, None, s) + else: + yield ('symbol', sym, s) + pos -= 1 + else: + raise error.ParseError(_("syntax error"), pos) + pos += 1 + yield ('end', None, pos) + +def parse(expr): + p = parser.parser(elements) + tree, pos = p.parse(tokenize(expr)) + if pos != len(expr): + raise error.ParseError(_("invalid token"), pos) + return parser.simplifyinfixops(tree, {'list', 'or'}) + +def getsymbol(x): + if x and x[0] == 'symbol': + return x[1] + raise error.ParseError(_('not a symbol')) + +def getstring(x, err): + if x and (x[0] == 'string' or x[0] == 'symbol'): + return x[1] + raise error.ParseError(err) + +def getkindpat(x, y, allkinds, err): + kind = getsymbol(x) + pat = getstring(y, err) + if kind not in allkinds: + raise error.ParseError(_("invalid pattern kind: %s") % kind) + return '%s:%s' % (kind, pat) + +def getpattern(x, allkinds, err): + if x and x[0] == 'kindpat': + return getkindpat(x[1], x[2], allkinds, err) + return getstring(x, 
err) + +def getlist(x): + if not x: + return [] + if x[0] == 'list': + return list(x[1:]) + return [x] + +def getargs(x, min, max, err): + l = getlist(x) + if len(l) < min or len(l) > max: + raise error.ParseError(err) + return l + +def _analyze(x): + if x is None: + return x + + op = x[0] + if op in {'string', 'symbol'}: + return x + if op == 'kindpat': + getsymbol(x[1]) # kind must be a symbol + t = _analyze(x[2]) + return (op, x[1], t) + if op == 'group': + return _analyze(x[1]) + if op == 'negate': + raise error.ParseError(_("can't use negate operator in this context")) + if op == 'not': + t = _analyze(x[1]) + return (op, t) + if op == 'and': + ta = _analyze(x[1]) + tb = _analyze(x[2]) + return (op, ta, tb) + if op == 'minus': + return _analyze(('and', x[1], ('not', x[2]))) + if op in {'list', 'or'}: + ts = tuple(_analyze(y) for y in x[1:]) + return (op,) + ts + if op == 'func': + getsymbol(x[1]) # function name must be a symbol + ta = _analyze(x[2]) + return (op, x[1], ta) + raise error.ProgrammingError('invalid operator %r' % op) + +def _insertstatushints(x): + """Insert hint nodes where status should be calculated (first pass) + + This works in a bottom-up way, summing up status names and inserting hint + nodes at 'and' and 'or' as needed. Thus redundant hint nodes may be left. + + Returns (status-names, new-tree) at the given subtree, where status-names + is a sum of status names referenced in the given subtree. + """ + if x is None: + return (), x + + op = x[0] + if op in {'string', 'symbol', 'kindpat'}: + return (), x + if op == 'not': + h, t = _insertstatushints(x[1]) + return h, (op, t) + if op == 'and': + ha, ta = _insertstatushints(x[1]) + hb, tb = _insertstatushints(x[2]) + hr = ha + hb + if ha and hb: + return hr, ('withstatus', (op, ta, tb), ('string', ' '.join(hr))) + return hr, (op, ta, tb) + if op == 'or': + hs, ts = zip(*(_insertstatushints(y) for y in x[1:])) + hr = sum(hs, ()) + if sum(bool(h) for h in hs) > 1: + return hr, ('withstatus', (op,) + ts, ('string', ' '.join(hr))) + return hr, (op,) + ts + if op == 'list': + hs, ts = zip(*(_insertstatushints(y) for y in x[1:])) + return sum(hs, ()), (op,) + ts + if op == 'func': + f = getsymbol(x[1]) + # don't propagate 'ha' crossing a function boundary + ha, ta = _insertstatushints(x[2]) + if getattr(symbols.get(f), '_callstatus', False): + return (f,), ('withstatus', (op, x[1], ta), ('string', f)) + return (), (op, x[1], ta) + raise error.ProgrammingError('invalid operator %r' % op) + +def _mergestatushints(x, instatus): + """Remove redundant status hint nodes (second pass) + + This is the top-down pass that eliminates inner hint nodes.
+ """ + if x is None: + return x + + op = x[0] + if op == 'withstatus': + if instatus: + # drop redundant hint node + return _mergestatushints(x[1], instatus) + t = _mergestatushints(x[1], instatus=True) + return (op, t, x[2]) + if op in {'string', 'symbol', 'kindpat'}: + return x + if op == 'not': + t = _mergestatushints(x[1], instatus) + return (op, t) + if op == 'and': + ta = _mergestatushints(x[1], instatus) + tb = _mergestatushints(x[2], instatus) + return (op, ta, tb) + if op in {'list', 'or'}: + ts = tuple(_mergestatushints(y, instatus) for y in x[1:]) + return (op,) + ts + if op == 'func': + # don't propagate 'instatus' crossing a function boundary + ta = _mergestatushints(x[2], instatus=False) + return (op, x[1], ta) + raise error.ProgrammingError('invalid operator %r' % op) + +def analyze(x): + """Transform raw parsed tree to evaluatable tree which can be fed to + optimize() or getmatch() + + All pseudo operations should be mapped to real operations or functions + defined in methods or symbols table respectively. + """ + t = _analyze(x) + _h, t = _insertstatushints(t) + return _mergestatushints(t, instatus=False) + +def _optimizeandops(op, ta, tb): + if tb is not None and tb[0] == 'not': + return ('minus', ta, tb[1]) + return (op, ta, tb) + +def _optimizeunion(xs): + # collect string patterns so they can be compiled into a single regexp + ws, ts, ss = [], [], [] + for x in xs: + w, t = _optimize(x) + if t is not None and t[0] in {'string', 'symbol', 'kindpat'}: + ss.append(t) + continue + ws.append(w) + ts.append(t) + if ss: + ws.append(WEIGHT_CHECK_FILENAME) + ts.append(('patterns',) + tuple(ss)) + return ws, ts + +def _optimize(x): + if x is None: + return 0, x + + op = x[0] + if op == 'withstatus': + w, t = _optimize(x[1]) + return w, (op, t, x[2]) + if op in {'string', 'symbol'}: + return WEIGHT_CHECK_FILENAME, x + if op == 'kindpat': + w, t = _optimize(x[2]) + return w, (op, x[1], t) + if op == 'not': + w, t = _optimize(x[1]) + return w, (op, t) + if op == 'and': + wa, ta = _optimize(x[1]) + wb, tb = _optimize(x[2]) + if wa <= wb: + return wa, _optimizeandops(op, ta, tb) + else: + return wb, _optimizeandops(op, tb, ta) + if op == 'or': + ws, ts = _optimizeunion(x[1:]) + if len(ts) == 1: + return ws[0], ts[0] # 'or' operation is fully optimized out + ts = tuple(it[1] for it in sorted(enumerate(ts), + key=lambda it: ws[it[0]])) + return max(ws), (op,) + ts + if op == 'list': + ws, ts = zip(*(_optimize(y) for y in x[1:])) + return sum(ws), (op,) + ts + if op == 'func': + f = getsymbol(x[1]) + w = getattr(symbols.get(f), '_weight', 1) + wa, ta = _optimize(x[2]) + return w + wa, (op, x[1], ta) + raise error.ProgrammingError('invalid operator %r' % op) + +def optimize(x): + """Reorder/rewrite evaluatable tree for optimization + + All pseudo operations should be transformed beforehand. + """ + _w, t = _optimize(x) + return t + +def prettyformat(tree): + return parser.prettyformat(tree, ('string', 'symbol'))
--- a/mercurial/graphmod.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/graphmod.py Mon Aug 20 09:48:08 2018 -0700 @@ -22,6 +22,7 @@ from .node import nullrev from . import ( dagop, + pycompat, smartset, util, ) @@ -426,16 +427,16 @@ # shift_interline is the line containing the non-vertical # edges between this entry and the next shift_interline = echars[:idx * 2] - for i in xrange(2 + coldiff): + for i in pycompat.xrange(2 + coldiff): shift_interline.append(' ') count = ncols - idx - 1 if coldiff == -1: - for i in xrange(count): + for i in pycompat.xrange(count): shift_interline.extend(['/', ' ']) elif coldiff == 0: shift_interline.extend(echars[(idx + 1) * 2:ncols * 2]) else: - for i in xrange(count): + for i in pycompat.xrange(count): shift_interline.extend(['\\', ' ']) # draw edges from the current node to its parents
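These hunks are part of the Python 3 porting effort: the bare xrange builtin does not exist on Python 3, so call sites go through pycompat. A minimal sketch of the kind of shim pycompat provides (simplified; the real module does much more):

    import sys

    if sys.version_info[0] >= 3:
        xrange = range  # Python 3: range is already lazy
    # on Python 2, the builtin xrange is used as-is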
--- a/mercurial/help/config.txt Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/help/config.txt Mon Aug 20 09:48:08 2018 -0700 @@ -438,6 +438,20 @@ ``commands`` ------------ +``resolve.confirm`` + Confirm before performing the action if no filename is passed. + (default: False) + +``resolve.mark-check`` + Determines what level of checking :hg:`resolve --mark` will perform before + marking files as resolved. Valid values are ``none``, ``warn``, and + ``abort``. ``warn`` will output a warning listing the file(s) that still + have conflict markers in them, but will still mark everything resolved. + ``abort`` will output the same warning but will not mark things as resolved. + If --all is passed and this is set to ``abort``, only a warning will be + shown (an error will not be raised). + (default: ``none``) + ``status.relative`` Make paths in :hg:`status` output relative to the current directory. (default: False) @@ -1333,6 +1347,11 @@ halted, the repository is left in a normal ``unresolved`` merge state. (default: ``continue``) +``strict-capability-check`` + Whether the capabilities of internal merge tools are checked strictly + while examining the rules that decide which merge tool to use. + (default: False) + ``merge-patterns`` ------------------
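For example, a user who wants to be warned about leftover conflict markers while still having files marked resolved would add the following to their configuration (snippet mirroring the documentation above):

    [commands]
    resolve.mark-check = warn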
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/help/internals/linelog.txt Mon Aug 20 09:48:08 2018 -0700 @@ -0,0 +1,302 @@ +linelog is a storage format inspired by the "Interleaved deltas" idea. See +https://en.wikipedia.org/wiki/Interleaved_deltas for an introduction. + +0. SCCS Weave + + To understand what linelog is, let's first take a quick look at a simplified + SCCS weave format (with headers removed), which is an implementation of the + "Interleaved deltas" idea. + +0.1 Basic SCCS Weave File Format + + An SCCS weave file consists of plain text lines. Each line is either a + special instruction starting with "^A" or part of the content of the real + file the weave tracks. There are 3 important operations, where REV denotes + the revision number: + + ^AI REV, marking the beginning of an insertion block introduced by REV + ^AD REV, marking the beginning of a deletion block introduced by REV + ^AE REV, marking the end of the block started by "^AI REV" or "^AD REV" + + Note on revision numbers: For any two different revision numbers, one must + be an ancestor of the other to make them comparable. This enforces linear + history. In addition, the comparison functions (">=", "<") should be + efficient. This means that if revisions are strings, as in git or hg, an + external map is required to convert them into integers. + + For example, to represent the following changes: + + REV 1 | REV 2 | REV 3 + ------+-------+------- + a | a | a + b | b | 2 + c | 1 | c + | 2 | + | c | + + A possible weave file looks like: + + ^AI 1 + a + ^AD 3 + b + ^AI 2 + 1 + ^AE 3 + 2 + ^AE 2 + c + ^AE 1 + + An "^AE" does not always match its nearest operation ("^AI" or "^AD"). In + the above example, "^AE 3" does not match the nearest "^AI 2" but "^AD 3". + Therefore we need some extra information for "^AE". The SCCS weave uses a + revision number. It could also be a boolean value about whether it is an + insertion or a deletion (see section 0.4). + +0.2 Checkout + + The "checkout" operation is to retrieve file content at a given revision, + say X. It's doable by going through the file line by line and: + + - If we meet ^AI rev and rev > X, find the corresponding ^AE and jump there + - If we meet ^AD rev and rev <= X, find the corresponding ^AE and jump there + - Ignore ^AE + - For normal lines, just output them + +0.3 Annotate + + The "annotate" operation is to show extra metadata like the revision number + and the original line number a line comes from. + + It's basically just a "Checkout". The extra metadata can be stored + side by side with the line contents. Alternatively, we can infer the + revision number from "^AI"s. + + Some SCM tools have to calculate diffs on the fly and thus are much slower + on this operation. + +0.4 Tree Structure + + The word "interleaved" is used because "^AI" .. "^AE" and "^AD" .. "^AE" + blocks can be interleaved. + + If we consider insertions and deletions separately, they can form tree + structures, respectively. + + +--- ^AI 1 +--- ^AD 3 + | +- ^AI 2 | +- ^AD 2 + | | | | + | +- ^AE 2 | +- ^AE 2 + | | + +--- ^AE 1 +--- ^AE 3 + + More specifically, it's possible to build a tree for all insertions, where + the tree node has the structure "(rev, startline, endline)". "startline" is + the line number of "^AI" and "endline" is the line number of the matched + "^AE". The tree will have these properties: + + 1. child.rev > parent.rev + 2. child.startline > parent.startline + 3.
child.endline < parent.endline + + A similar tree for all deletions can also be built with the first property + changed to: + + 1. child.rev < parent.rev + +0.5 Malformed Cases + + The following cases are considered malformed in our implementation: + + 1. Interleaved insertions, or interleaved deletions. + These can be rewritten to a non-interleaved tree structure. + + Take insertions as an example; deletions are similar: + + ^AI x ^AI x + a a + ^AI x + 1 -> ^AI x + 1 + b b + ^AE x ^AE x + 1 + c ^AE x + ^AE x + 1 ^AI x + 1 + c + ^AE x + 1 + + 2. Nested insertions, where the inner one has a smaller revision number. + Or nested deletions, where the inner one has a larger revision number. + Either can be rewritten to a non-nested form. + + Take insertions as an example; deletions are similar: + + ^AI x + 1 ^AI x + 1 + a a + ^AI x -> ^AE x + 1 + b ^AI x + ^AE x b + c ^AE x + ^AE x + 1 ^AI x + 1 + c + ^AE x + 1 + + 3. Insertion inside deletion with a smaller revision number. + + Rewrite by duplicating the content inserted: + + ^AD x ^AD x + a a + ^AI x + 1 -> b + b c + ^AE x + 1 ^AE x + c ^AI x + 1 + ^AE x b + ^AE x + 1 + + Note: If "annotate" purely depends on "^AI" information, then the + duplicated content will lose track of where "b" is originally from. + + Some of them may be valid in other implementations for special purposes. For + example, to "revive" a previously deleted block in a newer revision. + +0.6 Cases That Can Be Optimized + + It's always better to get things nested. For example, the left is more + efficient than the right while they represent the same content: + + +--- ^AD 2 +- ^AD 1 + | +- ^AD 1 | LINE A + | | LINE A +- ^AE 1 + | +- ^AE 1 +- ^AD 2 + | LINE B | LINE B + +--- ^AE 2 +- ^AE 2 + + Our implementation sometimes generates the less efficient form. Always + generating the optimal form would require extra code complexity that seems + unworthwhile. + +0.7 Inefficiency + + The file format can be slow because: + + - Inserting a new line at position P requires rewriting all data after P. + - Finding "^AE" requires walking through the content (O(N), where N is the + number of lines between "^AI/D" and "^AE"). + +1. Linelog + + Linelog is a binary format dedicated to speeding up mercurial's (or + git's) "annotate" operation. It's designed to avoid issues mentioned in + section 0.7. + +1.1 Content Stored + + Linelog is not another storage for file contents. It only stores line + numbers and corresponding revision numbers, instead of actual line content. + This is okay for the "annotate" operation because the external source is + usually fast at checking out the content of a file at a specific revision. + + A typical SCCS weave is also fast on the "grep" operation, which needs + random accesses to line contents from different revisions of a file. This + can be slow with linelog's no-line-content design. However, we could use + an extra map ((rev, line num) -> line content) to speed it up. + + Note the revision numbers in linelog should be independent of mercurial's + integer revision numbers. There should be some mapping between linelog rev + and hg hash stored side by side, to make the files reusable after being + copied to another machine. + +1.2 Basic Format + + A linelog file consists of "instruction"s.
An "instruction" can be either: + + - JGE REV ADDR # jump to ADDR if rev >= REV + - JL REV ADDR # jump to ADDR if rev < REV + - LINE REV LINENUM # append the (LINENUM+1)-th line in revision REV + + For example, here is the example linelog representing the same file with + 3 revisions mentioned in section 0.1: + + SCCS | Linelog + Weave | Addr : Instruction + ------+------+------------- + ^AI 1 | 0 : JL 1 8 + a | 1 : LINE 1 0 + ^AD 3 | 2 : JGE 3 6 + b | 3 : LINE 1 1 + ^AI 2 | 4 : JL 2 7 + 1 | 5 : LINE 2 2 + ^AE 3 | + 2 | 6 : LINE 2 3 + ^AE 2 | + c | 7 : LINE 1 2 + ^AE 1 | + | 8 : END + + This way, "find ^AE" is O(1) because we just jump there. And we can insert + new lines without rewriting most part of the file by appending new lines and + changing a single instruction to jump to them. + + The current implementation uses 64 bits for an instruction: The opcode (JGE, + JL or LINE) takes 2 bits, REV takes 30 bits and ADDR or LINENUM takes 32 + bits. It also stores the max revision number and buffer size at the first + 64 bits for quick access to these values. + +1.3 Comparing with Mercurial's revlog format + + Apparently, linelog is very different from revlog: linelog stores rev and + line numbers, while revlog has line contents and other metadata (like + parents, flags). However, the revlog format could also be used to store rev + and line numbers. For example, to speed up the annotate operation, we could + also pre-calculate annotate results and just store them using the revlog + format. + + Therefore, linelog is actually somehow similar to revlog, with the important + trade-off that it only supports linear history (mentioned in section 0.1). + Essentially, the differences are: + + a) Linelog is full of deltas, while revlog could contain full file + contents sometimes. So linelog is smaller. Revlog could trade + reconstruction speed for file size - best case, revlog is as small as + linelog. + b) The interleaved delta structure allows skipping large portion of + uninteresting deltas so linelog's content reconstruction is faster than + the delta-only version of revlog (however it's possible to construct + a case where interleaved deltas degrade to plain deltas, so linelog + worst case would be delta-only revlog). Revlog could trade file size + for reconstruction speed. + c) Linelog implicitly maintains the order of all lines it stores. So it + could dump all the lines from all revisions, with a reasonable order. + While revlog could also dump all line additions, it requires extra + computation to figure out the order putting those lines - that's some + kind of "merge". + + "c" makes "hg absorb" easier to implement and makes it possible to do + "annotate --deleted". + +1.4 Malformed Cases Handling + + The following "case 1", "case 2", and "case 3" refer to cases mentioned + in section 0.5. + + Using the exposed API (replacelines), case 1 is impossible to generate, + although it's possible to generate it by constructing rawdata and load that + via linelog.fromdata. + + Doing annotate(maxrev) before replacelines (aka. a1, a2 passed to + replacelines are related to the latest revision) eliminates the possibility + of case 3. That makes sense since usually you'd like to make edits on top of + the latest revision. Practically, both absorb and fastannotate do this. + + Doing annotate(maxrev), plus replacelines(rev, ...) where rev >= maxrev + eliminates the possibility of case 2. That makes sense since usually the + edits belong to "new revisions", not "old revisions". 
Practically, + fastannotate does this. Absorb calls replacelines with rev < maxrev to edit + past revisions. So it needs some extra care not to generate case 2. + + If case 1 occurs, that probably means linelog file corruption (assuming + linelog is edited via public APIs). The checkout or annotate result could + be less meaningful or even error out, but linelog wouldn't enter an infinite + loop. + + If either case 2 or 3 occurs, linelog works as if the inner "^AI/D" and "^AE" + operations on the left side are silently ignored.
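To make the instruction table in section 1.2 of this document concrete, here is a toy interpreter for exactly that 9-instruction program (illustrative only; it is unrelated to the real mercurial.linelog module added later in this changeset):

    # instructions are (op, operand1, operand2); 'END' stops execution
    PROGRAM = [
        ('JL',   1, 8),   # 0
        ('LINE', 1, 0),   # 1: "a"
        ('JGE',  3, 6),   # 2
        ('LINE', 1, 1),   # 3: "b"
        ('JL',   2, 7),   # 4
        ('LINE', 2, 2),   # 5: "1"
        ('LINE', 2, 3),   # 6: "2"
        ('LINE', 1, 2),   # 7: "c"
        ('END',  0, 0),   # 8
    ]

    def checkout(rev):
        pc, lines = 0, []
        while True:
            op, a, b = PROGRAM[pc]
            if op == 'END':
                return lines
            elif op == 'JGE' and rev >= a:
                pc = b
            elif op == 'JL' and rev < a:
                pc = b
            elif op == 'LINE':
                lines.append((a, b))  # (introducing rev, original line number)
                pc += 1
            else:
                pc += 1               # a jump whose condition was not taken

    assert checkout(1) == [(1, 0), (1, 1), (1, 2)]                  # a b c
    assert checkout(2) == [(1, 0), (1, 1), (2, 2), (2, 3), (1, 2)]  # a b 1 2 c
    assert checkout(3) == [(1, 0), (2, 3), (1, 2)]                  # a 2 c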
--- a/mercurial/help/merge-tools.txt Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/help/merge-tools.txt Mon Aug 20 09:48:08 2018 -0700 @@ -36,8 +36,9 @@ .. internaltoolsmarker -Internal tools are always available and do not require a GUI but will by default -not handle symlinks or binary files. +Internal tools are always available and do not require a GUI but will +by default not handle symlinks or binary files. See the next section +for details about the "actual capabilities" described above. Choosing a merge tool ===================== @@ -54,8 +55,7 @@ 3. If the filename of the file to be merged matches any of the patterns in the merge-patterns configuration section, the first usable merge tool - corresponding to a matching pattern is used. Here, binary capabilities of the - merge tool are not considered. + corresponding to a matching pattern is used. 4. If ui.merge is set it will be considered next. If the value is not the name of a configured tool, the specified value is used and must be executable by @@ -72,6 +72,23 @@ 8. Otherwise, ``:prompt`` is used. +For historical reasons, Mercurial makes the following assumptions about +the capabilities of internal merge tools while examining the rules above, +regardless of their actual capabilities. + +==== =============== ====== ======= +step specified via binary symlink +==== =============== ====== ======= +1. --tool o o +2. HGMERGE o o +3. merge-patterns o (*) x (*) +4. ui.merge x (*) x (*) +==== =============== ====== ======= + +If the ``merge.strict-capability-check`` configuration is true, Mercurial +checks the capabilities of internal merge tools strictly in the (*) cases +above. It is false by default for backward compatibility. + .. note:: After selecting a merge program, Mercurial will by default attempt
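As a concrete example, enabling the strict check is a single knob (snippet for illustration):

    [merge]
    strict-capability-check = true

With this enabled, an internal tool reached via merge-patterns or ui.merge is only chosen for a binary file or symlink if it actually declares that capability; the filemerge.py hunks earlier in this changeset mark ``:dump`` and ``:forcedump`` as capable of both.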
--- a/mercurial/hg.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/hg.py Mon Aug 20 09:48:08 2018 -0700 @@ -9,6 +9,7 @@ from __future__ import absolute_import import errno +import functools import hashlib import os import shutil @@ -162,9 +163,16 @@ """return a repository object for the specified path""" obj = _peerlookup(path).instance(ui, path, create, intents=intents) ui = getattr(obj, "ui", ui) + if ui.configbool('devel', 'debug.extensions'): + log = functools.partial( + ui.debug, 'debug.extensions: ', label='debug.extensions') + else: + log = lambda *a, **kw: None for f in presetupfuncs or []: f(ui, obj) + log('- executing reposetup hooks\n') for name, module in extensions.extensions(ui): + log(' - running reposetup for %s\n' % (name,)) hook = getattr(module, 'reposetup', None) if hook: hook(ui, obj) @@ -258,7 +266,7 @@ raise error.Abort(_('destination already exists')) if not destwvfs.isdir(): - destwvfs.mkdir() + destwvfs.makedirs() destvfs.makedir() requirements = '' @@ -626,7 +634,7 @@ srcrepo.hook('preoutgoing', throw=True, source='clone') hgdir = os.path.realpath(os.path.join(dest, ".hg")) if not os.path.exists(dest): - os.mkdir(dest) + util.makedirs(dest) else: # only clean up directories we create ourselves cleandir = hgdir
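The new debug logging pre-binds a message prefix and a label onto ui.debug with functools.partial. The same pattern in isolation (a plain function stands in for ui.debug, which also concatenates its positional arguments):

    import functools

    def debug(*msgs, **opts):
        print(''.join(msgs), end='')  # stand-in for ui.debug

    log = functools.partial(debug, 'debug.extensions: ',
                            label='debug.extensions')
    log('- executing reposetup hooks\n')
    # prints: debug.extensions: - executing reposetup hooks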
--- a/mercurial/hgweb/hgweb_mod.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/hgweb/hgweb_mod.py Mon Aug 20 09:48:08 2018 -0700 @@ -140,11 +140,6 @@ if not staticurl.endswith('/'): staticurl += '/' - # some functions for the templater - - def motd(**map): - yield self.config('web', 'motd') - # figure out which style to use vars = {} @@ -177,12 +172,16 @@ 'urlbase': req.advertisedbaseurl, 'repo': self.reponame, 'encoding': encoding.encoding, - 'motd': motd, 'sessionvars': sessionvars, 'pathdef': makebreadcrumb(req.apppath), 'style': style, 'nonce': self.nonce, } + templatekeyword = registrar.templatekeyword(defaults) + @templatekeyword('motd', requires=()) + def motd(context, mapping): + yield self.config('web', 'motd') + tres = formatter.templateresources(self.repo.ui, self.repo) tmpl = templater.templater.frommapfile(mapfile, filters=filters,
--- a/mercurial/hgweb/hgwebdir_mod.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/hgweb/hgwebdir_mod.py Mon Aug 20 09:48:08 2018 -0700 @@ -33,6 +33,7 @@ hg, profiling, pycompat, + registrar, scmutil, templater, templateutil, @@ -495,12 +496,6 @@ def templater(self, req, nonce): - def motd(**map): - if self.motd is not None: - yield self.motd - else: - yield config('web', 'motd') - def config(section, name, default=uimod._unset, untrusted=True): return self.ui.config(section, name, default, untrusted) @@ -520,7 +515,6 @@ defaults = { "encoding": encoding.encoding, - "motd": motd, "url": req.apppath + '/', "logourl": logourl, "logoimg": logoimg, @@ -529,5 +523,13 @@ "style": style, "nonce": nonce, } + templatekeyword = registrar.templatekeyword(defaults) + @templatekeyword('motd', requires=()) + def motd(context, mapping): + if self.motd is not None: + yield self.motd + else: + yield config('web', 'motd') + tmpl = templater.templater.frommapfile(mapfile, defaults=defaults) return tmpl
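Both hgweb classes now register motd through registrar.templatekeyword instead of stuffing a bare generator into the defaults dict. The registration pattern in isolation (the table and yielded message are placeholders):

    from mercurial import registrar

    defaults = {}
    templatekeyword = registrar.templatekeyword(defaults)

    @templatekeyword('motd', requires=())
    def motd(context, mapping):
        # requires=() declares that the keyword needs no extra resources
        # (no repo, no ctx), matching the hgweb usage above
        yield b'message of the day'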
--- a/mercurial/hgweb/webcommands.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/hgweb/webcommands.py Mon Aug 20 09:48:08 2018 -0700 @@ -215,7 +215,7 @@ def revgen(): cl = web.repo.changelog - for i in xrange(len(web.repo) - 1, 0, -100): + for i in pycompat.xrange(len(web.repo) - 1, 0, -100): l = [] for j in cl.revs(max(0, i - 99), i): ctx = web.repo[j]
--- a/mercurial/hgweb/webutil.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/hgweb/webutil.py Mon Aug 20 09:48:08 2018 -0700 @@ -408,6 +408,12 @@ whyunstable._requires = {'repo', 'ctx'} +# helper to mark a function as a new-style template keyword; can be removed +# once old-style function gets unsupported and new-style becomes the default +def _kwfunc(f): + f._requires = () + return f + def commonentry(repo, ctx): node = ctx.node() return { @@ -432,8 +438,8 @@ 'branches': nodebranchdict(repo, ctx), 'tags': nodetagsdict(repo, node), 'bookmarks': nodebookmarksdict(repo, node), - 'parent': lambda **x: parents(ctx), - 'child': lambda **x: children(ctx), + 'parent': _kwfunc(lambda context, mapping: parents(ctx)), + 'child': _kwfunc(lambda context, mapping: children(ctx)), } def changelistentry(web, ctx): @@ -450,9 +456,9 @@ entry = commonentry(repo, ctx) entry.update( - allparents=lambda **x: parents(ctx), - parent=lambda **x: parents(ctx, rev - 1), - child=lambda **x: children(ctx, rev + 1), + allparents=_kwfunc(lambda context, mapping: parents(ctx)), + parent=_kwfunc(lambda context, mapping: parents(ctx, rev - 1)), + child=_kwfunc(lambda context, mapping: children(ctx, rev + 1)), changelogtag=showtags, files=files, ) @@ -521,7 +527,7 @@ changesetbranch=showbranch, files=templateutil.mappedgenerator(_listfilesgen, args=(ctx, web.stripecount)), - diffsummary=lambda **x: diffsummary(diffstatsgen), + diffsummary=_kwfunc(lambda context, mapping: diffsummary(diffstatsgen)), diffstat=diffstats, archives=web.archivelist(ctx.hex()), **pycompat.strkwargs(commonentry(web.repo, ctx))) @@ -613,21 +619,21 @@ len1 = lhi - llo len2 = rhi - rlo count = min(len1, len2) - for i in xrange(count): + for i in pycompat.xrange(count): yield _compline(type=type, leftlineno=llo + i + 1, leftline=leftlines[llo + i], rightlineno=rlo + i + 1, rightline=rightlines[rlo + i]) if len1 > len2: - for i in xrange(llo + count, lhi): + for i in pycompat.xrange(llo + count, lhi): yield _compline(type=type, leftlineno=i + 1, leftline=leftlines[i], rightlineno=None, rightline=None) elif len2 > len1: - for i in xrange(rlo + count, rhi): + for i in pycompat.xrange(rlo + count, rhi): yield _compline(type=type, leftlineno=None, leftline=None,
--- a/mercurial/httppeer.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/httppeer.py Mon Aug 20 09:48:08 2018 -0700 @@ -64,7 +64,7 @@ result = [] n = 0 - for i in xrange(0, len(value), valuelen): + for i in pycompat.xrange(0, len(value), valuelen): n += 1 result.append((fmt % str(n), pycompat.strurl(value[i:i + valuelen])))
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/linelog.py Mon Aug 20 09:48:08 2018 -0700 @@ -0,0 +1,436 @@ +# linelog - efficient cache for annotate data +# +# Copyright 2018 Google LLC. +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. +"""linelog is an efficient cache for annotate data inspired by SCCS Weaves. + +SCCS Weaves are an implementation of +https://en.wikipedia.org/wiki/Interleaved_deltas. See +mercurial/help/internals/linelog.txt for an exploration of SCCS weaves +and how linelog works in detail. + +Here's a hacker's summary: a linelog is a program which is executed in +the context of a revision. Executing the program emits information +about lines, including the revision that introduced them and the line +number in the file at the introducing revision. When an insertion or +deletion is performed on the file, a jump instruction is used to patch +in a new body of annotate information. +""" +from __future__ import absolute_import, print_function + +import abc +import struct + +from .thirdparty import ( + attr, +) +from . import ( + pycompat, +) + +_llentry = struct.Struct('>II') + +class LineLogError(Exception): + """Error raised when something bad happens internally in linelog.""" + +@attr.s +class lineinfo(object): + # Introducing revision of this line. + rev = attr.ib() + # Line number for this line in its introducing revision. + linenum = attr.ib() + # Private. Offset in the linelog program of this line. Used internally. + _offset = attr.ib() + +@attr.s +class annotateresult(object): + rev = attr.ib() + lines = attr.ib() + _eof = attr.ib() + + def __iter__(self): + return iter(self.lines) + +class _llinstruction(object): + + __metaclass__ = abc.ABCMeta + + @abc.abstractmethod + def __init__(self, op1, op2): + pass + + @abc.abstractmethod + def __str__(self): + pass + + def __repr__(self): + return str(self) + + @abc.abstractmethod + def __eq__(self, other): + pass + + @abc.abstractmethod + def encode(self): + """Encode this instruction to the binary linelog format.""" + + @abc.abstractmethod + def execute(self, rev, pc, emit): + """Execute this instruction. + + Args: + rev: The revision we're annotating. + pc: The current offset in the linelog program. + emit: A function that accepts a single lineinfo object. + + Returns: + The new value of pc. Returns None if execution should stop + (that is, we've found the end of the file).
+ """ + +class _jge(_llinstruction): + """If the current rev is greater than or equal to op1, jump to op2.""" + + def __init__(self, op1, op2): + self._cmprev = op1 + self._target = op2 + + def __str__(self): + return r'JGE %d %d' % (self._cmprev, self._target) + + def __eq__(self, other): + return (type(self) == type(other) + and self._cmprev == other._cmprev + and self._target == other._target) + + def encode(self): + return _llentry.pack(self._cmprev << 2, self._target) + + def execute(self, rev, pc, emit): + if rev >= self._cmprev: + return self._target + return pc + 1 + +class _jump(_llinstruction): + """Unconditional jumps are expressed as a JGE with op1 set to 0.""" + + def __init__(self, op1, op2): + if op1 != 0: + raise LineLogError("malformed JUMP, op1 must be 0, got %d" % op1) + self._target = op2 + + def __str__(self): + return r'JUMP %d' % (self._target) + + def __eq__(self, other): + return (type(self) == type(other) + and self._target == other._target) + + def encode(self): + return _llentry.pack(0, self._target) + + def execute(self, rev, pc, emit): + return self._target + +class _eof(_llinstruction): + """EOF is expressed as a JGE that always jumps to 0.""" + + def __init__(self, op1, op2): + if op1 != 0: + raise LineLogError("malformed EOF, op1 must be 0, got %d" % op1) + if op2 != 0: + raise LineLogError("malformed EOF, op2 must be 0, got %d" % op2) + + def __str__(self): + return r'EOF' + + def __eq__(self, other): + return type(self) == type(other) + + def encode(self): + return _llentry.pack(0, 0) + + def execute(self, rev, pc, emit): + return None + +class _jl(_llinstruction): + """If the current rev is less than op1, jump to op2.""" + + def __init__(self, op1, op2): + self._cmprev = op1 + self._target = op2 + + def __str__(self): + return r'JL %d %d' % (self._cmprev, self._target) + + def __eq__(self, other): + return (type(self) == type(other) + and self._cmprev == other._cmprev + and self._target == other._target) + + def encode(self): + return _llentry.pack(1 | (self._cmprev << 2), self._target) + + def execute(self, rev, pc, emit): + if rev < self._cmprev: + return self._target + return pc + 1 + +class _line(_llinstruction): + """Emit a line.""" + + def __init__(self, op1, op2): + # This line was introduced by this revision number. + self._rev = op1 + # This line had the specified line number in the introducing revision. 
+ self._origlineno = op2 + + def __str__(self): + return r'LINE %d %d' % (self._rev, self._origlineno) + + def __eq__(self, other): + return (type(self) == type(other) + and self._rev == other._rev + and self._origlineno == other._origlineno) + + def encode(self): + return _llentry.pack(2 | (self._rev << 2), self._origlineno) + + def execute(self, rev, pc, emit): + emit(lineinfo(self._rev, self._origlineno, pc)) + return pc + 1 + +def _decodeone(data, offset): + """Decode a single linelog instruction from an offset in a buffer.""" + try: + op1, op2 = _llentry.unpack_from(data, offset) + except struct.error as e: + raise LineLogError('reading an instruction failed: %r' % e) + opcode = op1 & 0b11 + op1 = op1 >> 2 + if opcode == 0: + if op1 == 0: + if op2 == 0: + return _eof(op1, op2) + return _jump(op1, op2) + return _jge(op1, op2) + elif opcode == 1: + return _jl(op1, op2) + elif opcode == 2: + return _line(op1, op2) + raise NotImplementedError('Unimplemented opcode %r' % opcode) + +class linelog(object): + """Efficient cache for per-line history information.""" + + def __init__(self, program=None, maxrev=0): + if program is None: + # We pad the program with an extra leading EOF so that our + # offsets will match the C code exactly. This means we can + # interoperate with the C code. + program = [_eof(0, 0), _eof(0, 0)] + self._program = program + self._lastannotate = None + self._maxrev = maxrev + + def __eq__(self, other): + return (type(self) == type(other) + and self._program == other._program + and self._maxrev == other._maxrev) + + def __repr__(self): + return '<linelog at %s: maxrev=%d size=%d>' % ( + hex(id(self)), self._maxrev, len(self._program)) + + def debugstr(self): + fmt = r'%%%dd %%s' % len(str(len(self._program))) + return pycompat.sysstr('\n').join( + fmt % (idx, i) for idx, i in enumerate(self._program[1:], 1)) + + @classmethod + def fromdata(cls, buf): + if len(buf) % _llentry.size != 0: + raise LineLogError( + "invalid linelog buffer size %d (must be a multiple of %d)" % ( + len(buf), _llentry.size)) + expected = len(buf) / _llentry.size + fakejge = _decodeone(buf, 0) + if isinstance(fakejge, _jump): + maxrev = 0 + else: + maxrev = fakejge._cmprev + numentries = fakejge._target + if expected != numentries: + raise LineLogError("corrupt linelog data: claimed" + " %d entries but given data for %d entries" % ( + expected, numentries)) + instructions = [_eof(0, 0)] + for offset in pycompat.xrange(1, numentries): + instructions.append(_decodeone(buf, offset * _llentry.size)) + return cls(instructions, maxrev=maxrev) + + def encode(self): + hdr = _jge(self._maxrev, len(self._program)).encode() + return hdr + ''.join(i.encode() for i in self._program[1:]) + + def clear(self): + self._program = [] + self._maxrev = 0 + self._lastannotate = None + + def replacelines_vec(self, rev, a1, a2, blines): + return self.replacelines(rev, a1, a2, 0, len(blines), + _internal_blines=blines) + + def replacelines(self, rev, a1, a2, b1, b2, _internal_blines=None): + """Replace lines [a1, a2) with lines [b1, b2).""" + if self._lastannotate: + # TODO(augie): make replacelines() accept a revision at + # which we're editing as well as a revision to mark + # responsible for the edits. In hg-experimental it's + # stateful like this, so we're doing the same thing to + # retain compatibility with absorb until that's imported. 
+ ar = self._lastannotate + else: + ar = self.annotate(rev) + # ar = self.annotate(self._maxrev) + if a1 > len(ar.lines): + raise LineLogError( + '%d contains %d lines, tried to access line %d' % ( + rev, len(ar.lines), a1)) + elif a1 == len(ar.lines): + # Simulated EOF instruction since we're at EOF, which + # doesn't have a "real" line. + a1inst = _eof(0, 0) + a1info = lineinfo(0, 0, ar._eof) + else: + a1info = ar.lines[a1] + a1inst = self._program[a1info._offset] + programlen = self._program.__len__ + oldproglen = programlen() + appendinst = self._program.append + + # insert + blineinfos = [] + bappend = blineinfos.append + if b1 < b2: + # Determine the jump target for the JGE at the start of + # the new block. + tgt = oldproglen + (b2 - b1 + 1) + # Jump to skip the insert if we're at an older revision. + appendinst(_jl(rev, tgt)) + for linenum in pycompat.xrange(b1, b2): + if _internal_blines is None: + bappend(lineinfo(rev, linenum, programlen())) + appendinst(_line(rev, linenum)) + else: + newrev, newlinenum = _internal_blines[linenum] + bappend(lineinfo(newrev, newlinenum, programlen())) + appendinst(_line(newrev, newlinenum)) + # delete + if a1 < a2: + if a2 > len(ar.lines): + raise LineLogError( + '%d contains %d lines, tried to access line %d' % ( + rev, len(ar.lines), a2)) + elif a2 == len(ar.lines): + endaddr = ar._eof + else: + endaddr = ar.lines[a2]._offset + if a2 > 0 and rev < self._maxrev: + # If we're here, we're deleting a chunk of an old + # commit, so we need to be careful and not touch + # invisible lines between a2-1 and a2 (IOW, lines that + # are added later). + endaddr = ar.lines[a2 - 1]._offset + 1 + appendinst(_jge(rev, endaddr)) + # copy instruction from a1 + a1instpc = programlen() + appendinst(a1inst) + # if a1inst isn't a jump or EOF, then we need to add an unconditional + # jump back into the program here. + if not isinstance(a1inst, (_jump, _eof)): + appendinst(_jump(0, a1info._offset + 1)) + # Patch instruction at a1, which makes our patch live. + self._program[a1info._offset] = _jump(0, oldproglen) + + # Update self._lastannotate in place. This serves as a cache to avoid + # expensive "self.annotate" in this function, when "replacelines" is + # used continuously. + if len(self._lastannotate.lines) > a1: + self._lastannotate.lines[a1]._offset = a1instpc + else: + assert isinstance(a1inst, _eof) + self._lastannotate._eof = a1instpc + self._lastannotate.lines[a1:a2] = blineinfos + self._lastannotate.rev = max(self._lastannotate.rev, rev) + + if rev > self._maxrev: + self._maxrev = rev + + def annotate(self, rev): + pc = 1 + lines = [] + executed = 0 + # Sanity check: if instructions executed exceeds len(program), we + # hit an infinite loop in the linelog program somehow and we + # should stop. + while pc is not None and executed < len(self._program): + inst = self._program[pc] + lastpc = pc + pc = inst.execute(rev, pc, lines.append) + executed += 1 + if pc is not None: + raise LineLogError( + r'Probably hit an infinite loop in linelog. Program:\n' + + self.debugstr()) + ar = annotateresult(rev, lines, lastpc) + self._lastannotate = ar + return ar + + @property + def maxrev(self): + return self._maxrev + + # Stateful methods which depend on the value of the last + # annotation run. This API is for compatibility with the original + # linelog, and we should probably consider refactoring it. + @property + def annotateresult(self): + """Return the last annotation result.
C linelog code exposed this.""" + return [(l.rev, l.linenum) for l in self._lastannotate.lines] + + def getoffset(self, line): + return self._lastannotate.lines[line]._offset + + def getalllines(self, start=0, end=0): + """Get all lines that ever occurred in [start, end). + + Passing start == end == 0 means "all lines ever". + + This works in terms of *internal* program offsets, not line numbers. + """ + pc = start or 1 + lines = [] + # only take as many steps as there are instructions in the + # program - if we don't find an EOF or our stop-line before + # then, something is badly broken. + for step in pycompat.xrange(len(self._program)): + inst = self._program[pc] + nextpc = pc + 1 + if isinstance(inst, _jump): + nextpc = inst._target + elif isinstance(inst, _eof): + return lines + elif isinstance(inst, (_jl, _jge)): + pass + elif isinstance(inst, _line): + lines.append((inst._rev, inst._origlineno)) + else: + raise LineLogError("Illegal instruction %r" % inst) + if nextpc == end: + return lines + pc = nextpc + raise LineLogError("Failed to perform getalllines")
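To make the pure-Python linelog API above concrete, here is a minimal usage sketch. It only relies on names visible in this diff (linelog, replacelines, annotate, and lineinfo's rev/linenum); the revision numbers and line ranges are invented for illustration:

    from mercurial import linelog

    ll = linelog.linelog()
    # rev 1 introduces three lines: replace the empty range [0, 0) with [0, 3)
    ll.replacelines(1, 0, 0, 0, 3)
    # rev 2 rewrites the middle line: replace [1, 2) with a new line 1
    ll.replacelines(2, 1, 2, 1, 2)
    ar = ll.annotate(2)
    assert [(l.rev, l.linenum) for l in ar.lines] == [(1, 0), (2, 1), (1, 2)]
    # annotating the older revision still sees the original content
    assert [(l.rev, l.linenum) for l in ll.annotate(1).lines] == \
        [(1, 0), (1, 1), (1, 2)]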
--- a/mercurial/localrepo.py	Sun Aug 19 13:27:02 2018 +0900
+++ b/mercurial/localrepo.py	Mon Aug 20 09:48:08 2018 -0700
@@ -495,6 +495,11 @@
' dummy changelog to prevent using the old repo layout'
)
else:
+ try:
+ self.vfs.stat()
+ except OSError as inst:
+ if inst.errno != errno.ENOENT:
+ raise
raise error.RepoError(_("repository %s not found") % path)
elif create:
raise error.RepoError(_("repository %s already exists") % path)
@@ -811,7 +816,7 @@
" working parent %s!\n") % short(node))
return nullid

- @repofilecache(narrowspec.FILENAME)
+ @storecache(narrowspec.FILENAME)
def narrowpats(self):
"""matcher patterns for this repository's narrowspec

@@ -823,9 +828,9 @@
source = hg.sharedreposource(self)
return narrowspec.load(source)

- @repofilecache(narrowspec.FILENAME)
+ @storecache(narrowspec.FILENAME)
def _narrowmatch(self):
- if changegroup.NARROW_REQUIREMENT not in self.requirements:
+ if repository.NARROW_REQUIREMENT not in self.requirements:
return matchmod.always(self.root, '')
include, exclude = self.narrowpats
return narrowspec.match(self.root, include=include, exclude=exclude)
@@ -850,7 +855,7 @@
if isinstance(changeid, slice):
# wdirrev isn't contiguous so the slice shouldn't include it
return [context.changectx(self, i)
- for i in xrange(*changeid.indices(len(self)))
+ for i in pycompat.xrange(*changeid.indices(len(self)))
if i not in self.changelog.filteredrevs]
try:
return context.changectx(self, changeid)
@@ -860,7 +865,8 @@
def __contains__(self, changeid):
"""True if the given changeid exists

- error.LookupError is raised if an ambiguous node specified.
+ error.AmbiguousPrefixLookupError is raised if an ambiguous node
+ is specified.
"""
try:
self[changeid]
@@ -1372,6 +1378,7 @@
else:
# discard all changes (including ones already written
# out) in this transaction
+ narrowspec.restorebackup(self, 'journal.narrowspec')
repo.dirstate.restorebackup(None, 'journal.dirstate')

repo.invalidate(clearfilecache=True)
@@ -1385,7 +1392,7 @@
releasefn=releasefn, checkambigfiles=_cachedfiles,
name=desc)
- tr.changes['revs'] = xrange(0, 0)
+ tr.changes['revs'] = pycompat.xrange(0, 0)
tr.changes['obsmarkers'] = set()
tr.changes['phases'] = {}
tr.changes['bookmarks'] = {}
@@ -1460,6 +1467,7 @@
@unfilteredmethod
def _writejournal(self, desc):
self.dirstate.savebackup(None, 'journal.dirstate')
+ narrowspec.savebackup(self, 'journal.narrowspec')
self.vfs.write("journal.branch",
encoding.fromlocal(self.dirstate.branch()))
self.vfs.write("journal.desc",
@@ -1547,6 +1555,7 @@
# prevent dirstateguard from overwriting already restored one
dsguard.close()

+ narrowspec.restorebackup(self, 'undo.narrowspec')
self.dirstate.restorebackup(None, 'undo.dirstate')
try:
branch = self.vfs.read('undo.branch')
@@ -1612,6 +1621,10 @@
rbc.branchinfo(r)
rbc.write()

+ # ensure the working copy parents are in the manifestfulltextcache
+ for ctx in self['.'].parents():
+ ctx.manifest() # accessing the manifest is enough
+
def invalidatecaches(self):
if '_tagscache' in vars(self):
@@ -2026,6 +2039,11 @@
def commitctx(self, ctx, error=False):
"""Add a new revision to current repository.
Revision information is passed via the context argument.
+
+ ctx.files() should list all files involved in this commit, i.e.
+ modified/added/removed files. On merge, it may be wider than the
+ ctx.files() to be committed, since any file nodes derived directly
+ from p1 or p2 are excluded from the committed ctx.files().
""" tr = None @@ -2039,6 +2057,7 @@ if ctx.manifestnode(): # reuse an existing manifest revision + self.ui.debug('reusing known manifest\n') mn = ctx.manifestnode() files = ctx.files() elif ctx.files(): @@ -2077,16 +2096,31 @@ raise # update manifest - self.ui.note(_("committing manifest\n")) removed = [f for f in sorted(removed) if f in m1 or f in m2] drop = [f for f in removed if f in m] for f in drop: del m[f] - mn = mctx.write(trp, linkrev, - p1.manifestnode(), p2.manifestnode(), - added, drop) files = changed + removed + md = None + if not files: + # if no "files" actually changed in terms of the changelog, + # try hard to detect unmodified manifest entry so that the + # exact same commit can be reproduced later on convert. + md = m1.diff(m, scmutil.matchfiles(self, ctx.files())) + if not files and md: + self.ui.debug('not reusing manifest (no file change in ' + 'changelog, but manifest differs)\n') + if files or md: + self.ui.note(_("committing manifest\n")) + mn = mctx.write(trp, linkrev, + p1.manifestnode(), p2.manifestnode(), + added, drop) + else: + self.ui.debug('reusing manifest form p1 (listed files ' + 'actually unchanged)\n') + mn = p1.manifestnode() else: + self.ui.debug('reusing manifest from p1 (no file change)\n') mn = p1.manifestnode() files = []
--- a/mercurial/mail.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/mail.py Mon Aug 20 09:48:08 2018 -0700 @@ -73,15 +73,24 @@ def _get_socket(self, host, port, timeout): if self.debuglevel > 0: - self._ui.debug('connect: %r\n' % (host, port)) + self._ui.debug('connect: %r\n' % ((host, port),)) new_socket = socket.create_connection((host, port), timeout) new_socket = sslutil.wrapsocket(new_socket, self.keyfile, self.certfile, ui=self._ui, serverhostname=self._host) - self.file = smtplib.SSLFakeFile(new_socket) + self.file = new_socket.makefile(r'rb') return new_socket +def _pyhastls(): + """Returns true iff Python has TLS support, false otherwise.""" + try: + import ssl + getattr(ssl, 'HAS_TLS', False) + return True + except ImportError: + return False + def _smtp(ui): '''build an smtp connection and return a function to send mail''' local_hostname = ui.config('smtp', 'local_hostname') @@ -89,7 +98,7 @@ # backward compatible: when tls = true, we use starttls. starttls = tls == 'starttls' or stringutil.parsebool(tls) smtps = tls == 'smtps' - if (starttls or smtps) and not util.safehasattr(socket, 'ssl'): + if (starttls or smtps) and not _pyhastls(): raise error.Abort(_("can't use TLS: Python SSL support not installed")) mailhost = ui.config('smtp', 'host') if not mailhost: @@ -143,8 +152,9 @@ def _sendmail(ui, sender, recipients, msg): '''send mail using sendmail.''' program = ui.config('email', 'method') - cmdline = '%s -f %s %s' % (program, stringutil.email(sender), - ' '.join(map(stringutil.email, recipients))) + stremail = lambda x: stringutil.email(encoding.strtolocal(x)) + cmdline = '%s -f %s %s' % (program, stremail(sender), + ' '.join(map(stremail, recipients))) ui.note(_('sending mail: %s\n') % cmdline) fp = procutil.popen(cmdline, 'wb') fp.write(util.tonativeeol(msg)) @@ -160,7 +170,8 @@ # Should be time.asctime(), but Windows prints 2-characters day # of month instead of one. Make them print the same thing. date = time.strftime(r'%a %b %d %H:%M:%S %Y', time.localtime()) - fp.write('From %s %s\n' % (sender, date)) + fp.write('From %s %s\n' % (encoding.strtolocal(sender), + encoding.strtolocal(date))) fp.write(msg) fp.write('\n\n') fp.close() @@ -209,7 +220,7 @@ cs = ['us-ascii', 'utf-8', encoding.encoding, encoding.fallbackencoding] if display: - return mimetextqp(s, subtype, 'us-ascii') + cs = ['us-ascii'] for charset in cs: try: s.decode(pycompat.sysstr(charset)) @@ -252,10 +263,27 @@ order. Tries both encoding and fallbackencoding for input. Only as last resort send as is in fake ascii. Caveat: Do not use for mail parts containing patches!''' + sendcharsets = charsets or _charsets(ui) + if not isinstance(s, bytes): + # We have unicode data, which we need to try and encode to + # some reasonable-ish encoding. Try the encodings the user + # wants, and fall back to garbage-in-ascii. + for ocs in sendcharsets: + try: + return s.encode(pycompat.sysstr(ocs)), ocs + except UnicodeEncodeError: + pass + except LookupError: + ui.warn(_('ignoring invalid sendcharset: %s\n') % ocs) + else: + # Everything failed, ascii-armor what we've got and send it. + return s.encode('ascii', 'backslashreplace') + # We have a bytes of unknown encoding. We'll try and guess a valid + # encoding, falling back to pretending we had ascii even though we + # know that's wrong. 
try: s.decode('ascii') except UnicodeDecodeError: - sendcharsets = charsets or _charsets(ui) for ics in (encoding.encoding, encoding.fallbackencoding): try: u = s.decode(ics) @@ -263,7 +291,7 @@ continue for ocs in sendcharsets: try: - return u.encode(ocs), ocs + return u.encode(pycompat.sysstr(ocs)), ocs except UnicodeEncodeError: pass except LookupError: @@ -280,40 +308,46 @@ return s def _addressencode(ui, name, addr, charsets=None): + assert isinstance(addr, bytes) name = headencode(ui, name, charsets) try: acc, dom = addr.split('@') - acc = acc.encode('ascii') - dom = dom.decode(encoding.encoding).encode('idna') + acc.decode('ascii') + dom = dom.decode(pycompat.sysstr(encoding.encoding)).encode('idna') addr = '%s@%s' % (acc, dom) except UnicodeDecodeError: raise error.Abort(_('invalid email address: %s') % addr) except ValueError: try: # too strict? - addr = addr.encode('ascii') + addr.decode('ascii') except UnicodeDecodeError: raise error.Abort(_('invalid local address: %s') % addr) - return email.utils.formataddr((name, addr)) + return pycompat.bytesurl( + email.utils.formataddr((name, encoding.strfromlocal(addr)))) def addressencode(ui, address, charsets=None, display=False): '''Turns address into RFC-2047 compliant header.''' if display or not address: return address or '' - name, addr = email.utils.parseaddr(address) - return _addressencode(ui, name, addr, charsets) + name, addr = email.utils.parseaddr(encoding.strfromlocal(address)) + return _addressencode(ui, name, encoding.strtolocal(addr), charsets) def addrlistencode(ui, addrs, charsets=None, display=False): '''Turns a list of addresses into a list of RFC-2047 compliant headers. A single element of input list may contain multiple addresses, but output always has one address per item''' + for a in addrs: + assert isinstance(a, bytes), (r'%r unexpectedly not a bytestr' % a) if display: return [a.strip() for a in addrs if a.strip()] result = [] - for name, addr in email.utils.getaddresses(addrs): + for name, addr in email.utils.getaddresses( + [encoding.strfromlocal(a) for a in addrs]): if name or addr: - result.append(_addressencode(ui, name, addr, charsets)) + r = _addressencode(ui, name, encoding.strtolocal(addr), charsets) + result.append(r) return result def mimeencode(ui, s, charsets=None, display=False):
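The unicode branch added to the header-encoding path above follows a simple negotiation loop: try each candidate charset in order, then ascii-armor as a last resort. A self-contained model of that loop (a standalone function, not the mail module's API):

    def pickcharset(u, candidates):
        """Encode unicode text with the first charset that works."""
        for cs in candidates:
            try:
                return u.encode(cs), cs
            except (UnicodeEncodeError, LookupError):
                continue
        # everything failed: ascii-armor what we have
        return u.encode('ascii', 'backslashreplace'), 'us-ascii'

    pickcharset(u'caf\xe9', ['us-ascii', 'utf-8'])  # -> (b'caf\xc3\xa9', 'utf-8')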
--- a/mercurial/manifest.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/manifest.py Mon Aug 20 09:48:08 2018 -0700 @@ -10,6 +10,7 @@ import heapq import itertools import struct +import weakref from .i18n import _ from .node import ( @@ -1136,6 +1137,115 @@ for subtree in subm.walksubtrees(matcher=matcher): yield subtree +class manifestfulltextcache(util.lrucachedict): + """File-backed LRU cache for the manifest cache + + File consists of entries, up to EOF: + + - 20 bytes node, 4 bytes length, <length> manifest data + + These are written in reverse cache order (oldest to newest). + + """ + def __init__(self, max): + super(manifestfulltextcache, self).__init__(max) + self._dirty = False + self._read = False + self._opener = None + + def read(self): + if self._read or self._opener is None: + return + + try: + with self._opener('manifestfulltextcache') as fp: + set = super(manifestfulltextcache, self).__setitem__ + # ignore trailing data, this is a cache, corruption is skipped + while True: + node = fp.read(20) + if len(node) < 20: + break + try: + size = struct.unpack('>L', fp.read(4))[0] + except struct.error: + break + value = bytearray(fp.read(size)) + if len(value) != size: + break + set(node, value) + except IOError: + # the file is allowed to be missing + pass + + self._read = True + self._dirty = False + + def write(self): + if not self._dirty or self._opener is None: + return + # rotate backwards to the first used node + with self._opener( + 'manifestfulltextcache', 'w', atomictemp=True, checkambig=True + ) as fp: + node = self._head.prev + while True: + if node.key in self._cache: + fp.write(node.key) + fp.write(struct.pack('>L', len(node.value))) + fp.write(node.value) + if node is self._head: + break + node = node.prev + + def __len__(self): + if not self._read: + self.read() + return super(manifestfulltextcache, self).__len__() + + def __contains__(self, k): + if not self._read: + self.read() + return super(manifestfulltextcache, self).__contains__(k) + + def __iter__(self): + if not self._read: + self.read() + return super(manifestfulltextcache, self).__iter__() + + def __getitem__(self, k): + if not self._read: + self.read() + # the cache lru order can change on read + setdirty = self._cache.get(k) is not self._head + value = super(manifestfulltextcache, self).__getitem__(k) + if setdirty: + self._dirty = True + return value + + def __setitem__(self, k, v): + if not self._read: + self.read() + super(manifestfulltextcache, self).__setitem__(k, v) + self._dirty = True + + def __delitem__(self, k): + if not self._read: + self.read() + super(manifestfulltextcache, self).__delitem__(k) + self._dirty = True + + def get(self, k, default=None): + if not self._read: + self.read() + return super(manifestfulltextcache, self).get(k, default=default) + + def clear(self, clear_persisted_data=False): + super(manifestfulltextcache, self).clear() + if clear_persisted_data: + self._dirty = True + self.write() + self._read = False + class manifestrevlog(revlog.revlog): '''A revlog that stores manifest texts. This is responsible for caching the full-text manifest contents. 
@@ -1164,7 +1274,7 @@ self._treeondisk = optiontreemanifest or treemanifest - self._fulltextcache = util.lrucachedict(cachesize) + self._fulltextcache = manifestfulltextcache(cachesize) if dir: assert self._treeondisk, 'opts is %r' % opts @@ -1186,13 +1296,35 @@ checkambig=not bool(dir), mmaplargeindex=True) + def _setupmanifestcachehooks(self, repo): + """Persist the manifestfulltextcache on lock release""" + if not util.safehasattr(repo, '_lockref'): + return + + self._fulltextcache._opener = repo.cachevfs + reporef = weakref.ref(repo) + manifestrevlogref = weakref.ref(self) + + def persistmanifestcache(): + repo = reporef() + self = manifestrevlogref() + if repo is None or self is None: + return + if repo.manifestlog._revlog is not self: + # there's a different manifest in play now, abort + return + self._fulltextcache.write() + + if repo._currentlock(repo._lockref) is not None: + repo._afterlock(persistmanifestcache) + @property def fulltextcache(self): return self._fulltextcache - def clearcaches(self): + def clearcaches(self, clear_persisted_data=False): super(manifestrevlog, self).clearcaches() - self._fulltextcache.clear() + self._fulltextcache.clear(clear_persisted_data=clear_persisted_data) self._dirlogcache = {'': self} def dirlog(self, d): @@ -1288,6 +1420,7 @@ self._treeinmem = usetreemanifest self._revlog = repo._constructmanifest() + self._revlog._setupmanifestcachehooks(repo) self._narrowmatch = repo.narrowmatch() # A cache of the manifestctx or treemanifestctx for each directory @@ -1345,9 +1478,9 @@ mancache[node] = m return m - def clearcaches(self): + def clearcaches(self, clear_persisted_data=False): self._dirmancache.clear() - self._revlog.clearcaches() + self._revlog.clearcaches(clear_persisted_data=clear_persisted_data) def rev(self, node): return self._revlog.rev(node) @@ -1421,9 +1554,12 @@ self._data = manifestdict() else: rl = self._revlog() - text = rl.revision(self._node) - arraytext = bytearray(text) - rl._fulltextcache[self._node] = arraytext + if self._node in rl._fulltextcache: + text = pycompat.bytestr(rl._fulltextcache[self._node]) + else: + text = rl.revision(self._node) + arraytext = bytearray(text) + rl._fulltextcache[self._node] = arraytext self._data = manifestdict(text) return self._data @@ -1523,9 +1659,12 @@ m.setnode(self._node) self._data = m else: - text = rl.revision(self._node) - arraytext = bytearray(text) - rl.fulltextcache[self._node] = arraytext + if self._node in rl.fulltextcache: + text = pycompat.bytestr(rl.fulltextcache[self._node]) + else: + text = rl.revision(self._node) + arraytext = bytearray(text) + rl.fulltextcache[self._node] = arraytext self._data = treemanifest(dir=self._dir, text=text) return self._data
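The on-disk layout documented in the manifestfulltextcache docstring (20-byte node, 4-byte big-endian length, payload) is easy to model outside the class. A hedged standalone reader, mirroring the tolerant read() above:

    import struct

    def iterrecords(data):
        """Yield (node, text) pairs from a manifestfulltextcache-style buffer.

        Truncated entries or trailing garbage end iteration silently,
        matching the cache's "corruption is skipped" behavior.
        """
        offset = 0
        while offset + 24 <= len(data):
            node = data[offset:offset + 20]
            (size,) = struct.unpack('>L', data[offset + 20:offset + 24])
            payload = data[offset + 24:offset + 24 + size]
            if len(payload) != size:
                break
            yield node, payload
            offset += 24 + size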
--- a/mercurial/match.py	Sun Aug 19 13:27:02 2018 +0900
+++ b/mercurial/match.py	Mon Aug 20 09:48:08 2018 -0700
@@ -8,6 +8,7 @@
from __future__ import absolute_import, print_function

import copy
+import itertools
import os
import re

@@ -331,6 +332,38 @@
'''
return True

+ def visitchildrenset(self, dir):
+ '''Decides whether a directory should be visited based on whether it
+ has potential matches in it or one of its subdirectories, and
+ potentially lists which subdirectories of that directory should be
+ visited. This is based on the match's primary, included, and excluded
+ patterns.
+
+ This function is very similar to 'visitdir', and the following mapping
+ can be applied:
+
+ visitdir | visitchildrenset
+ ----------+-------------------
+ False | set()
+ 'all' | 'all'
+ True | 'this' OR non-empty set of subdirs to visit
+
+ Example:
+ Assume matchers ['path:foo/bar', 'rootfilesin:qux']; we would return
+ the following values (assuming the implementation of visitchildrenset
+ is capable of recognizing this; some implementations are not).
+
+ '.' -> {'foo', 'qux'}
+ 'baz' -> set()
+ 'foo' -> {'bar'}
+ # Ideally this would be 'all', but since the prefix nature of matchers
+ # is applied to the entire matcher, we have to downgrade this to
+ # 'this' due to the non-prefix 'rootfilesin'-kind matcher.
+ 'foo/bar' -> 'this'
+ 'qux' -> 'this'
+ '''
+ return 'this'
+
def always(self):
'''Matcher will match everything and .files() will be empty --
optimization might be possible.'''
@@ -367,6 +400,9 @@
def visitdir(self, dir):
return 'all'

+ def visitchildrenset(self, dir):
+ return 'all'
+
def __repr__(self):
return r'<alwaysmatcher>'

@@ -390,6 +426,9 @@
def visitdir(self, dir):
return False

+ def visitchildrenset(self, dir):
+ return set()
+
def __repr__(self):
return r'<nevermatcher>'

@@ -430,6 +469,15 @@
any(parentdir in self._fileset
for parentdir in util.finddirs(dir)))

+ def visitchildrenset(self, dir):
+ ret = self.visitdir(dir)
+ if ret is True:
+ return 'this'
+ elif not ret:
+ return set()
+ assert ret == 'all'
+ return 'all'
+
def prefix(self):
return self._prefix

@@ -445,11 +493,14 @@
self._pats, self.matchfn = _buildmatch(kindpats, '(?:/|$)',
listsubrepos, root)
self._prefix = _prefix(kindpats)
- roots, dirs = _rootsanddirs(kindpats)
+ roots, dirs, parents = _rootsdirsandparents(kindpats)
# roots are directories which are recursively included.
self._roots = set(roots)
# dirs are directories which are non-recursively included.
self._dirs = set(dirs)
+ # parents are directories which are non-recursively included because
+ # they are needed to get to items in _dirs or _roots.
+ self._parents = set(parents)

def visitdir(self, dir):
if self._prefix and dir in self._roots:
@@ -457,9 +508,47 @@
return ('.' in self._roots or
dir in self._roots or
dir in self._dirs or
+ dir in self._parents or
any(parentdir in self._roots
for parentdir in util.finddirs(dir)))

+ def visitchildrenset(self, dir):
+ if self._prefix and dir in self._roots:
+ return 'all'
+ # Note: this does *not* include the 'dir in self._parents' case from
+ # visitdir, that's handled below.
+ if ('.' in self._roots or
+ dir in self._roots or
+ dir in self._dirs or
+ any(parentdir in self._roots
+ for parentdir in util.finddirs(dir))):
+ return 'this'
+
+ ret = set()
+ if dir in self._parents:
+ # We add a '/' on to `dir` so that we don't return items that are
+ # prefixed by `dir` but are actually siblings of `dir`.
+ suffixeddir = dir + '/' if dir != '.'
else ''
+ # Look in all _roots, _dirs, and _parents for things that start with
+ # 'suffixeddir'.
+ for d in [q for q in
+ itertools.chain(self._roots, self._dirs, self._parents) if
+ q.startswith(suffixeddir)]:
+ # Don't emit '.' in the response for the root directory
+ if not suffixeddir and d == '.':
+ continue
+
+ # We return the item name without the `suffixeddir` prefix or a
+ # slash suffix
+ d = d[len(suffixeddir):]
+ if '/' in d:
+ # This is a subdirectory-of-a-subdirectory, i.e.
+ # suffixeddir='foo/', d was 'foo/bar/baz' before removing
+ # 'foo/'.
+ d = d[:d.index('/')]
+ ret.add(d)
+ return ret
+
@encoding.strmethod
def __repr__(self):
return ('<includematcher includes=%r>' % pycompat.bytestr(self._pats))
@@ -486,6 +575,25 @@
def visitdir(self, dir):
return dir in self._dirs

+ def visitchildrenset(self, dir):
+ if dir in self._dirs:
+ candidates = self._dirs - {'.'}
+ if dir != '.':
+ d = dir + '/'
+ candidates = set(c[len(d):] for c in candidates if
+ c.startswith(d))
+ # self._dirs includes all of the directories, recursively, so if
+ # we're attempting to match foo/bar/baz.txt, it'll have '.', 'foo',
+ # 'foo/bar' in it. Thus we can safely ignore a candidate that has a
+ # '/' in it, indicating it's for a subdir-of-a-subdir; the
+ # immediate subdir will be in there without a slash.
+ ret = set(c for c in candidates if '/' not in c)
+ # We need to emit 'this' for foo/bar, not set(), not {'baz.txt'}.
+ if not ret:
+ return 'this'
+ return ret
+ return set()
+
def isexact(self):
return True

@@ -527,6 +635,31 @@
return False
return bool(self._m1.visitdir(dir))

+ def visitchildrenset(self, dir):
+ m2_set = self._m2.visitchildrenset(dir)
+ if m2_set == 'all':
+ return set()
+ m1_set = self._m1.visitchildrenset(dir)
+ # Possible values for m1: 'all', 'this', set(...), set()
+ # Possible values for m2: 'this', set(...), set()
+ # If m2 has nothing under here that we care about, return m1, even if
+ # it's 'all'. This is a change in behavior from visitdir, which would
+ # return True, not 'all', for some reason.
+ if not m2_set:
+ return m1_set
+ if m1_set in ['all', 'this']:
+ # Never return 'all' here if m2_set is any kind of non-empty (either
+ # 'this' or set(foo)), since m2 might return set() for a
+ # subdirectory.
+ return 'this'
+ # Possible values for m1: set(...), set()
+ # Possible values for m2: 'this', set(...)
+ # We ignore m2's set results. They're possibly incorrect:
+ # m1 = path:dir/subdir, m2=rootfilesin:dir, visitchildrenset('.'):
+ # m1 returns {'dir'}, m2 returns {'dir'}, if we subtracted we'd
+ # return set(), which is *not* correct, we still need to visit 'dir'!
+ return m1_set + def isexact(self): return self._m1.isexact() @@ -591,6 +724,25 @@ # bool() because visit1=True + visit2='all' should not be 'all' return bool(visit1 and self._m2.visitdir(dir)) + def visitchildrenset(self, dir): + m1_set = self._m1.visitchildrenset(dir) + if not m1_set: + return set() + m2_set = self._m2.visitchildrenset(dir) + if not m2_set: + return set() + + if m1_set == 'all': + return m2_set + elif m2_set == 'all': + return m1_set + + if m1_set == 'this' or m2_set == 'this': + return 'this' + + assert isinstance(m1_set, set) and isinstance(m2_set, set) + return m1_set.intersection(m2_set) + def always(self): return self._m1.always() and self._m2.always() @@ -672,6 +824,13 @@ dir = self._path + "/" + dir return self._matcher.visitdir(dir) + def visitchildrenset(self, dir): + if dir == '.': + dir = self._path + else: + dir = self._path + "/" + dir + return self._matcher.visitchildrenset(dir) + def always(self): return self._always @@ -744,6 +903,15 @@ return self._matcher.visitdir(dir[len(self._pathprefix):]) return dir in self._pathdirs + def visitchildrenset(self, dir): + if dir == self._path: + return self._matcher.visitchildrenset('.') + if dir.startswith(self._pathprefix): + return self._matcher.visitchildrenset(dir[len(self._pathprefix):]) + if dir in self._pathdirs: + return 'this' + return set() + def isexact(self): return self._matcher.isexact() @@ -784,6 +952,25 @@ r |= v return r + def visitchildrenset(self, dir): + r = set() + this = False + for m in self._matchers: + v = m.visitchildrenset(dir) + if not v: + continue + if v == 'all': + return v + if this or v == 'this': + this = True + # don't break, we might have an 'all' in here. + continue + assert isinstance(v, set) + r = r.union(v) + if this: + return 'this' + return r + @encoding.strmethod def __repr__(self): return ('<unionmatcher matchers=%r>' % self._matchers) @@ -1004,40 +1191,42 @@ roots, dirs = _patternrootsanddirs(kindpats) return roots -def _rootsanddirs(kindpats): +def _rootsdirsandparents(kindpats): '''Returns roots and exact directories from patterns. - roots are directories to match recursively, whereas exact directories should - be matched non-recursively. The returned (roots, dirs) tuple will also - include directories that need to be implicitly considered as either, such as - parent directories. + `roots` are directories to match recursively, `dirs` should + be matched non-recursively, and `parents` are the implicitly required + directories to walk to items in either roots or dirs. - >>> _rootsanddirs( + Returns a tuple of (roots, dirs, parents). + + >>> _rootsdirsandparents( ... [(b'glob', b'g/h/*', b''), (b'glob', b'g/h', b''), ... (b'glob', b'g*', b'')]) - (['g/h', 'g/h', '.'], ['g', '.']) - >>> _rootsanddirs( + (['g/h', 'g/h', '.'], [], ['g', '.']) + >>> _rootsdirsandparents( ... [(b'rootfilesin', b'g/h', b''), (b'rootfilesin', b'', b'')]) - ([], ['g/h', '.', 'g', '.']) - >>> _rootsanddirs( + ([], ['g/h', '.'], ['g', '.']) + >>> _rootsdirsandparents( ... [(b'relpath', b'r', b''), (b'path', b'p/p', b''), ... (b'path', b'', b'')]) - (['r', 'p/p', '.'], ['p', '.']) - >>> _rootsanddirs( + (['r', 'p/p', '.'], [], ['p', '.']) + >>> _rootsdirsandparents( ... [(b'relglob', b'rg*', b''), (b're', b're/', b''), ... (b'relre', b'rr', b'')]) - (['.', '.', '.'], ['.']) + (['.', '.', '.'], [], ['.']) ''' r, d = _patternrootsanddirs(kindpats) + p = [] # Append the parents as non-recursive/exact directories, since they must be # scanned to get to either the roots or the other exact directories. 
- d.extend(util.dirs(d)) - d.extend(util.dirs(r)) + p.extend(util.dirs(d)) + p.extend(util.dirs(r)) # util.dirs() does not include the root directory, so add it manually - d.append('.') + p.append('.') - return r, d + return r, d, p def _explicitfiles(kindpats): '''Returns the potential explicit filenames from the patterns.
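The point of visitchildrenset() is to let a tree walker prune eagerly. Below is a sketch of such a walker under the contract documented above ('all'/'this'/set of names); it is os-based and hedged, not how Mercurial's own directory walker is actually wired up:

    import os

    def prunedwalk(matcher, root, dir='.'):
        """Yield matcher-relevant subdirectories beneath root/dir."""
        visit = matcher.visitchildrenset(dir)
        if not visit:                     # empty set: prune this subtree
            return
        abspath = os.path.join(root, '' if dir == '.' else dir)
        for name in sorted(os.listdir(abspath)):
            if not os.path.isdir(os.path.join(abspath, name)):
                continue
            if visit in ('all', 'this') or name in visit:
                child = name if dir == '.' else dir + '/' + name
                yield child
                for sub in prunedwalk(matcher, root, child):
                    yield sub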
--- a/mercurial/mdiff.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/mdiff.py Mon Aug 20 09:48:08 2018 -0700 @@ -357,7 +357,7 @@ # walk backwards from the start of the context up to the start of # the previous hunk context until we find a line starting with an # alphanumeric char. - for i in xrange(astart - 1, lastpos - 1, -1): + for i in pycompat.xrange(astart - 1, lastpos - 1, -1): if l1[i][0:1].isalnum(): func = b' ' + l1[i].rstrip() # split long function name if ASCII. otherwise we have no @@ -381,7 +381,7 @@ hunklines = ( ["@@ -%d,%d +%d,%d @@%s\n" % (hunkrange + (func,))] + delta - + [' ' + l1[x] for x in xrange(a2, aend)] + + [' ' + l1[x] for x in pycompat.xrange(a2, aend)] ) # If either file ends without a newline and the last line of # that file is part of a hunk, a marker is printed. If the @@ -390,7 +390,7 @@ # which the hunk can end in a shared line without a newline. skip = False if not t1.endswith('\n') and astart + alen == len(l1) + 1: - for i in xrange(len(hunklines) - 1, -1, -1): + for i in pycompat.xrange(len(hunklines) - 1, -1, -1): if hunklines[i].startswith(('-', ' ')): if hunklines[i].startswith(' '): skip = True @@ -398,7 +398,7 @@ hunklines.insert(i + 1, _missing_newline_marker) break if not skip and not t2.endswith('\n') and bstart + blen == len(l2) + 1: - for i in xrange(len(hunklines) - 1, -1, -1): + for i in pycompat.xrange(len(hunklines) - 1, -1, -1): if hunklines[i].startswith('+'): hunklines[i] += '\n' hunklines.insert(i + 1, _missing_newline_marker)
--- a/mercurial/minifileset.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/minifileset.py Mon Aug 20 09:48:08 2018 -0700 @@ -11,20 +11,23 @@ from . import ( error, fileset, + filesetlang, pycompat, ) def _sizep(x): # i18n: "size" is a keyword - expr = fileset.getstring(x, _("size requires an expression")) + expr = filesetlang.getstring(x, _("size requires an expression")) return fileset.sizematcher(expr) def _compile(tree): if not tree: raise error.ParseError(_("missing argument")) op = tree[0] - if op in {'symbol', 'string', 'kindpat'}: - name = fileset.getpattern(tree, {'path'}, _('invalid file pattern')) + if op == 'withstatus': + return _compile(tree[1]) + elif op in {'symbol', 'string', 'kindpat'}: + name = filesetlang.getpattern(tree, {'path'}, _('invalid file pattern')) if name.startswith('**'): # file extension test, ex. "**.tar.gz" ext = name[2:] for c in pycompat.bytestr(ext): @@ -39,18 +42,15 @@ return f raise error.ParseError(_("unsupported file pattern: %s") % name, hint=_('paths must be prefixed with "path:"')) - elif op == 'or': - func1 = _compile(tree[1]) - func2 = _compile(tree[2]) - return lambda n, s: func1(n, s) or func2(n, s) + elif op in {'or', 'patterns'}: + funcs = [_compile(x) for x in tree[1:]] + return lambda n, s: any(f(n, s) for f in funcs) elif op == 'and': func1 = _compile(tree[1]) func2 = _compile(tree[2]) return lambda n, s: func1(n, s) and func2(n, s) elif op == 'not': return lambda n, s: not _compile(tree[1])(n, s) - elif op == 'group': - return _compile(tree[1]) elif op == 'func': symbols = { 'all': lambda n, s: True, @@ -58,7 +58,7 @@ 'size': lambda n, s: _sizep(tree[2])(s), } - name = fileset.getsymbol(tree[1]) + name = filesetlang.getsymbol(tree[1]) if name in symbols: return symbols[name] @@ -67,11 +67,9 @@ func1 = _compile(tree[1]) func2 = _compile(tree[2]) return lambda n, s: func1(n, s) and not func2(n, s) - elif op == 'negate': - raise error.ParseError(_("can't use negate operator in this context")) elif op == 'list': raise error.ParseError(_("can't use a list in this context"), - hint=_('see hg help "filesets.x or y"')) + hint=_('see \'hg help "filesets.x or y"\'')) raise error.ProgrammingError('illegal tree: %r' % (tree,)) def compile(text): @@ -88,5 +86,7 @@ files whose name ends with ".zip", and all files under "bin" in the repo root except for "bin/README". """ - tree = fileset.parse(text) + tree = filesetlang.parse(text) + tree = filesetlang.analyze(tree) + tree = filesetlang.optimize(tree) return _compile(tree)
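For reference, the rewritten compile() is used roughly as below. The expression is assembled here to match the docstring's description (it is an illustration, not necessarily the exact expression from the elided docstring); the compiled predicate takes (name, size):

    from mercurial import minifileset

    matcher = minifileset.compile(
        b'(**.php & size(">10MB")) | **.zip | (path:bin & !path:bin/README)')
    matcher(b'bin/README', 0)          # -> False: explicitly excluded
    matcher(b'archive.zip', 123)       # -> True: matches **.zip
    matcher(b'index.php', 11 * 2**20)  # -> True: .php file over 10MB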
--- a/mercurial/minirst.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/minirst.py Mon Aug 20 09:48:08 2018 -0700 @@ -316,7 +316,7 @@ # column markers are ASCII so we can calculate column # position in bytes - columns = [x for x in xrange(len(div)) + columns = [x for x in pycompat.xrange(len(div)) if div[x:x + 1] == '=' and (x == 0 or div[x - 1:x] == ' ')] rows = [] @@ -685,7 +685,7 @@ if llen and llen != plen: collapse = False s = [] - for j in xrange(3, plen - 1): + for j in pycompat.xrange(3, plen - 1): parent = parents[j] if (j >= llen or lastparents[j] != parent):
--- a/mercurial/narrowspec.py	Sun Aug 19 13:27:02 2018 +0900
+++ b/mercurial/narrowspec.py	Mon Aug 20 09:48:08 2018 -0700
@@ -13,34 +13,13 @@
from . import (
error,
match as matchmod,
+ repository,
+ sparse,
util,
)

FILENAME = 'narrowspec'

-def _parsestoredpatterns(text):
- """Parses the narrowspec format that's stored on disk."""
- patlist = None
- includepats = []
- excludepats = []
- for l in text.splitlines():
- if l == '[includes]':
- if patlist is None:
- patlist = includepats
- else:
- raise error.Abort(_('narrowspec includes section must appear '
- 'at most once, before excludes'))
- elif l == '[excludes]':
- if patlist is not excludepats:
- patlist = excludepats
- else:
- raise error.Abort(_('narrowspec excludes section must appear '
- 'at most once'))
- else:
- patlist.append(l)
-
- return set(includepats), set(excludepats)
-
def parseserverpatterns(text):
"""Parses the narrowspec format that's returned by the server."""
includepats = set()
@@ -107,10 +86,10 @@
return set(normalizepattern(p) for p in pats)

def format(includes, excludes):
- output = '[includes]\n'
+ output = '[include]\n'
for i in sorted(includes - excludes):
output += i + '\n'
- output += '[excludes]\n'
+ output += '[exclude]\n'
for e in sorted(excludes):
output += e + '\n'
return output
@@ -129,21 +108,41 @@
def load(repo):
try:
- spec = repo.vfs.read(FILENAME)
+ spec = repo.svfs.read(FILENAME)
except IOError as e:
# Treat "narrowspec does not exist" the same as "narrowspec file exists
# and is empty".
if e.errno == errno.ENOENT:
- # Without this the next call to load will use the cached
- # non-existence of the file, which can cause some odd issues.
- repo.invalidate(clearfilecache=True)
return set(), set()
raise
- return _parsestoredpatterns(spec)
+ # maybe we should care about the profiles returned too
+ includepats, excludepats, profiles = sparse.parseconfig(repo.ui, spec,
+ 'narrow')
+ if profiles:
+ raise error.Abort(_("including other spec files using '%include' is not"
+ " supported in narrowspec"))
+ return includepats, excludepats

def save(repo, includepats, excludepats):
spec = format(includepats, excludepats)
- repo.vfs.write(FILENAME, spec)
+ repo.svfs.write(FILENAME, spec)
+
+def savebackup(repo, backupname):
+ if repository.NARROW_REQUIREMENT not in repo.requirements:
+ return
+ vfs = repo.vfs
+ vfs.tryunlink(backupname)
+ util.copyfile(repo.svfs.join(FILENAME), vfs.join(backupname), hardlink=True)
+
+def restorebackup(repo, backupname):
+ if repository.NARROW_REQUIREMENT not in repo.requirements:
+ return
+ util.rename(repo.vfs.join(backupname), repo.svfs.join(FILENAME))
+
+def clearbackup(repo, backupname):
+ if repository.NARROW_REQUIREMENT not in repo.requirements:
+ return
+ repo.vfs.unlink(backupname)

def restrictpatterns(req_includes, req_excludes, repo_includes, repo_excludes):
r""" Restricts the patterns according to repo settings,
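With this change the stored narrowspec uses the same section names the sparse config parser expects, which is why _parsestoredpatterns can be deleted in favor of sparse.parseconfig(). format() now emits, for example (the paths are invented for illustration):

    [include]
    path:src/foo
    [exclude]
    path:src/foo/tests

Note that '%include' directives, which sparse profiles allow, are explicitly rejected for narrowspecs.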
--- a/mercurial/node.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/node.py Mon Aug 20 09:48:08 2018 -0700 @@ -21,20 +21,25 @@ raise TypeError(e) nullrev = -1 +# In hex, this is '0000000000000000000000000000000000000000' nullid = b"\0" * 20 nullhex = hex(nullid) # Phony node value to stand-in for new files in some uses of # manifests. -newnodeid = '!' * 20 -addednodeid = ('0' * 15) + 'added' -modifiednodeid = ('0' * 12) + 'modified' +# In hex, this is '2121212121212121212121212121212121212121' +newnodeid = '!!!!!!!!!!!!!!!!!!!!' +# In hex, this is '3030303030303030303030303030306164646564' +addednodeid = '000000000000000added' +# In hex, this is '3030303030303030303030306d6f646966696564' +modifiednodeid = '000000000000modified' wdirfilenodeids = {newnodeid, addednodeid, modifiednodeid} # pseudo identifiers for working directory # (they are experimental, so don't add too many dependencies on them) wdirrev = 0x7fffffff +# In hex, this is 'ffffffffffffffffffffffffffffffffffffffff' wdirid = b"\xff" * 20 wdirhex = hex(wdirid)
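The hex annotations added above can be checked with a few lines of Python (a quick verification snippet, not part of the module):

    from binascii import hexlify

    assert hexlify(b'!' * 20) == b'21' * 20                   # newnodeid
    assert hexlify(b'\xff' * 20) == b'ff' * 20                # wdirid
    assert (hexlify(b'000000000000modified')
            == b'3030303030303030303030306d6f646966696564')   # modifiednodeid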
--- a/mercurial/obsolete.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/obsolete.py Mon Aug 20 09:48:08 2018 -0700 @@ -394,7 +394,7 @@ off = o3 + metasize * nummeta metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off]) metadata = [] - for idx in xrange(0, len(metapairsize), 2): + for idx in pycompat.xrange(0, len(metapairsize), 2): o1 = off + metapairsize[idx] o2 = o1 + metapairsize[idx + 1] metadata.append((data[off:o1], data[o1:o2]))
--- a/mercurial/parser.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/parser.py Mon Aug 20 09:48:08 2018 -0700 @@ -20,7 +20,6 @@ from .i18n import _ from . import ( - encoding, error, pycompat, util, @@ -198,16 +197,11 @@ # mangle Python's exception into our format raise error.ParseError(pycompat.bytestr(e).lower()) -def _brepr(obj): - if isinstance(obj, bytes): - return b"'%s'" % stringutil.escapestr(obj) - return encoding.strtolocal(repr(obj)) - def _prettyformat(tree, leafnodes, level, lines): if not isinstance(tree, tuple): - lines.append((level, _brepr(tree))) + lines.append((level, stringutil.pprint(tree))) elif tree[0] in leafnodes: - rs = map(_brepr, tree[1:]) + rs = map(stringutil.pprint, tree[1:]) lines.append((level, '(%s %s)' % (tree[0], ' '.join(rs)))) else: lines.append((level, '(%s' % tree[0]))
--- a/mercurial/patch.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/patch.py Mon Aug 20 09:48:08 2018 -0700 @@ -815,7 +815,7 @@ for x, s in enumerate(self.lines): self.hash.setdefault(s, []).append(x) - for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1): + for fuzzlen in pycompat.xrange(self.ui.configint("patch", "fuzz") + 1): for toponly in [True, False]: old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly) oldstart = oldstart + self.offset + self.skew @@ -1286,7 +1286,7 @@ self.lena = int(aend) - self.starta if self.starta: self.lena += 1 - for x in xrange(self.lena): + for x in pycompat.xrange(self.lena): l = lr.readline() if l.startswith('---'): # lines addition, old block is empty @@ -1320,7 +1320,7 @@ if self.startb: self.lenb += 1 hunki = 1 - for x in xrange(self.lenb): + for x in pycompat.xrange(self.lenb): l = lr.readline() if l.startswith('\ '): # XXX: the only way to hit this is with an invalid line range. @@ -1396,14 +1396,14 @@ top = 0 bot = 0 hlen = len(self.hunk) - for x in xrange(hlen - 1): + for x in pycompat.xrange(hlen - 1): # the hunk starts with the @@ line, so use x+1 if self.hunk[x + 1].startswith(' '): top += 1 else: break if not toponly: - for x in xrange(hlen - 1): + for x in pycompat.xrange(hlen - 1): if self.hunk[hlen - bot - 1].startswith(' '): bot += 1 else: @@ -2326,7 +2326,7 @@ relfiltered = True if not changes: - changes = repo.status(ctx1, ctx2, match=match) + changes = ctx1.status(ctx2, match=match) modified, added, removed = changes[:3] if not modified and not added and not removed:
--- a/mercurial/phases.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/phases.py Mon Aug 20 09:48:08 2018 -0700 @@ -374,7 +374,7 @@ changes = set() # set of revisions to be changed delroots = [] # set of root deleted by this path - for phase in xrange(targetphase + 1, len(allphases)): + for phase in pycompat.xrange(targetphase + 1, len(allphases)): # filter nodes that are not in a compatible phase already nodes = [n for n in nodes if self.phase(repo, repo[n].rev()) >= phase] @@ -420,7 +420,7 @@ affected = set(repo.revs('(%ln::) - (%ln::)', new, old)) # find the phase of the affected revision - for phase in xrange(targetphase, -1, -1): + for phase in pycompat.xrange(targetphase, -1, -1): if phase: roots = oldroots[phase] revs = set(repo.revs('%ln::%ld', roots, affected))
--- a/mercurial/policy.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/policy.py Mon Aug 20 09:48:08 2018 -0700 @@ -69,7 +69,7 @@ (r'cext', r'bdiff'): 3, (r'cext', r'mpatch'): 1, (r'cext', r'osutil'): 4, - (r'cext', r'parsers'): 5, + (r'cext', r'parsers'): 7, } # map import request to other package or module
--- a/mercurial/pure/osutil.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/pure/osutil.py Mon Aug 20 09:48:08 2018 -0700 @@ -150,7 +150,7 @@ rfds = ctypes.cast(cmsg.cmsg_data, ctypes.POINTER(ctypes.c_int)) rfdscount = ((cmsg.cmsg_len - _cmsghdr.cmsg_data.offset) / ctypes.sizeof(ctypes.c_int)) - return [rfds[i] for i in xrange(rfdscount)] + return [rfds[i] for i in pycompat.xrange(rfdscount)] else: import msvcrt
--- a/mercurial/pure/parsers.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/pure/parsers.py Mon Aug 20 09:48:08 2018 -0700 @@ -39,25 +39,22 @@ class BaseIndexObject(object): def __len__(self): - return self._lgt + len(self._extra) + 1 + return self._lgt + len(self._extra) - def insert(self, i, tup): - assert i == -1 + def append(self, tup): self._extra.append(tup) def _fix_index(self, i): if not isinstance(i, int): raise TypeError("expecting int indexes") - if i < 0: - i = len(self) + i - if i < 0 or i >= len(self): + if i < 0 or i >= len(self) + 1: raise IndexError return i def __getitem__(self, i): + if i == -1: + return (0, 0, 0, -1, -1, -1, -1, nullid) i = self._fix_index(i) - if i == len(self) - 1: - return (0, 0, 0, -1, -1, -1, -1, nullid) if i >= self._lgt: return self._extra[i - self._lgt] index = self._calculate_index(i)
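The revised BaseIndexObject contract is subtle: len() no longer counts the phantom null revision, yet index[-1] still answers with the null entry rather than the last real one. A toy model of that contract (it mirrors the revlogoldindex class added in revlog.py later in this changeset):

    nullid = b'\0' * 20

    class toyindex(list):
        def __getitem__(self, i):
            if i == -1:                  # the null revision, not "last item"
                return (0, 0, 0, -1, -1, -1, -1, nullid)
            return list.__getitem__(self, i)

    idx = toyindex([(0, 0, 0, -1, -1, -1, -1, b'\x11' * 20)])
    assert len(idx) == 1                 # only real revisions are counted
    assert idx[-1][7] == nullid
    assert idx[len(idx) - 1][7] == b'\x11' * 20   # tip is the last real entry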
--- a/mercurial/pvec.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/pvec.py Mon Aug 20 09:48:08 2018 -0700 @@ -52,6 +52,7 @@ from .node import nullrev from . import ( + pycompat, util, ) @@ -72,7 +73,7 @@ def _str(v, l): bs = "" - for p in xrange(l): + for p in pycompat.xrange(l): bs = chr(v & 255) + bs v >>= 8 return bs @@ -91,7 +92,7 @@ c += 1 x >>= 1 return c -_htab = [_hweight(x) for x in xrange(256)] +_htab = [_hweight(x) for x in pycompat.xrange(256)] def _hamming(a, b): '''find the hamming distance between two longs''' @@ -152,7 +153,7 @@ pvc = r._pveccache if ctx.rev() not in pvc: cl = r.changelog - for n in xrange(ctx.rev() + 1): + for n in pycompat.xrange(ctx.rev() + 1): if n not in pvc: node = cl.node(n) p1, p2 = cl.parentrevs(n)
--- a/mercurial/pycompat.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/pycompat.py Mon Aug 20 09:48:08 2018 -0700 @@ -331,6 +331,7 @@ else: import cStringIO + xrange = xrange unicode = unicode bytechr = chr byterepr = repr
--- a/mercurial/registrar.py	Sun Aug 19 13:27:02 2018 +0900
+++ b/mercurial/registrar.py	Mon Aug 20 09:48:08 2018 -0700
@@ -247,6 +247,19 @@
implies 'matchctx.status()' at runtime or not (False, by
default).

+ Optional argument 'weight' indicates the estimated run-time cost, useful
+ for static optimization, default is 1. Higher weight means more expensive.
+ There are predefined weights in the 'filesetlang' module.
+
+ ====== =============================================================
+ Weight Description and examples
+ ====== =============================================================
+ 0.5 basic match patterns (e.g. a symbol)
+ 10 computing status (e.g. added()) or accessing a few files
+ 30 reading file content for each (e.g. grep())
+ 50 scanning working directory (ignored())
+ ====== =============================================================
+
'filesetpredicate' instance in example above can be used to
decorate multiple functions.

@@ -259,8 +272,9 @@
_getname = _funcregistrarbase._parsefuncdecl
_docformat = "``%s``\n %s"

- def _extrasetup(self, name, func, callstatus=False):
+ def _extrasetup(self, name, func, callstatus=False, weight=1):
func._callstatus = callstatus
+ func._weight = weight

class _templateregistrarbase(_funcregistrarbase):
"""Base of decorator to register functions as template specific one
@@ -281,7 +295,7 @@
'''
pass

- # old API
+ # old API (DEPRECATED)
@templatekeyword('mykeyword')
def mykeywordfunc(repo, ctx, templ, cache, revcache, **args):
'''Explanation of this template keyword ....
@@ -385,7 +399,8 @@
internalmerge = registrar.internalmerge()

@internalmerge('mymerge', internalmerge.mergeonly,
- onfailure=None, precheck=None):
+ onfailure=None, precheck=None,
+ binary=False, symlink=False):
def mymergefunc(repo, mynode, orig, fcd, fco, fca,
toolconf, files, labels=None):
'''Explanation of this internal merge tool ....
@@ -416,6 +431,12 @@
'files' and 'labels'. If it returns false value, merging is aborted
immediately (and file is marked as "unresolved").

+ Optional argument 'binary' indicates whether this internal merge
+ tool can handle binary files. The 'nomerge' merge type implies
+ binary=True.
+
+ Optional argument 'symlink' indicates whether this internal merge
+ tool can handle symlinks. The 'nomerge' merge type implies
+ symlink=True.
+
'internalmerge' instance in example above can be used to decorate
multiple functions.

@@ -433,7 +454,14 @@
fullmerge = 'fullmerge' # both premerge and merge

def _extrasetup(self, name, func, mergetype,
- onfailure=None, precheck=None):
+ onfailure=None, precheck=None,
+ binary=False, symlink=False):
func.mergetype = mergetype
func.onfailure = onfailure
func.precheck = precheck
+
+ binarycap = binary or mergetype == self.nomerge
+ symlinkcap = symlink or mergetype == self.nomerge
+
+ # the actual capabilities of this internal merge tool
+ func.capabilities = {"binary": binarycap, "symlink": symlinkcap}
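Spelled out, the documented example registers a tool the way an extension module would (Mercurial only picks it up when the module is loaded as an extension); the merge body is elided with '...' since only the new binary/symlink capability arguments matter here:

    from mercurial import registrar

    internalmerge = registrar.internalmerge()

    @internalmerge(b'mymerge', internalmerge.mergeonly,
                   onfailure=None, precheck=None,
                   binary=True, symlink=False)
    def mymergefunc(repo, mynode, orig, fcd, fco, fca, toolconf,
                    files, labels=None):
        ...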
--- a/mercurial/repair.py	Sun Aug 19 13:27:02 2018 +0900
+++ b/mercurial/repair.py	Mon Aug 20 09:48:08 2018 -0700
@@ -24,6 +24,7 @@
exchange,
obsolete,
obsutil,
+ pycompat,
util,
)
from .utils import (
@@ -70,7 +71,7 @@
"""find out the filelogs affected by the strip"""
files = set()

- for x in xrange(striprev, len(repo)):
+ for x in pycompat.xrange(striprev, len(repo)):
files.update(repo[x].files())

return sorted(files)
@@ -199,7 +200,7 @@
repo.file(fn).strip(striprev, tr)
tr.endgroup()

- for i in xrange(offset, len(tr.entries)):
+ for i in pycompat.xrange(offset, len(tr.entries)):
file, troffset, ignore = tr.entries[i]
with repo.svfs(file, 'a', checkambig=True) as fp:
fp.truncate(troffset)
@@ -297,24 +298,24 @@
if roots:
strip(self.ui, self.repo, roots, self.backup, self.topic)

-def delayedstrip(ui, repo, nodelist, topic=None):
+def delayedstrip(ui, repo, nodelist, topic=None, backup=True):
"""like strip, but works inside a transaction and won't strip irrelevant revs

nodelist must explicitly contain all descendants. Otherwise a warning will
be printed that some nodes are not stripped.

- Always do a backup. The last non-None "topic" will be used as the backup
- topic name. The default backup topic name is "backup".
+ Will do a backup if `backup` is True. The last non-None "topic" will be
+ used as the backup topic name. The default backup topic name is "backup".
"""
tr = repo.currenttransaction()
if not tr:
nodes = safestriproots(ui, repo, nodelist)
- return strip(ui, repo, nodes, True, topic)
+ return strip(ui, repo, nodes, backup=backup, topic=topic)
# transaction postclose callbacks are called in alphabetical order.
# use '\xff' as prefix so we are likely to be called last.
callback = tr.getpostclose('\xffstrip')
if callback is None:
- callback = stripcallback(ui, repo, True, topic)
+ callback = stripcallback(ui, repo, backup=backup, topic=topic)
tr.addpostclose('\xffstrip', callback)
if topic:
callback.topic = topic
--- a/mercurial/repository.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/repository.py Mon Aug 20 09:48:08 2018 -0700 @@ -15,6 +15,10 @@ interfaceutil, ) +# When narrowing is finalized and no longer subject to format changes, +# we should move this to just "narrow" or similar. +NARROW_REQUIREMENT = 'narrowhg-experimental' + class ipeerconnection(interfaceutil.Interface): """Represents a "connection" to a repository.
--- a/mercurial/repoview.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/repoview.py Mon Aug 20 09:48:08 2018 -0700 @@ -128,7 +128,7 @@ firstmutable = min(firstmutable, min(cl.rev(r) for r in roots)) # protect from nullrev root firstmutable = max(0, firstmutable) - return frozenset(xrange(firstmutable, len(cl))) + return frozenset(pycompat.xrange(firstmutable, len(cl))) # function to compute filtered set # @@ -210,7 +210,7 @@ unfichangelog = unfi.changelog # bypass call to changelog.method unfiindex = unfichangelog.index - unfilen = len(unfiindex) - 1 + unfilen = len(unfiindex) unfinode = unfiindex[unfilen - 1][7] revs = filterrevs(unfi, self.filtername, self._visibilityexceptions)
--- a/mercurial/revlog.py	Sun Aug 19 13:27:02 2018 +0900
+++ b/mercurial/revlog.py	Mon Aug 20 09:48:08 2018 -0700
@@ -27,6 +27,7 @@
from .node import (
bin,
hex,
+ nullhex,
nullid,
nullrev,
wdirfilenodeids,
@@ -91,6 +92,7 @@
RevlogError = error.RevlogError
LookupError = error.LookupError
+AmbiguousPrefixLookupError = error.AmbiguousPrefixLookupError
CensoredNodeError = error.CensoredNodeError
ProgrammingError = error.ProgrammingError
@@ -605,6 +607,7 @@
chainbase = attr.ib()
chainlen = attr.ib()
compresseddeltalen = attr.ib()
+ snapshotdepth = attr.ib()

class _deltacomputer(object):
def __init__(self, revlog):
@@ -735,8 +738,21 @@
chainlen, compresseddeltalen = revlog._chaininfo(base)
chainlen += 1
compresseddeltalen += deltalen
+
+ revlog = self.revlog
+ snapshotdepth = None
+ if deltabase == nullrev:
+ snapshotdepth = 0
+ elif revlog._sparserevlog and revlog.issnapshot(deltabase):
+ # A delta chain should always be one full snapshot,
+ # zero or more semi-snapshots, and zero or more deltas
+ p1, p2 = revlog.rev(revinfo.p1), revlog.rev(revinfo.p2)
+ if deltabase not in (p1, p2) and revlog.issnapshot(deltabase):
+ snapshotdepth = len(revlog._deltachain(deltabase)[0])
+
return _deltainfo(dist, deltalen, (header, data), deltabase,
- chainbase, chainlen, compresseddeltalen)
+ chainbase, chainlen, compresseddeltalen,
+ snapshotdepth)

def finddeltainfo(self, revinfo, fh):
"""Find an acceptable delta against a candidate revision
@@ -748,15 +764,32 @@
Returns the first acceptable candidate revision, as ordered by
_getcandidaterevs
"""
+ if not revinfo.textlen:
+ return None # empty files do not need a delta
+
cachedelta = revinfo.cachedelta
p1 = revinfo.p1
p2 = revinfo.p2
revlog = self.revlog

+ deltalength = self.revlog.length
+ deltaparent = self.revlog.deltaparent
+
deltainfo = None
+ deltas_limit = revinfo.textlen * LIMIT_DELTA2TEXT
for candidaterevs in self._getcandidaterevs(p1, p2, cachedelta):
+ # filter out delta bases that will never produce good deltas
+ candidaterevs = [r for r in candidaterevs
+ if self.revlog.length(r) <= deltas_limit]
nominateddeltas = []
for candidaterev in candidaterevs:
+ # skip over empty deltas (no need to include them in a chain)
+ while candidaterev != nullrev and not deltalength(candidaterev):
+ candidaterev = deltaparent(candidaterev)
+ # no need to try a delta against nullid, this will be handled
+ # by fulltext later.
+ if candidaterev == nullrev: + continue # no delta for rawtext-changing revs (see "candelta" for why) if revlog.flags(candidaterev) & REVIDX_RAWTEXT_CHANGING_FLAGS: continue @@ -800,6 +833,15 @@ indexformatv0_pack = indexformatv0.pack indexformatv0_unpack = indexformatv0.unpack +class revlogoldindex(list): + def __getitem__(self, i): + if i == -1: + return (0, 0, 0, -1, -1, -1, -1, nullid) + return list.__getitem__(self, i) + +# maximum <delta-chain-data>/<revision-text-length> ratio +LIMIT_DELTA2TEXT = 2 + class revlogoldio(object): def __init__(self): self.size = indexformatv0.size @@ -821,10 +863,7 @@ nodemap[e[6]] = n n += 1 - # add the magic null revision at -1 - index.append((0, 0, 0, -1, -1, -1, -1, nullid)) - - return index, nodemap, None + return revlogoldindex(index), nodemap, None def packentry(self, entry, node, version, rev): if gettype(entry[0]): @@ -1071,27 +1110,33 @@ yield fp def tip(self): - return self.node(len(self.index) - 2) + return self.node(len(self.index) - 1) def __contains__(self, rev): return 0 <= rev < len(self) def __len__(self): - return len(self.index) - 1 + return len(self.index) def __iter__(self): - return iter(xrange(len(self))) + return iter(pycompat.xrange(len(self))) def revs(self, start=0, stop=None): """iterate over all rev in this revlog (from start to stop)""" step = 1 + length = len(self) if stop is not None: if start > stop: step = -1 stop += step + if stop > length: + stop = length else: - stop = len(self) - return xrange(start, stop, step) + stop = length + return pycompat.xrange(start, stop, step) @util.propertycache def nodemap(self): - self.rev(self.node(0)) + if self.index: + # populate mapping down to the initial node + node0 = self.index[0][7] # get around changelog filtering + self.rev(node0) return self._nodecache def hasnode(self, node): @@ -1141,10 +1186,10 @@ i = self.index p = self._nodepos if p is None: - p = len(i) - 2 + p = len(i) - 1 else: assert p < len(i) - for r in xrange(p, -1, -1): + for r in pycompat.xrange(p, -1, -1): v = i[r][7] n[v] = r if v == node: @@ -1711,11 +1756,6 @@ a, b = self.rev(a), self.rev(b) return self.isancestorrev(a, b) - def descendant(self, a, b): - msg = 'revlog.descendant is deprecated, use revlog.isancestorrev' - util.nouideprecwarn(msg, '4.7') - return self.isancestorrev(a, b) - def isancestorrev(self, a, b): """return True if revision a is an ancestor of revision b @@ -1796,8 +1836,8 @@ # parsers.c radix tree lookup gave multiple matches # fast path: for unfiltered changelog, radix tree is accurate if not getattr(self, 'filteredrevs', None): - raise LookupError(id, self.indexfile, - _('ambiguous identifier')) + raise AmbiguousPrefixLookupError(id, self.indexfile, + _('ambiguous identifier')) # fall through to slow path that filters hidden revisions except (AttributeError, ValueError): # we are pure python, or key was too short to search radix tree @@ -1814,12 +1854,14 @@ nl = [e[7] for e in self.index if e[7].startswith(prefix)] nl = [n for n in nl if hex(n).startswith(id) and self.hasnode(n)] + if nullhex.startswith(id): + nl.append(nullid) if len(nl) > 0: if len(nl) == 1 and not maybewdir: self._pcache[id] = nl[0] return nl[0] - raise LookupError(id, self.indexfile, - _('ambiguous identifier')) + raise AmbiguousPrefixLookupError(id, self.indexfile, + _('ambiguous identifier')) if maybewdir: raise error.WdirUnsupported return None @@ -2070,6 +2112,25 @@ else: return rev - 1 + def issnapshot(self, rev): + """tells whether rev is a snapshot + """ + if rev == nullrev: + return True + deltap = 
self.deltaparent(rev)
+ if deltap == nullrev:
+ return True
+ p1, p2 = self.parentrevs(rev)
+ if deltap in (p1, p2):
+ return False
+ return self.issnapshot(deltap)
+
+ def snapshotdepth(self, rev):
+ """number of snapshots in the chain before this one"""
+ if not self.issnapshot(rev):
+ raise ProgrammingError('revision %d not a snapshot' % rev)
+ return len(self._deltachain(rev)[0]) - 1
+
def revdiff(self, rev1, rev2):
"""return or calculate a delta between two revisions

@@ -2254,7 +2315,9 @@
revlog has grown too large to be an inline revlog, it will convert it
to use multiple index and data files.
"""
- if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
+ tiprev = len(self) - 1
+ if (not self._inline or
+ (self.start(tiprev) + self.length(tiprev)) < _maxinline):
return

trinfo = tr.find(self.indexfile)
@@ -2268,7 +2331,7 @@
else:
# revlog was stripped at start of transaction, use all leftover data
trindex = len(self) - 1
- dataoff = self.end(-2)
+ dataoff = self.end(tiprev)

tr.add(self.datafile, dataoff)

@@ -2454,6 +2517,11 @@
else:
deltachain = []

+ # search for the first non-snapshot revision
+ for idx, r in enumerate(deltachain):
+ if not self.issnapshot(r):
+ break
+ deltachain = deltachain[idx:]
chunks = _slicechunk(self, deltachain, deltainfo)
all_span = [_segmentspan(self, revs, deltainfo)
for revs in chunks]
distance = max(all_span)
@@ -2471,9 +2539,45 @@
# certain size. We also apply this tradeoff here and relax the span
# constraint for small enough content.
maxdist = self._srmingapsize
- if (distance > maxdist or deltainfo.deltalen > textlen or
- deltainfo.compresseddeltalen > textlen * 2 or
- (self._maxchainlen and deltainfo.chainlen > self._maxchainlen)):
+
+ # Bad delta from read span:
+ #
+ # If the span of data read is larger than the maximum allowed.
+ if maxdist < distance:
+ return False
+
+ # Bad delta from new delta size:
+ #
+ # If the delta size is larger than the target text, storing the
+ # delta will be inefficient.
+ if textlen < deltainfo.deltalen:
+ return False
+
+ # Bad delta from cumulated payload size:
+ #
+ # If the sum of the deltas gets larger than K * the target text length.
+ if textlen * LIMIT_DELTA2TEXT < deltainfo.compresseddeltalen:
+ return False
+
+ # Bad delta from chain length:
+ #
+ # If the number of deltas in the chain gets too high.
+ if self._maxchainlen and self._maxchainlen < deltainfo.chainlen:
+ return False
+
+ # bad delta from intermediate snapshot size limit
+ #
+ # If an intermediate snapshot size is higher than the limit. The
+ # limit exists to prevent an endless chain of intermediate deltas
+ # from being created.
+ if (deltainfo.snapshotdepth is not None and + (textlen >> deltainfo.snapshotdepth) < deltainfo.deltalen): + return False + + # bad delta if new intermediate snapshot is larger than the previous + # snapshot + if (deltainfo.snapshotdepth + and self.length(deltainfo.base) < deltainfo.deltalen): return False return True @@ -2550,14 +2654,14 @@ e = (offset_type(offset, flags), l, textlen, base, link, p1r, p2r, node) - self.index.insert(-1, e) + self.index.append(e) self.nodemap[node] = curr entry = self._io.packentry(e, self.node, self.version, curr) self._writeentry(transaction, ifh, dfh, entry, data, link, offset) if alwayscache and rawtext is None: - rawtext = deltacomputer._buildtext(revinfo, fh) + rawtext = deltacomputer.buildtext(revinfo, fh) if type(rawtext) == bytes: # only accept immutable objects self._cache = (node, curr, rawtext) @@ -2798,7 +2902,7 @@ self._cache = None self._chaininfocache = {} self._chunkclear() - for x in xrange(rev, len(self)): + for x in pycompat.xrange(rev, len(self)): del self.nodemap[self.node(x)] del self.index[rev:-1]
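The recursion in the new issnapshot() is easier to see over plain dictionaries. A standalone model of the same logic (deltaparent/parents are toy mappings standing in for revlog internals):

    nullrev = -1

    def issnapshot(deltaparent, parents, rev):
        if rev == nullrev:
            return True
        dp = deltaparent[rev]
        if dp == nullrev:
            return True                          # full snapshot
        if dp in parents[rev]:
            return False                         # plain delta against a parent
        return issnapshot(deltaparent, parents, dp)

    deltaparent = {0: -1, 1: 0, 2: 0}
    parents = {0: (-1, -1), 1: (0, -1), 2: (1, -1)}
    assert issnapshot(deltaparent, parents, 0)        # stored as full text
    assert not issnapshot(deltaparent, parents, 1)    # delta against its parent
    assert issnapshot(deltaparent, parents, 2)        # intermediate snapshot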
--- a/mercurial/revset.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/revset.py Mon Aug 20 09:48:08 2018 -0700 @@ -242,7 +242,7 @@ def listset(repo, subset, *xs, **opts): raise error.ParseError(_("can't use a list in this context"), - hint=_('see hg help "revsets.x or y"')) + hint=_('see \'hg help "revsets.x or y"\'')) def keyvaluepair(repo, subset, k, v, order): raise error.ParseError(_("can't use a key-value pair in this context"))
--- a/mercurial/revsetlang.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/revsetlang.py Mon Aug 20 09:48:08 2018 -0700 @@ -63,7 +63,7 @@ _syminitletters = set(pycompat.iterbytestr( string.ascii_letters.encode('ascii') + string.digits.encode('ascii') + - '._@')) | set(map(pycompat.bytechr, xrange(128, 256))) + '._@')) | set(map(pycompat.bytechr, pycompat.xrange(128, 256))) # default set of valid characters for non-initial letters of symbols _symletters = _syminitletters | set(pycompat.iterbytestr('-/'))
--- a/mercurial/scmutil.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/scmutil.py Mon Aug 20 09:48:08 2018 -0700 @@ -169,64 +169,64 @@ reason = _('timed out waiting for lock held by %r') % inst.locker else: reason = _('lock held by %r') % inst.locker - ui.warn(_("abort: %s: %s\n") - % (inst.desc or stringutil.forcebytestr(inst.filename), reason)) + ui.error(_("abort: %s: %s\n") % ( + inst.desc or stringutil.forcebytestr(inst.filename), reason)) if not inst.locker: - ui.warn(_("(lock might be very busy)\n")) + ui.error(_("(lock might be very busy)\n")) except error.LockUnavailable as inst: - ui.warn(_("abort: could not lock %s: %s\n") % - (inst.desc or stringutil.forcebytestr(inst.filename), - encoding.strtolocal(inst.strerror))) + ui.error(_("abort: could not lock %s: %s\n") % + (inst.desc or stringutil.forcebytestr(inst.filename), + encoding.strtolocal(inst.strerror))) except error.OutOfBandError as inst: if inst.args: msg = _("abort: remote error:\n") else: msg = _("abort: remote error\n") - ui.warn(msg) + ui.error(msg) if inst.args: - ui.warn(''.join(inst.args)) + ui.error(''.join(inst.args)) if inst.hint: - ui.warn('(%s)\n' % inst.hint) + ui.error('(%s)\n' % inst.hint) except error.RepoError as inst: - ui.warn(_("abort: %s!\n") % inst) + ui.error(_("abort: %s!\n") % inst) if inst.hint: - ui.warn(_("(%s)\n") % inst.hint) + ui.error(_("(%s)\n") % inst.hint) except error.ResponseError as inst: - ui.warn(_("abort: %s") % inst.args[0]) + ui.error(_("abort: %s") % inst.args[0]) msg = inst.args[1] if isinstance(msg, type(u'')): msg = pycompat.sysbytes(msg) if not isinstance(msg, bytes): - ui.warn(" %r\n" % (msg,)) + ui.error(" %r\n" % (msg,)) elif not msg: - ui.warn(_(" empty string\n")) + ui.error(_(" empty string\n")) else: - ui.warn("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg))) + ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg))) except error.CensoredNodeError as inst: - ui.warn(_("abort: file censored %s!\n") % inst) + ui.error(_("abort: file censored %s!\n") % inst) except error.RevlogError as inst: - ui.warn(_("abort: %s!\n") % inst) + ui.error(_("abort: %s!\n") % inst) except error.InterventionRequired as inst: - ui.warn("%s\n" % inst) + ui.error("%s\n" % inst) if inst.hint: - ui.warn(_("(%s)\n") % inst.hint) + ui.error(_("(%s)\n") % inst.hint) return 1 except error.WdirUnsupported: - ui.warn(_("abort: working directory revision cannot be specified\n")) + ui.error(_("abort: working directory revision cannot be specified\n")) except error.Abort as inst: - ui.warn(_("abort: %s\n") % inst) + ui.error(_("abort: %s\n") % inst) if inst.hint: - ui.warn(_("(%s)\n") % inst.hint) + ui.error(_("(%s)\n") % inst.hint) except ImportError as inst: - ui.warn(_("abort: %s!\n") % stringutil.forcebytestr(inst)) + ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst)) m = stringutil.forcebytestr(inst).split()[-1] if m in "mpatch bdiff".split(): - ui.warn(_("(did you forget to compile extensions?)\n")) + ui.error(_("(did you forget to compile extensions?)\n")) elif m in "zlib".split(): - ui.warn(_("(is your Python install correct?)\n")) + ui.error(_("(is your Python install correct?)\n")) except IOError as inst: if util.safehasattr(inst, "code"): - ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst)) + ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst)) elif util.safehasattr(inst, "reason"): try: # usually it is in the form (errno, strerror) reason = inst.reason.args[1] @@ -236,34 +236,34 @@ if isinstance(reason, pycompat.unicode): # SSLError of Python 2.7.9 
contains a unicode reason = encoding.unitolocal(reason) - ui.warn(_("abort: error: %s\n") % reason) + ui.error(_("abort: error: %s\n") % reason) elif (util.safehasattr(inst, "args") and inst.args and inst.args[0] == errno.EPIPE): pass elif getattr(inst, "strerror", None): if getattr(inst, "filename", None): - ui.warn(_("abort: %s: %s\n") % ( + ui.error(_("abort: %s: %s\n") % ( encoding.strtolocal(inst.strerror), stringutil.forcebytestr(inst.filename))) else: - ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror)) + ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror)) else: raise except OSError as inst: if getattr(inst, "filename", None) is not None: - ui.warn(_("abort: %s: '%s'\n") % ( + ui.error(_("abort: %s: '%s'\n") % ( encoding.strtolocal(inst.strerror), stringutil.forcebytestr(inst.filename))) else: - ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror)) + ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror)) except MemoryError: - ui.warn(_("abort: out of memory\n")) + ui.error(_("abort: out of memory\n")) except SystemExit as inst: # Commands shouldn't sys.exit directly, but give a return code. # Just in case catch this and pass exit code to caller. return inst.code except socket.error as inst: - ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1])) + ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1])) return -1 @@ -437,41 +437,93 @@ return '%d:%s' % (rev, hexfunc(node)) def resolvehexnodeidprefix(repo, prefix): - # Uses unfiltered repo because it's faster when prefix is ambiguous. - # This matches the shortesthexnodeidprefix() function below. - node = repo.unfiltered().changelog._partialmatch(prefix) + if (prefix.startswith('x') and + repo.ui.configbool('experimental', 'revisions.prefixhexnode')): + prefix = prefix[1:] + try: + # Uses unfiltered repo because it's faster when prefix is ambiguous. + # This matches the shortesthexnodeidprefix() function below. + node = repo.unfiltered().changelog._partialmatch(prefix) + except error.AmbiguousPrefixLookupError: + revset = repo.ui.config('experimental', 'revisions.disambiguatewithin') + if revset: + # Clear config to avoid infinite recursion + configoverrides = {('experimental', + 'revisions.disambiguatewithin'): None} + with repo.ui.configoverride(configoverrides): + revs = repo.anyrevs([revset], user=True) + matches = [] + for rev in revs: + node = repo.changelog.node(rev) + if hex(node).startswith(prefix): + matches.append(node) + if len(matches) == 1: + return matches[0] + raise if node is None: return repo.changelog.rev(node) # make sure node isn't filtered return node -def shortesthexnodeidprefix(repo, node, minlength=1): - """Find the shortest unambiguous prefix that matches hexnode.""" +def mayberevnum(repo, prefix): + """Checks if the given prefix may be mistaken for a revision number""" + try: + i = int(prefix) + # if we are a pure int, then starting with zero will not be + # confused as a rev; or, obviously, if the int is larger + # than the value of the tip rev + if prefix[0:1] == b'0' or i > len(repo): + return False + return True + except ValueError: + return False + +def shortesthexnodeidprefix(repo, node, minlength=1, cache=None): + """Find the shortest unambiguous prefix that matches hexnode. + + If "cache" is not None, it must be a dictionary that can be used for + caching between calls to this method. + """ # _partialmatch() of filtered changelog could take O(len(repo)) time, # which would be unacceptably slow. 
so we look for hash collision in # unfiltered space, which means some hashes may be slightly longer. - cl = repo.unfiltered().changelog - - def isrev(prefix): - try: - i = int(prefix) - # if we are a pure int, then starting with zero will not be - # confused as a rev; or, obviously, if the int is larger - # than the value of the tip rev - if prefix[0:1] == b'0' or i > len(cl): - return False - return True - except ValueError: - return False def disambiguate(prefix): """Disambiguate against revnums.""" + if repo.ui.configbool('experimental', 'revisions.prefixhexnode'): + if mayberevnum(repo, prefix): + return 'x' + prefix + else: + return prefix + hexnode = hex(node) for length in range(len(prefix), len(hexnode) + 1): prefix = hexnode[:length] - if not isrev(prefix): + if not mayberevnum(repo, prefix): return prefix + cl = repo.unfiltered().changelog + revset = repo.ui.config('experimental', 'revisions.disambiguatewithin') + if revset: + revs = None + if cache is not None: + revs = cache.get('disambiguationrevset') + if revs is None: + revs = repo.anyrevs([revset], user=True) + if cache is not None: + cache['disambiguationrevset'] = revs + if cl.rev(node) in revs: + hexnode = hex(node) + for length in range(minlength, len(hexnode) + 1): + matches = [] + prefix = hexnode[:length] + for rev in revs: + otherhexnode = repo[rev].hex() + if prefix == otherhexnode[:length]: + matches.append(otherhexnode) + if len(matches) == 1: + return disambiguate(prefix) + try: return disambiguate(cl.shortest(node, minlength)) except error.LookupError: @@ -480,8 +532,8 @@ def isrevsymbol(repo, symbol): """Checks if a symbol exists in the repo. - See revsymbol() for details. Raises error.LookupError if the symbol is an - ambiguous nodeid prefix. + See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the + symbol is an ambiguous nodeid prefix. """ try: revsymbol(repo, symbol) @@ -780,7 +832,7 @@ return self._revcontains(self._torev(node)) def cleanupnodes(repo, replacements, operation, moves=None, metadata=None, - fixphase=False, targetphase=None): + fixphase=False, targetphase=None, backup=True): """do common cleanups when old nodes are replaced by new nodes That includes writing obsmarkers or stripping nodes, and moving bookmarks. @@ -905,7 +957,8 @@ from . import repair # avoid import cycle tostrip = list(replacements) if tostrip: - repair.delayedstrip(repo.ui, repo, tostrip, operation) + repair.delayedstrip(repo.ui, repo, tostrip, operation, + backup=backup) def addremove(repo, matcher, prefix, opts=None): if opts is None: @@ -952,9 +1005,11 @@ if repo.ui.verbose or not m.exact(abs): if abs in unknownset: status = _('adding %s\n') % m.uipath(abs) + label = 'addremove.added' else: status = _('removing %s\n') % m.uipath(abs) - repo.ui.status(status) + label = 'addremove.removed' + repo.ui.status(status, label=label) renames = _findrenames(repo, m, added + unknown, removed + deleted, similarity) @@ -1542,7 +1597,7 @@ @reportsummary def reportnewcs(repo, tr): """Report the range of new revisions pulled/unbundled.""" - newrevs = tr.changes.get('revs', xrange(0, 0)) + newrevs = tr.changes.get('revs', pycompat.xrange(0, 0)) if not newrevs: return @@ -1565,7 +1620,7 @@ """Report statistics of phase changes for changesets pre-existing pull/unbundle. """ - newrevs = tr.changes.get('revs', xrange(0, 0)) + newrevs = tr.changes.get('revs', pycompat.xrange(0, 0)) phasetracking = tr.changes.get('phases', {}) if not phasetracking: return
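The 'x' escape and the disambiguation loop above both hinge on one question: could this hex prefix also be read as a revision number? That test, extracted into a dependency-free sketch mirroring mayberevnum() from the hunk (repolen stands in for len(repo); the example prefixes are hypothetical):

def mayberevnum(repolen, prefix):
    # A prefix is only mistakable for a revision number when it parses as
    # a plain integer, has no leading zero, and is no larger than tip.
    try:
        i = int(prefix)
    except ValueError:
        return False
    if prefix[:1] == '0' or i > repolen:
        return False
    return True

assert mayberevnum(100, '12')        # "12" could name revision 12
assert not mayberevnum(100, '012')   # leading zero: always a hash prefix
assert not mayberevnum(100, '999')   # beyond tip: always a hash prefix
assert not mayberevnum(100, 'abc')   # not an integer at all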
--- a/mercurial/server.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/server.py Mon Aug 20 09:48:08 2018 -0700 @@ -79,7 +79,7 @@ runargs.append('--daemon-postexec=unlink:%s' % lockpath) # Don't pass --cwd to the child process, because we've already # changed directory. - for i in xrange(1, len(runargs)): + for i in pycompat.xrange(1, len(runargs)): if runargs[i].startswith('--cwd='): del runargs[i] break
--- a/mercurial/setdiscovery.py	Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/setdiscovery.py	Mon Aug 20 09:48:08 2018 -0700 @@ -51,30 +51,25 @@ nullrev, ) from . import ( - dagutil, error, util, ) -def _updatesample(dag, nodes, sample, quicksamplesize=0): +def _updatesample(revs, heads, sample, parentfn, quicksamplesize=0): """update an existing sample to match the expected size - The sample is updated with nodes exponentially distant from each head of the - <nodes> set. (H~1, H~2, H~4, H~8, etc). + The sample is updated with revs exponentially distant from each head of the + <revs> set. (H~1, H~2, H~4, H~8, etc). If a target size is specified, the sampling will stop once this size is - reached. Otherwise sampling will happen until roots of the <nodes> set are + reached. Otherwise sampling will happen until roots of the <revs> set are reached. - :dag: a dag object from dagutil - :nodes: set of nodes we want to discover (if None, assume the whole dag) + :revs: set of revs we want to discover (if None, assume the whole dag) + :heads: set of DAG head revs :sample: a sample to update + :parentfn: a callable to resolve parents for a revision :quicksamplesize: optional target size of the sample""" - # if nodes is empty we scan the entire graph - if nodes: - heads = dag.headsetofconnecteds(nodes) - else: - heads = dag.heads() dist = {} visit = collections.deque(heads) seen = set() @@ -91,37 +86,69 @@ if quicksamplesize and (len(sample) >= quicksamplesize): return seen.add(curr) - for p in dag.parents(curr): - if not nodes or p in nodes: + + for p in parentfn(curr): + if p != nullrev and (not revs or p in revs): dist.setdefault(p, d + 1) visit.append(p) -def _takequicksample(dag, nodes, size): +def _takequicksample(repo, headrevs, revs, size): """takes a quick sample of size <size> It is meant for initial sampling and focuses on querying heads and close ancestors of heads. :dag: a dag object - :nodes: set of nodes to discover + :headrevs: set of head revisions in local DAG to consider + :revs: set of revs to discover :size: the maximum size of the sample""" - sample = dag.headsetofconnecteds(nodes) + sample = set(repo.revs('heads(%ld)', revs)) + if len(sample) >= size: return _limitsample(sample, size) - _updatesample(dag, None, sample, quicksamplesize=size) + + _updatesample(None, headrevs, sample, repo.changelog.parentrevs, + quicksamplesize=size) return sample -def _takefullsample(dag, nodes, size): - sample = dag.headsetofconnecteds(nodes) +def _takefullsample(repo, headrevs, revs, size): + sample = set(repo.revs('heads(%ld)', revs)) + # update from heads - _updatesample(dag, nodes, sample) + revsheads = set(repo.revs('heads(%ld)', revs)) + _updatesample(revs, revsheads, sample, repo.changelog.parentrevs) + # update from roots - _updatesample(dag.inverse(), nodes, sample) + revsroots = set(repo.revs('roots(%ld)', revs)) + + # _updatesample() essentially does iteration over revisions to look up + # their children. This lookup is expensive and doing it in a loop is + # quadratic. We precompute the children for all relevant revisions and + # make the lookup in _updatesample() a simple dict lookup. + # + # Because this function can be called multiple times during discovery, we + # may still perform redundant work and there is room to optimize this by + # keeping a persistent cache of children across invocations. 
+ children = {} + + parentrevs = repo.changelog.parentrevs + for rev in repo.changelog.revs(start=min(revsroots)): + # Always ensure revision has an entry so we don't need to worry about + # missing keys. + children.setdefault(rev, []) + + for prev in parentrevs(rev): + if prev == nullrev: + continue + + children.setdefault(prev, []).append(rev) + + _updatesample(revs, revsroots, sample, children.__getitem__) assert sample sample = _limitsample(sample, size) if len(sample) < size: more = size - len(sample) - sample.update(random.sample(list(nodes - sample), more)) + sample.update(random.sample(list(revs - sample), more)) return sample def _limitsample(sample, desiredlen): @@ -142,16 +169,17 @@ roundtrips = 0 cl = local.changelog - localsubset = None + clnode = cl.node + clrev = cl.rev + if ancestorsof is not None: - rev = local.changelog.rev - localsubset = [rev(n) for n in ancestorsof] - dag = dagutil.revlogdag(cl, localsubset=localsubset) + ownheads = [clrev(n) for n in ancestorsof] + else: + ownheads = [rev for rev in cl.headrevs() if rev != nullrev] # early exit if we know all the specified remote heads already ui.debug("query 1; heads\n") roundtrips += 1 - ownheads = dag.heads() sample = _limitsample(ownheads, initialsamplesize) # indices between sample and externalized version must match sample = list(sample) @@ -159,7 +187,7 @@ with remote.commandexecutor() as e: fheads = e.callcommand('heads', {}) fknown = e.callcommand('known', { - 'nodes': dag.externalizeall(sample), + 'nodes': [clnode(r) for r in sample], }) srvheadhashes, yesno = fheads.result(), fknown.result() @@ -173,15 +201,25 @@ # compatibility reasons) ui.status(_("searching for changes\n")) - srvheads = dag.internalizeall(srvheadhashes, filterunknown=True) + srvheads = [] + for node in srvheadhashes: + if node == nullid: + continue + + try: + srvheads.append(clrev(node)) + # Catches unknown and filtered nodes. 
+ except error.LookupError: + continue + if len(srvheads) == len(srvheadhashes): ui.debug("all remote heads known locally\n") - return (srvheadhashes, False, srvheadhashes,) + return srvheadhashes, False, srvheadhashes if len(sample) == len(ownheads) and all(yesno): ui.note(_("all local heads known remotely\n")) - ownheadhashes = dag.externalizeall(ownheads) - return (ownheadhashes, True, srvheadhashes,) + ownheadhashes = [clnode(r) for r in ownheads] + return ownheadhashes, True, srvheadhashes # full blown discovery @@ -202,7 +240,12 @@ if sample: missinginsample = [n for i, n in enumerate(sample) if not yesno[i]] - missing.update(dag.descendantset(missinginsample, missing)) + + if missing: + missing.update(local.revs('descendants(%ld) - descendants(%ld)', + missinginsample, missing)) + else: + missing.update(local.revs('descendants(%ld)', missinginsample)) undecided.difference_update(missing) @@ -224,7 +267,7 @@ if len(undecided) < targetsize: sample = list(undecided) else: - sample = samplefunc(dag, undecided, targetsize) + sample = samplefunc(local, ownheads, undecided, targetsize) roundtrips += 1 progress.update(roundtrips) @@ -235,7 +278,7 @@ with remote.commandexecutor() as e: yesno = e.callcommand('known', { - 'nodes': dag.externalizeall(sample), + 'nodes': [clnode(r) for r in sample], }).result() full = True @@ -247,10 +290,8 @@ # heads(common) == heads(common.bases) since common represents common.bases # and all its ancestors - result = dag.headsetofconnecteds(common.bases) - # common.bases can include nullrev, but our contract requires us to not - # return any heads in that case, so discard that - result.discard(nullrev) + # The presence of nullrev will confuse heads(). So filter it out. + result = set(local.revs('heads(%ld)', common.bases - {nullrev})) elapsed = util.timer() - start progress.complete() ui.debug("%d total queries in %.4fs\n" % (roundtrips, elapsed)) @@ -268,4 +309,5 @@ return ({nullid}, True, srvheadhashes,) anyincoming = (srvheadhashes != [nullid]) - return dag.externalizeall(result), anyincoming, srvheadhashes + result = {clnode(r) for r in result} + return result, anyincoming, srvheadhashes
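After this rewrite _updatesample() needs only a parent-lookup callable, so the same walk serves both directions: parentrevs for sampling from the heads, and the precomputed children map for sampling from the roots. A toy version of the exponential-distance walk over a hypothetical linear history, with -1 standing in for nullrev:

import collections

def updatesample(revs, heads, sample, parentfn, quicksamplesize=0):
    # Breadth-first walk from the heads; keep a revision when its
    # distance from a head is a power of two (H~1, H~2, H~4, ...).
    dist = {}
    visit = collections.deque(heads)
    seen = set()
    factor = 1
    while visit:
        curr = visit.popleft()
        if curr in seen:
            continue
        d = dist.setdefault(curr, 1)
        if d > factor:
            factor *= 2
        if d == factor:
            sample.add(curr)
            if quicksamplesize and len(sample) >= quicksamplesize:
                return
        seen.add(curr)
        for p in parentfn(curr):
            if p != -1 and (not revs or p in revs):  # -1 plays nullrev
                dist.setdefault(p, d + 1)
                visit.append(p)

# A linear history 0 <- 1 <- ... <- 15, head at revision 15:
sample = set()
updatesample(None, [15], sample, lambda r: [r - 1])
print(sorted(sample))  # [0, 8, 12, 14, 15]: exponentially spaced ancestors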
--- a/mercurial/simplemerge.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/simplemerge.py Mon Aug 20 09:48:08 2018 -0700 @@ -58,7 +58,8 @@ """ if (aend - astart) != (bend - bstart): return False - for ia, ib in zip(xrange(astart, aend), xrange(bstart, bend)): + for ia, ib in zip(pycompat.xrange(astart, aend), + pycompat.xrange(bstart, bend)): if a[ia] != b[ib]: return False else:
--- a/mercurial/smartset.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/smartset.py Mon Aug 20 09:48:08 2018 -0700 @@ -152,11 +152,11 @@ # but start > stop is allowed, which should be an empty set. ys = [] it = iter(self) - for x in xrange(start): + for x in pycompat.xrange(start): y = next(it, None) if y is None: break - for x in xrange(stop - start): + for x in pycompat.xrange(stop - start): y = next(it, None) if y is None: break @@ -1005,13 +1005,13 @@ return self.fastdesc() def fastasc(self): - iterrange = xrange(self._start, self._end) + iterrange = pycompat.xrange(self._start, self._end) if self._hiddenrevs: return self._iterfilter(iterrange) return iter(iterrange) def fastdesc(self): - iterrange = xrange(self._end - 1, self._start - 1, -1) + iterrange = pycompat.xrange(self._end - 1, self._start - 1, -1) if self._hiddenrevs: return self._iterfilter(iterrange) return iter(iterrange)
--- a/mercurial/sparse.py	Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/sparse.py	Mon Aug 20 09:48:08 2018 -0700 @@ -31,9 +31,11 @@ # a per-repo option, possibly a repo requirement. enabled = False -def parseconfig(ui, raw): +def parseconfig(ui, raw, action): """Parse sparse config file content. + action is the command which is triggering this read; it can be narrow or sparse + Returns a tuple of includes, excludes, and profiles. """ includes = set() @@ -54,8 +56,8 @@ elif line == '[include]': if havesection and current != includes: # TODO pass filename into this API so we can report it. - raise error.Abort(_('sparse config cannot have includes ' + - 'after excludes')) + raise error.Abort(_('%(action)s config cannot have includes ' + 'after excludes') % {'action': action}) havesection = True current = includes continue @@ -64,14 +66,16 @@ current = excludes elif line: if current is None: - raise error.Abort(_('sparse config entry outside of ' - 'section: %s') % line, + raise error.Abort(_('%(action)s config entry outside of ' + 'section: %(line)s') + % {'action': action, 'line': line}, hint=_('add an [include] or [exclude] line ' 'to declare the entry type')) if line.strip().startswith('/'): - ui.warn(_('warning: sparse profile cannot use' + - ' paths starting with /, ignoring %s\n') % line) + ui.warn(_('warning: %(action)s profile cannot use' + ' paths starting with /, ignoring %(line)s\n') + % {'action': action, 'line': line}) continue current.add(line) @@ -102,7 +106,7 @@ raise error.Abort(_('cannot parse sparse patterns from working ' 'directory')) - includes, excludes, profiles = parseconfig(repo.ui, raw) + includes, excludes, profiles = parseconfig(repo.ui, raw, 'sparse') ctx = repo[rev] if profiles: @@ -128,7 +132,7 @@ repo.ui.debug(msg) continue - pincludes, pexcludes, subprofs = parseconfig(repo.ui, raw) + pincludes, pexcludes, subprofs = parseconfig(repo.ui, raw, 'sparse') includes.update(pincludes) excludes.update(pexcludes) profiles.update(subprofs) @@ -516,7 +520,7 @@ force=False, removing=False): """Update the sparse config and working directory state.""" raw = repo.vfs.tryread('sparse') - oldincludes, oldexcludes, oldprofiles = parseconfig(repo.ui, raw) + oldincludes, oldexcludes, oldprofiles = parseconfig(repo.ui, raw, 'sparse') oldstatus = repo.status() oldmatch = matcher(repo) @@ -556,7 +560,7 @@ """ with repo.wlock(): raw = repo.vfs.tryread('sparse') - includes, excludes, profiles = parseconfig(repo.ui, raw) + includes, excludes, profiles = parseconfig(repo.ui, raw, 'sparse') if not includes and not excludes: return @@ -572,7 +576,7 @@ with repo.wlock(): # read current configuration raw = repo.vfs.tryread('sparse') - includes, excludes, profiles = parseconfig(repo.ui, raw) + includes, excludes, profiles = parseconfig(repo.ui, raw, 'sparse') aincludes, aexcludes, aprofiles = activeconfig(repo) # Import rules on top; only take in rules that are not yet @@ -582,7 +586,8 @@ with util.posixfile(util.expandpath(p), mode='rb') as fh: raw = fh.read() - iincludes, iexcludes, iprofiles = parseconfig(repo.ui, raw) + iincludes, iexcludes, iprofiles = parseconfig(repo.ui, raw, + 'sparse') oldsize = len(includes) + len(excludes) + len(profiles) includes.update(iincludes - aincludes) excludes.update(iexcludes - aexcludes) @@ -615,7 +620,8 @@ """ with repo.wlock(): raw = repo.vfs.tryread('sparse') - oldinclude, oldexclude, oldprofiles = parseconfig(repo.ui, raw) + oldinclude, oldexclude, oldprofiles = parseconfig(repo.ui, raw, + 'sparse') if reset: newinclude = set()
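For reference, the two-section format parseconfig() handles needs very little code to parse. A simplified sketch, independent of Mercurial's ui and error types (profile '%include' lines are omitted, and the error handling is illustrative):

def parsesparseconfig(raw, action='sparse'):
    # Parse "[include]"/"[exclude]" sections into two pattern sets.
    includes, excludes = set(), set()
    current = None
    for line in raw.splitlines():
        line = line.strip()
        if not line or line.startswith('#'):
            continue                      # skip blanks and comments
        if line == '[include]':
            if current is excludes:
                raise ValueError('%s config cannot have includes '
                                 'after excludes' % action)
            current = includes
        elif line == '[exclude]':
            current = excludes
        elif current is None:
            raise ValueError('%s config entry outside of section: %s'
                             % (action, line))
        elif line.startswith('/'):
            pass                          # absolute paths are ignored
        else:
            current.add(line)
    return includes, excludes

inc, exc = parsesparseconfig('[include]\nsrc\n[exclude]\nsrc/tests\n')
print(sorted(inc), sorted(exc))           # ['src'] ['src/tests']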
--- a/mercurial/statprof.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/statprof.py Mon Aug 20 09:48:08 2018 -0700 @@ -356,7 +356,7 @@ stack = sample.stack sites = ['\1'.join([s.path, str(s.lineno), s.function]) for s in stack] - file.write(time + '\0' + '\0'.join(sites) + '\n') + file.write("%s\0%s\n" % (time, '\0'.join(sites))) def load_data(path): lines = open(path, 'r').read().splitlines()
--- a/mercurial/store.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/store.py Mon Aug 20 09:48:08 2018 -0700 @@ -118,7 +118,7 @@ def decode(s): i = 0 while i < len(s): - for l in xrange(1, 4): + for l in pycompat.xrange(1, 4): try: yield dmap[s[i:i + l]] i += l @@ -127,7 +127,8 @@ pass else: raise KeyError - return (lambda s: ''.join([cmap[s[c:c + 1]] for c in xrange(len(s))]), + return (lambda s: ''.join([cmap[s[c:c + 1]] + for c in pycompat.xrange(len(s))]), lambda s: ''.join(list(decode(s)))) _encodefname, _decodefname = _buildencodefun() @@ -159,7 +160,7 @@ 'the~07quick~adshot' ''' xchr = pycompat.bytechr - cmap = dict([(xchr(x), xchr(x)) for x in xrange(127)]) + cmap = dict([(xchr(x), xchr(x)) for x in pycompat.xrange(127)]) for x in _reserved(): cmap[xchr(x)] = "~%02x" % x for x in range(ord("A"), ord("Z") + 1): @@ -316,8 +317,8 @@ mode = None return mode -_data = ('data meta 00manifest.d 00manifest.i 00changelog.d 00changelog.i' - ' phaseroots obsstore') +_data = ('narrowspec data meta 00manifest.d 00manifest.i' + ' 00changelog.d 00changelog.i phaseroots obsstore') def isrevlog(f, kind, st): return kind == stat.S_IFREG and f[-2:] in ('.i', '.d') @@ -545,7 +546,7 @@ raise def copylist(self): - d = ('data meta dh fncache phaseroots obsstore' + d = ('narrowspec data meta dh fncache phaseroots obsstore' ' 00manifest.d 00manifest.i 00changelog.d 00changelog.i') return (['requires', '00changelog.i'] + ['store/' + f for f in d.split()])
--- a/mercurial/streamclone.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/streamclone.py Mon Aug 20 09:48:08 2018 -0700 @@ -358,7 +358,7 @@ with repo.transaction('clone'): with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount): - for i in xrange(filecount): + for i in pycompat.xrange(filecount): # XXX doesn't support '\n' or '\r' in filenames l = fp.readline() try:
--- a/mercurial/templatefilters.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/templatefilters.py Mon Aug 20 09:48:08 2018 -0700 @@ -119,7 +119,7 @@ b = b[:len(a)] if a == b: return a - for i in xrange(len(a)): + for i in pycompat.xrange(len(a)): if a[i] != b[i]: return a[:i] return a @@ -266,7 +266,7 @@ num_lines = len(lines) endswithnewline = text[-1:] == '\n' def indenter(): - for i in xrange(num_lines): + for i in pycompat.xrange(num_lines): l = lines[i] if i and l.strip(): yield prefix
--- a/mercurial/templatefuncs.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/templatefuncs.py Mon Aug 20 09:48:08 2018 -0700 @@ -596,7 +596,7 @@ yield sep yield argstr -@templatefunc('shortest(node, minlength=4)', requires={'repo'}) +@templatefunc('shortest(node, minlength=4)', requires={'repo', 'cache'}) def shortest(context, mapping, args): """Obtain the shortest representation of a node.""" @@ -629,8 +629,9 @@ return hexnode if not node: return hexnode + cache = context.resource(mapping, 'cache') try: - return scmutil.shortesthexnodeidprefix(repo, node, minlength) + return scmutil.shortesthexnodeidprefix(repo, node, minlength, cache) except error.RepoLookupError: return hexnode
--- a/mercurial/templatekw.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/templatekw.py Mon Aug 20 09:48:08 2018 -0700 @@ -168,9 +168,8 @@ @templatekeyword('author', requires={'ctx'}) def showauthor(context, mapping): - """String. The unmodified author of the changeset.""" - ctx = context.resource(mapping, 'ctx') - return ctx.user() + """Alias for ``{user}``""" + return showuser(context, mapping) @templatekeyword('bisect', requires={'repo', 'ctx'}) def showbisect(context, mapping): @@ -293,15 +292,14 @@ lambda k: '%s=%s' % (k, stringutil.escapestr(extras[k]))) def _showfilesbystat(context, mapping, name, index): - repo = context.resource(mapping, 'repo') ctx = context.resource(mapping, 'ctx') revcache = context.resource(mapping, 'revcache') if 'files' not in revcache: - revcache['files'] = repo.status(ctx.p1(), ctx)[:3] + revcache['files'] = ctx.p1().status(ctx)[:3] files = revcache['files'][index] return compatlist(context, mapping, name, files, element='file') -@templatekeyword('file_adds', requires={'repo', 'ctx', 'revcache'}) +@templatekeyword('file_adds', requires={'ctx', 'revcache'}) def showfileadds(context, mapping): """List of strings. Files added by this changeset.""" return _showfilesbystat(context, mapping, 'file_add', 1) @@ -345,12 +343,12 @@ key='name', value='source', fmt='%s (%s)', plural='file_copies') -@templatekeyword('file_dels', requires={'repo', 'ctx', 'revcache'}) +@templatekeyword('file_dels', requires={'ctx', 'revcache'}) def showfiledels(context, mapping): """List of strings. Files removed by this changeset.""" return _showfilesbystat(context, mapping, 'file_del', 2) -@templatekeyword('file_mods', requires={'repo', 'ctx', 'revcache'}) +@templatekeyword('file_mods', requires={'ctx', 'revcache'}) def showfilemods(context, mapping): """List of strings. Files modified by this changeset.""" return _showfilesbystat(context, mapping, 'file_mod', 0) @@ -758,6 +756,12 @@ ui = context.resource(mapping, 'ui') return ui.termwidth() +@templatekeyword('user', requires={'ctx'}) +def showuser(context, mapping): + """String. The unmodified author of the changeset.""" + ctx = context.resource(mapping, 'ctx') + return ctx.user() + @templatekeyword('instabilities', requires={'ctx'}) def showinstabilities(context, mapping): """List of strings. Evolution instabilities affecting the changeset.
--- a/mercurial/templates/map-cmdline.bisect Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/templates/map-cmdline.bisect Mon Aug 20 09:48:08 2018 -0700 @@ -1,10 +1,10 @@ %include map-cmdline.default [templates] -changeset = '{cset}{lbisect}{branches}{bookmarks}{tags}{parents}{user}{ldate}{summary}\n' +changeset = '{cset}{lbisect}{branches}{bookmarks}{tags}{parents}{luser}{ldate}{summary}\n' changeset_quiet = '{lshortbisect} {rev}:{node|short}\n' -changeset_verbose = '{cset}{lbisect}{branches}{bookmarks}{tags}{parents}{user}{ldate}{lfiles}{lfile_copies_switch}{description}\n' -changeset_debug = '{fullcset}{lbisect}{branches}{bookmarks}{tags}{lphase}{parents}{manifest}{user}{ldate}{lfile_mods}{lfile_adds}{lfile_dels}{lfile_copies_switch}{extras}{description}\n' +changeset_verbose = '{cset}{lbisect}{branches}{bookmarks}{tags}{parents}{luser}{ldate}{lfiles}{lfile_copies_switch}{description}\n' +changeset_debug = '{fullcset}{lbisect}{branches}{bookmarks}{tags}{lphase}{parents}{manifest}{luser}{ldate}{lfile_mods}{lfile_adds}{lfile_dels}{lfile_copies_switch}{extras}{description}\n' # We take the zeroth word in order to omit "(implicit)" in the label bisectlabel = ' bisect.{word('0', bisect)}'
--- a/mercurial/templates/map-cmdline.default Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/templates/map-cmdline.default Mon Aug 20 09:48:08 2018 -0700 @@ -2,10 +2,10 @@ # to replace some keywords with 'lkeyword', for 'labelled keyword' [templates] -changeset = '{cset}{branches}{bookmarks}{tags}{parents}{user}{ldate}{ltroubles}{lobsfate}{summary}\n' +changeset = '{cset}{branches}{bookmarks}{tags}{parents}{luser}{ldate}{ltroubles}{lobsfate}{summary}\n' changeset_quiet = '{lnode}' -changeset_verbose = '{cset}{branches}{bookmarks}{tags}{parents}{user}{ldate}{ltroubles}{lobsfate}{lfiles}{lfile_copies_switch}{description}\n' -changeset_debug = '{fullcset}{branches}{bookmarks}{tags}{lphase}{parents}{manifest}{user}{ldate}{ltroubles}{lobsfate}{lfile_mods}{lfile_adds}{lfile_dels}{lfile_copies_switch}{extras}{description}\n' +changeset_verbose = '{cset}{branches}{bookmarks}{tags}{parents}{luser}{ldate}{ltroubles}{lobsfate}{lfiles}{lfile_copies_switch}{description}\n' +changeset_debug = '{fullcset}{branches}{bookmarks}{tags}{lphase}{parents}{manifest}{luser}{ldate}{ltroubles}{lobsfate}{lfile_mods}{lfile_adds}{lfile_dels}{lfile_copies_switch}{extras}{description}\n' # File templates lfiles = '{if(files, @@ -54,8 +54,8 @@ bookmark = '{label("log.bookmark", "bookmark: {bookmark}")}\n' -user = '{label("log.user", - "user: {author}")}\n' +luser = '{label("log.user", + "user: {author}")}\n' summary = '{if(desc|strip, "{label('log.summary', 'summary: {desc|firstline}')}\n")}'
--- a/mercurial/templates/map-cmdline.phases Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/templates/map-cmdline.phases Mon Aug 20 09:48:08 2018 -0700 @@ -1,5 +1,5 @@ %include map-cmdline.default [templates] -changeset = '{cset}{branches}{bookmarks}{tags}{lphase}{parents}{user}{ldate}{summary}\n' -changeset_verbose = '{cset}{branches}{bookmarks}{tags}{lphase}{parents}{user}{ldate}{lfiles}{lfile_copies_switch}{description}\n' +changeset = '{cset}{branches}{bookmarks}{tags}{lphase}{parents}{luser}{ldate}{summary}\n' +changeset_verbose = '{cset}{branches}{bookmarks}{tags}{lphase}{parents}{luser}{ldate}{lfiles}{lfile_copies_switch}{description}\n'
--- a/mercurial/templates/map-cmdline.status Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/templates/map-cmdline.status Mon Aug 20 09:48:08 2018 -0700 @@ -2,9 +2,9 @@ [templates] # Override base templates -changeset = '{cset}{branches}{bookmarks}{tags}{parents}{user}{ldate}{summary}{lfiles}\n' -changeset_verbose = '{cset}{branches}{bookmarks}{tags}{parents}{user}{ldate}{description}{lfiles}\n' -changeset_debug = '{fullcset}{branches}{bookmarks}{tags}{lphase}{parents}{manifest}{user}{ldate}{extras}{description}{lfiles}\n' +changeset = '{cset}{branches}{bookmarks}{tags}{parents}{luser}{ldate}{summary}{lfiles}\n' +changeset_verbose = '{cset}{branches}{bookmarks}{tags}{parents}{luser}{ldate}{description}{lfiles}\n' +changeset_debug = '{fullcset}{branches}{bookmarks}{tags}{lphase}{parents}{manifest}{luser}{ldate}{extras}{description}{lfiles}\n' # Override the file templates lfiles = '{if(files,
--- a/mercurial/templateutil.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/templateutil.py Mon Aug 20 09:48:08 2018 -0700 @@ -810,8 +810,9 @@ return data def _recursivesymbolblocker(key): - def showrecursion(**args): + def showrecursion(context, mapping): raise error.Abort(_("recursive reference '%s' in template") % key) + showrecursion._requires = () # mark as new-style templatekw return showrecursion def runsymbol(context, mapping, key, default=''): @@ -827,12 +828,16 @@ v = default if callable(v) and getattr(v, '_requires', None) is None: # old templatekw: expand all keywords and resources - # (TODO: deprecate this after porting web template keywords to new API) + # (TODO: drop support for old-style functions. 'f._requires = ()' + # can be removed.) props = {k: context._resources.lookup(context, mapping, k) for k in context._resources.knownkeys()} # pass context to _showcompatlist() through templatekw._showlist() props['templ'] = context props.update(mapping) + ui = props.get('ui') + if ui: + ui.deprecwarn("old-style template keyword '%s'" % key, '4.8') return v(**pycompat.strkwargs(props)) if callable(v): # new templatekw
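The deprecation above keys off the `_requires` attribute: keyword functions that carry it are new-style and take (context, mapping) directly, while legacy functions get every resource expanded into keyword arguments plus a warning. A toy dispatcher showing both calling conventions (the decorator and names here are illustrative, not Mercurial's registrar API):

def templatekeyword(func):
    # New-style keywords carry a `_requires` attribute (empty here).
    func._requires = ()
    return func

@templatekeyword
def shownewstyle(context, mapping):
    return 'new style: %s' % mapping['rev']

def showoldstyle(**props):
    # Old style: receives every resource as a keyword argument.
    return 'old style: %s' % props['rev']

def runsymbol(context, mapping, v):
    if callable(v) and getattr(v, '_requires', None) is None:
        print("(deprecation) old-style template keyword")
        return v(**mapping)          # expand everything, as before
    if callable(v):
        return v(context, mapping)   # new style
    return v

mapping = {'rev': 42}
print(runsymbol(None, mapping, shownewstyle))  # new style: 42
print(runsymbol(None, mapping, showoldstyle))  # warns, then: old style: 42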
--- a/mercurial/treediscovery.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/treediscovery.py Mon Aug 20 09:48:08 2018 -0700 @@ -16,6 +16,7 @@ ) from . import ( error, + pycompat, ) def findcommonincoming(repo, remote, heads=None, force=False): @@ -111,7 +112,7 @@ progress.increment() repo.ui.debug("request %d: %s\n" % (reqcnt, " ".join(map(short, r)))) - for p in xrange(0, len(r), 10): + for p in pycompat.xrange(0, len(r), 10): with remote.commandexecutor() as e: branches = e.callcommand('branches', { 'nodes': r[p:p + 10],
--- a/mercurial/ui.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/ui.py Mon Aug 20 09:48:08 2018 -0700 @@ -67,6 +67,9 @@ update.check = noconflict # Show conflicts information in `hg status` status.verbose = True +# Refuse to perform `hg resolve --mark` on files that still have conflict +# markers +resolve.mark-check = abort [diff] git = 1 @@ -392,7 +395,7 @@ def readconfig(self, filename, root=None, trust=False, sections=None, remap=None): try: - fp = open(filename, u'rb') + fp = open(filename, r'rb') except IOError: if not sections: # ignore unless we were looking for something return @@ -1420,6 +1423,7 @@ return getpass.getpass('') except EOFError: raise error.ResponseExpected() + def status(self, *msg, **opts): '''write status message to output (if ui.quiet is False) @@ -1428,6 +1432,7 @@ if not self.quiet: opts[r'label'] = opts.get(r'label', '') + ' ui.status' self.write(*msg, **opts) + def warn(self, *msg, **opts): '''write warning message to output (stderr) @@ -1435,6 +1440,15 @@ ''' opts[r'label'] = opts.get(r'label', '') + ' ui.warning' self.write_err(*msg, **opts) + + def error(self, *msg, **opts): + '''write error message to output (stderr) + + This adds an output label of "ui.error". + ''' + opts[r'label'] = opts.get(r'label', '') + ' ui.error' + self.write_err(*msg, **opts) + def note(self, *msg, **opts): '''write note to output (if ui.verbose is True) @@ -1443,6 +1457,7 @@ if self.verbose: opts[r'label'] = opts.get(r'label', '') + ' ui.note' self.write(*msg, **opts) + def debug(self, *msg, **opts): '''write debug message to output (if ui.debugflag is True)
--- a/mercurial/unionrepo.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/unionrepo.py Mon Aug 20 09:48:08 2018 -0700 @@ -73,7 +73,7 @@ # I have no idea if csize is valid in the base revlog context. e = (flags, None, rsize, base, link, self.rev(p1node), self.rev(p2node), node) - self.index.insert(-1, e) + self.index.append(e) self.nodemap[node] = n self.bundlerevs.add(n) n += 1
--- a/mercurial/util.py	Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/util.py	Mon Aug 20 09:48:08 2018 -0700 @@ -36,6 +36,9 @@ import warnings import zlib +from .thirdparty import ( + attr, +) from . import ( encoding, error, @@ -945,12 +948,12 @@ self.fh.write('%s> gettimeout() -> %f\n' % ( self.name, res)) - def setsockopt(self, level, optname, value): + def setsockopt(self, res, level, optname, value): if not self.states: return self.fh.write('%s> setsockopt(%r, %r, %r) -> %r\n' % ( - self.name, level, optname, value)) + self.name, level, optname, value, res)) def makeloggingsocket(logh, fh, name, reads=True, writes=True, states=True, logdata=False, logdataapis=True): @@ -2874,7 +2877,43 @@ (1, 0.000000001, _('%.3f ns')), ) -_timenesting = [0] +@attr.s +class timedcmstats(object): + """Stats information produced by the timedcm context manager on entering.""" + + # the starting value of the timer as a float (meaning and resolution are + # platform dependent, see util.timer) + start = attr.ib(default=attr.Factory(lambda: timer())) + # the number of seconds as a floating point value; starts at 0, updated when + # the context is exited. + elapsed = attr.ib(default=0) + # the number of nested timedcm context managers. + level = attr.ib(default=1) + + def __bytes__(self): + return timecount(self.elapsed) if self.elapsed else '<unknown>' + + __str__ = encoding.strmethod(__bytes__) + +@contextlib.contextmanager +def timedcm(): + """A context manager that produces timing information for a given context. + + On entering, a timedcmstats instance is produced. + + This context manager is reentrant. + + """ + # track nested context managers + timedcm._nested += 1 + timing_stats = timedcmstats(level=timedcm._nested) + try: + yield timing_stats + finally: + timing_stats.elapsed = timer() - timing_stats.start + timedcm._nested -= 1 + +timedcm._nested = 0 def timed(func): '''Report the execution time of a function call to stderr. @@ -2888,18 +2927,13 @@ ''' def wrapper(*args, **kwargs): - start = timer() - indent = 2 - _timenesting[0] += indent - try: - return func(*args, **kwargs) - finally: - elapsed = timer() - start - _timenesting[0] -= indent - stderr = procutil.stderr - stderr.write('%s%s: %s\n' % - (' ' * _timenesting[0], func.__name__, - timecount(elapsed))) + with timedcm() as time_stats: + result = func(*args, **kwargs) + stderr = procutil.stderr + stderr.write('%s%s: %s\n' % ( + ' ' * time_stats.level * 2, pycompat.bytestr(func.__name__), + time_stats)) + return result return wrapper _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
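timedcm replaces the old mutable _timenesting counter: the context manager yields its stats object immediately and fills in the elapsed time on exit, and a function attribute tracks nesting depth so nested timers know their level. A dependency-free sketch of the same pattern, using a plain dict in place of the attr-based class:

import contextlib
import time

@contextlib.contextmanager
def timedcm():
    # Reentrant timing context manager in the same shape as the one
    # added above: the yielded stats are completed when the block exits.
    timedcm._nested += 1
    stats = {'start': time.perf_counter(), 'elapsed': 0,
             'level': timedcm._nested}
    try:
        yield stats
    finally:
        stats['elapsed'] = time.perf_counter() - stats['start']
        timedcm._nested -= 1

timedcm._nested = 0

with timedcm() as outer:
    with timedcm() as inner:
        sum(range(100000))
print(outer['level'], inner['level'])        # 1 2
print(outer['elapsed'] >= inner['elapsed'])  # True: outer spans inner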
--- a/mercurial/utils/stringutil.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/utils/stringutil.py Mon Aug 20 09:48:08 2018 -0700 @@ -59,6 +59,9 @@ '%s: %s' % (pprint(k, bprefix=bprefix), pprint(v, bprefix=bprefix)) for k, v in sorted(o.items()))) + elif isinstance(o, set): + return 'set([%s])' % (b', '.join( + pprint(k, bprefix=bprefix) for k in sorted(o))) elif isinstance(o, tuple): return '(%s)' % (b', '.join(pprint(a, bprefix=bprefix) for a in o)) else: @@ -111,7 +114,7 @@ elif callable(r): return r() else: - return pycompat.byterepr(r) + return pprint(r) def binary(s): """return true if a string is binary data""" @@ -424,6 +427,8 @@ return encoding.trim(text, maxlength, ellipsis='...') def escapestr(s): + if isinstance(s, memoryview): + s = bytes(s) # call underlying function of s.encode('string_escape') directly for # Python 3 compatibility return codecs.escape_encode(s)[0] @@ -464,7 +469,7 @@ def _cutdown(self, ucstr, space_left): l = 0 colwidth = encoding.ucolwidth - for i in xrange(len(ucstr)): + for i in pycompat.xrange(len(ucstr)): l += colwidth(ucstr[i]) if space_left < l: return (ucstr[:i], ucstr[i:])
--- a/mercurial/win32.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/win32.py Mon Aug 20 09:48:08 2018 -0700 @@ -615,7 +615,7 @@ # callers to recreate f immediately while having other readers do their # implicit zombie filename blocking on a temporary name. - for tries in xrange(10): + for tries in pycompat.xrange(10): temp = '%s-%08x' % (f, random.randint(0, 0xffffffff)) try: os.rename(f, temp) # raises OSError EEXIST if temp exists
--- a/mercurial/wireprotoserver.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/wireprotoserver.py Mon Aug 20 09:48:08 2018 -0700 @@ -502,14 +502,14 @@ def getargs(self, args): data = {} keys = args.split() - for n in xrange(len(keys)): + for n in pycompat.xrange(len(keys)): argline = self._fin.readline()[:-1] arg, l = argline.split() if arg not in keys: raise error.Abort(_("unexpected parameter %r") % arg) if arg == '*': star = {} - for k in xrange(int(l)): + for k in pycompat.xrange(int(l)): argline = self._fin.readline()[:-1] arg, l = argline.split() val = self._fin.read(int(l))
--- a/mercurial/wireprotov1peer.py Sun Aug 19 13:27:02 2018 +0900 +++ b/mercurial/wireprotov1peer.py Mon Aug 20 09:48:08 2018 -0700 @@ -497,7 +497,7 @@ def between(self, pairs): batch = 8 # avoid giant requests r = [] - for i in xrange(0, len(pairs), batch): + for i in pycompat.xrange(0, len(pairs), batch): n = " ".join([wireprototypes.encodelist(p, '-') for p in pairs[i:i + batch]]) d = self._call("between", pairs=n)
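Only the range helper changes in this hunk, but the surrounding loop is worth noting: between() deliberately issues its pairs in batches of eight so that no single wire request grows too large. The chunking idiom in isolation:

def batches(items, size):
    # Yield successive fixed-size slices of a sequence.
    for i in range(0, len(items), size):
        yield items[i:i + size]

pairs = list(range(20))
print([len(chunk) for chunk in batches(pairs, 8)])  # [8, 8, 4]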
--- a/tests/dummysmtpd.py Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/dummysmtpd.py Mon Aug 20 09:48:08 2018 -0700 @@ -26,7 +26,7 @@ def __init__(self, localaddr): smtpd.SMTPServer.__init__(self, localaddr, remoteaddr=None) - def process_message(self, peer, mailfrom, rcpttos, data): + def process_message(self, peer, mailfrom, rcpttos, data, **kwargs): log('%s from=%s to=%s\n' % (peer[0], mailfrom, ', '.join(rcpttos))) def handle_error(self):
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/printrevset.py Mon Aug 20 09:48:08 2018 -0700 @@ -0,0 +1,41 @@ +from __future__ import absolute_import +from mercurial import ( + cmdutil, + commands, + extensions, + logcmdutil, + revsetlang, + smartset, +) + +from mercurial.utils import ( + stringutil, +) + +def logrevset(repo, pats, opts): + revs = logcmdutil._initialrevs(repo, opts) + if not revs: + return None + match, pats, slowpath = logcmdutil._makematcher(repo, revs, pats, opts) + return logcmdutil._makerevset(repo, match, pats, slowpath, opts) + +def uisetup(ui): + def printrevset(orig, repo, pats, opts): + revs, filematcher = orig(repo, pats, opts) + if opts.get(b'print_revset'): + expr = logrevset(repo, pats, opts) + if expr: + tree = revsetlang.parse(expr) + tree = revsetlang.analyze(tree) + else: + tree = [] + ui = repo.ui + ui.write(b'%s\n' % stringutil.pprint(opts.get(b'rev', []))) + ui.write(revsetlang.prettyformat(tree) + b'\n') + ui.write(stringutil.prettyrepr(revs) + b'\n') + revs = smartset.baseset() # display no revisions + return revs, filematcher + extensions.wrapfunction(logcmdutil, 'getrevs', printrevset) + aliases, entry = cmdutil.findcmd(b'log', commands.table) + entry[1].append((b'', b'print-revset', False, + b'print generated revset and exit (DEPRECATED)'))
--- a/tests/run-tests.py Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/run-tests.py Mon Aug 20 09:48:08 2018 -0700 @@ -285,12 +285,12 @@ If path does not exist, return an empty set. """ - cases = set() + cases = [] try: with open(path, 'rb') as f: for l in f: if l.startswith(b'#testcases '): - cases.update(l[11:].split()) + cases.append(sorted(l[11:].split())) except IOError as ex: if ex.errno != errno.ENOENT: raise @@ -1068,6 +1068,7 @@ env["HGUSER"] = "test" env["HGENCODING"] = "ascii" env["HGENCODINGMODE"] = "strict" + env["HGHOSTNAME"] = "test-hostname" env['HGIPV6'] = str(int(self._useipv6)) extraextensions = [] @@ -1242,14 +1243,15 @@ def __init__(self, path, *args, **kwds): # accept an extra "case" parameter - case = kwds.pop('case', None) + case = kwds.pop('case', []) self._case = case - self._allcases = parsettestcases(path) + self._allcases = {x for y in parsettestcases(path) for x in y} super(TTest, self).__init__(path, *args, **kwds) if case: - self.name = '%s#%s' % (self.name, _strpath(case)) - self.errpath = b'%s.%s.err' % (self.errpath[:-4], case) - self._tmpname += b'-%s' % case + casepath = b'#'.join(case) + self.name = '%s#%s' % (self.name, _strpath(casepath)) + self.errpath = b'%s#%s.err' % (self.errpath[:-4], casepath) + self._tmpname += b'-%s' % casepath self._have = {} @property @@ -1323,10 +1325,10 @@ reqs = [] for arg in args: if arg.startswith(b'no-') and arg[3:] in self._allcases: - if arg[3:] == self._case: + if arg[3:] in self._case: return False elif arg in self._allcases: - if arg != self._case: + if arg not in self._case: return False else: reqs.append(arg) @@ -1370,10 +1372,11 @@ if os.getenv('MSYSTEM'): script.append(b'alias pwd="pwd -W"\n') if self._case: + casestr = b'#'.join(self._case) if isinstance(self._case, str): - quoted = shellquote(self._case) + quoted = shellquote(casestr) else: - quoted = shellquote(self._case.decode('utf8')).encode('utf8') + quoted = shellquote(casestr.decode('utf8')).encode('utf8') script.append(b'TESTCASE=%s\n' % quoted) script.append(b'export TESTCASE\n') @@ -2666,31 +2669,42 @@ expanded_args.append(arg) args = expanded_args - testcasepattern = re.compile(br'([\w-]+\.t|py)(#([a-zA-Z0-9_\-\.]+))') + testcasepattern = re.compile(br'([\w-]+\.t|py)(#([a-zA-Z0-9_\-\.#]+))') tests = [] for t in args: - case = None + case = [] if not (os.path.basename(t).startswith(b'test-') and (t.endswith(b'.py') or t.endswith(b'.t'))): m = testcasepattern.match(t) if m is not None: - t, _, case = m.groups() + t, _, casestr = m.groups() + if casestr: + case = casestr.split(b'#') else: continue if t.endswith(b'.t'): # .t file may contain multiple test cases - cases = sorted(parsettestcases(t)) - if cases: - if case is not None and case in cases: - tests += [{'path': t, 'case': case}] - elif case is not None and case not in cases: + casedimensions = parsettestcases(t) + if casedimensions: + cases = [] + def addcases(case, casedimensions): + if not casedimensions: + cases.append(case) + else: + for c in casedimensions[0]: + addcases(case + [c], casedimensions[1:]) + addcases([], casedimensions) + if case and case in cases: + cases = [case] + elif case: # Ignore invalid cases - pass + cases = [] else: - tests += [{'path': t, 'case': c} for c in sorted(cases)] + pass + tests += [{'path': t, 'case': c} for c in sorted(cases)] else: tests.append({'path': t}) else: @@ -2701,7 +2715,7 @@ def _reloadtest(test, i): # convert a test back to its description dict desc = {'path': test.path} - case = getattr(test, '_case', None) + case = getattr(test, '_case', []) 
if case: desc['case'] = case return self._gettest(desc, i) @@ -2713,7 +2727,8 @@ desc = testdescs[0] # desc['path'] is a relative path if 'case' in desc: - errpath = b'%s.%s.err' % (desc['path'], desc['case']) + casestr = b'#'.join(desc['case']) + errpath = b'%s#%s.err' % (desc['path'], casestr) else: errpath = b'%s.err' % desc['path'] errpath = os.path.join(self._outputdir, errpath)
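With this change a .t file may carry several '#testcases' lines; each line becomes one dimension of case names and the test runs once per combination. The recursive addcases() above builds the cartesian product of those dimensions, which is equivalent to itertools.product (the case names below are hypothetical):

import itertools

# Two "#testcases" lines become two dimensions of case names:
casedimensions = [['flat', 'tree'], ['v1', 'v2']]

# Equivalent of the recursive addcases() in the hunk above:
cases = [list(combo) for combo in itertools.product(*casedimensions)]
print(cases)
# [['flat', 'v1'], ['flat', 'v2'], ['tree', 'v1'], ['tree', 'v2']]

# Combinations are joined with '#' for display names and .err files:
print(['#'.join(c) for c in cases])  # ['flat#v1', ..., 'tree#v2']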
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-absorb-edit-lines.t Mon Aug 20 09:48:08 2018 -0700 @@ -0,0 +1,61 @@ + $ cat >> $HGRCPATH << EOF + > [extensions] + > absorb= + > EOF + + $ hg init repo1 + $ cd repo1 + +Make some commits: + + $ for i in 1 2 3; do + > echo $i >> a + > hg commit -A a -m "commit $i" -q + > done + +absorb --edit-lines will run the editor if filename is provided: + + $ hg absorb --edit-lines + nothing applied + [1] + $ HGEDITOR=cat hg absorb --edit-lines a + HG: editing a + HG: "y" means the line to the right exists in the changeset to the top + HG: + HG: /---- 4ec16f85269a commit 1 + HG: |/--- 5c5f95224a50 commit 2 + HG: ||/-- 43f0a75bede7 commit 3 + HG: ||| + yyy : 1 + yy : 2 + y : 3 + nothing applied + [1] + +Edit the file using --edit-lines: + + $ cat > editortext << EOF + > y : a + > yy : b + > y : c + > yy : d + > y y : e + > y : f + > yyy : g + > EOF + $ HGEDITOR='cat editortext >' hg absorb -q --edit-lines a + $ hg cat -r 0 a + d + e + f + g + $ hg cat -r 1 a + b + c + d + g + $ hg cat -r 2 a + a + b + e + g
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-absorb-filefixupstate.py	Mon Aug 20 09:48:08 2018 -0700 @@ -0,0 +1,207 @@ +from __future__ import absolute_import, print_function + +import itertools +from mercurial import pycompat +from hgext import absorb + +class simplefctx(object): + def __init__(self, content): + self.content = content + + def data(self): + return self.content + +def insertreturns(x): + # insert "\n"s after each single char + if isinstance(x, bytes): + return b''.join(ch + b'\n' for ch in pycompat.bytestr(x)) + else: + return pycompat.maplist(insertreturns, x) + +def removereturns(x): + # the reverse of "insertreturns" + if isinstance(x, bytes): + return x.replace(b'\n', b'') + else: + return pycompat.maplist(removereturns, x) + +def assertlistequal(lhs, rhs, decorator=lambda x: x): + if lhs != rhs: + raise RuntimeError('mismatch:\n actual: %r\n expected: %r' + % tuple(map(decorator, [lhs, rhs]))) + +def testfilefixup(oldcontents, workingcopy, expectedcontents, fixups=None): + """([str], str, [str], [(rev, a1, a2, b1, b2)]?) -> None + + workingcopy is a string, of which every character denotes a single line. + + oldcontents, expectedcontents are lists of strings, every character of + every string denotes a single line. + + if fixups is not None, it's the expected fixups list and will be checked. + """ + expectedcontents = insertreturns(expectedcontents) + oldcontents = insertreturns(oldcontents) + workingcopy = insertreturns(workingcopy) + state = absorb.filefixupstate(pycompat.maplist(simplefctx, oldcontents)) + state.diffwith(simplefctx(workingcopy)) + if fixups is not None: + assertlistequal(state.fixups, fixups) + state.apply() + assertlistequal(state.finalcontents, expectedcontents, removereturns) + +def buildcontents(linesrevs): + # linesrevs: [(linecontent : str, revs : [int])] + revs = set(itertools.chain(*[revs for line, revs in linesrevs])) + return [b''] + [ + b''.join([l for l, rs in linesrevs if r in rs]) + for r in sorted(revs) + ] + +# input case 0: one single commit +case0 = [b'', b'11'] + +# replace a single chunk +testfilefixup(case0, b'', [b'', b'']) +testfilefixup(case0, b'2', [b'', b'2']) +testfilefixup(case0, b'22', [b'', b'22']) +testfilefixup(case0, b'222', [b'', b'222']) + +# input case 1: 3 lines, each commit adds one line +case1 = buildcontents([ + (b'1', [1, 2, 3]), + (b'2', [ 2, 3]), + (b'3', [ 3]), +]) + +# 1:1 line mapping +testfilefixup(case1, b'123', case1) +testfilefixup(case1, b'12c', [b'', b'1', b'12', b'12c']) +testfilefixup(case1, b'1b3', [b'', b'1', b'1b', b'1b3']) +testfilefixup(case1, b'1bc', [b'', b'1', b'1b', b'1bc']) +testfilefixup(case1, b'a23', [b'', b'a', b'a2', b'a23']) +testfilefixup(case1, b'a2c', [b'', b'a', b'a2', b'a2c']) +testfilefixup(case1, b'ab3', [b'', b'a', b'ab', b'ab3']) +testfilefixup(case1, b'abc', [b'', b'a', b'ab', b'abc']) + +# non 1:1 edits +testfilefixup(case1, b'abcd', case1) +testfilefixup(case1, b'ab', case1) + +# deletion +testfilefixup(case1, b'', [b'', b'', b'', b'']) +testfilefixup(case1, b'1', [b'', b'1', b'1', b'1']) +testfilefixup(case1, b'2', [b'', b'', b'2', b'2']) +testfilefixup(case1, b'3', [b'', b'', b'', b'3']) +testfilefixup(case1, b'13', [b'', b'1', b'1', b'13']) + +# replaces +testfilefixup(case1, b'1bb3', [b'', b'1', b'1bb', b'1bb3']) + +# (confusing) replaces +testfilefixup(case1, b'1bbb', case1) +testfilefixup(case1, b'bbbb', case1) +testfilefixup(case1, b'bbb3', case1) +testfilefixup(case1, b'1b', case1) +testfilefixup(case1, b'bb', case1) +testfilefixup(case1, b'b3', 
case1) + +# insertions at the beginning and the end +testfilefixup(case1, b'123c', [b'', b'1', b'12', b'123c']) +testfilefixup(case1, b'a123', [b'', b'a1', b'a12', b'a123']) + +# (confusing) insertions +testfilefixup(case1, b'1a23', case1) +testfilefixup(case1, b'12b3', case1) + +# input case 2: delete in the middle +case2 = buildcontents([ + (b'11', [1, 2]), + (b'22', [1 ]), + (b'33', [1, 2]), +]) + +# deletion (optimize code should make it 2 chunks) +testfilefixup(case2, b'', [b'', b'22', b''], + fixups=[(4, 0, 2, 0, 0), (4, 2, 4, 0, 0)]) + +# 1:1 line mapping +testfilefixup(case2, b'aaaa', [b'', b'aa22aa', b'aaaa']) + +# non 1:1 edits +# note: unlike case0, the chunk is not "continuous" and no edit allowed +testfilefixup(case2, b'aaa', case2) + +# input case 3: rev 3 reverts rev 2 +case3 = buildcontents([ + (b'1', [1, 2, 3]), + (b'2', [ 2 ]), + (b'3', [1, 2, 3]), +]) + +# 1:1 line mapping +testfilefixup(case3, b'13', case3) +testfilefixup(case3, b'1b', [b'', b'1b', b'12b', b'1b']) +testfilefixup(case3, b'a3', [b'', b'a3', b'a23', b'a3']) +testfilefixup(case3, b'ab', [b'', b'ab', b'a2b', b'ab']) + +# non 1:1 edits +testfilefixup(case3, b'a', case3) +testfilefixup(case3, b'abc', case3) + +# deletion +testfilefixup(case3, b'', [b'', b'', b'2', b'']) + +# insertion +testfilefixup(case3, b'a13c', [b'', b'a13c', b'a123c', b'a13c']) + +# input case 4: a slightly complex case +case4 = buildcontents([ + (b'1', [1, 2, 3]), + (b'2', [ 2, 3]), + (b'3', [1, 2, ]), + (b'4', [1, 3]), + (b'5', [ 3]), + (b'6', [ 2, 3]), + (b'7', [ 2 ]), + (b'8', [ 2, 3]), + (b'9', [ 3]), +]) + +testfilefixup(case4, b'1245689', case4) +testfilefixup(case4, b'1a2456bbb', case4) +testfilefixup(case4, b'1abc5689', case4) +testfilefixup(case4, b'1ab5689', [b'', b'134', b'1a3678', b'1ab5689']) +testfilefixup(case4, b'aa2bcd8ee', [b'', b'aa34', b'aa23d78', b'aa2bcd8ee']) +testfilefixup(case4, b'aa2bcdd8ee',[b'', b'aa34', b'aa23678', b'aa24568ee']) +testfilefixup(case4, b'aaaaaa', case4) +testfilefixup(case4, b'aa258b', [b'', b'aa34', b'aa2378', b'aa258b']) +testfilefixup(case4, b'25bb', [b'', b'34', b'23678', b'25689']) +testfilefixup(case4, b'27', [b'', b'34', b'23678', b'245689']) +testfilefixup(case4, b'28', [b'', b'34', b'2378', b'28']) +testfilefixup(case4, b'', [b'', b'34', b'37', b'']) + +# input case 5: replace a small chunk which is near a deleted line +case5 = buildcontents([ + (b'12', [1, 2]), + (b'3', [1]), + (b'4', [1, 2]), +]) + +testfilefixup(case5, b'1cd4', [b'', b'1cd34', b'1cd4']) + +# input case 6: base "changeset" is immutable +case6 = [b'1357', b'0125678'] + +testfilefixup(case6, b'0125678', case6) +testfilefixup(case6, b'0a25678', case6) +testfilefixup(case6, b'0a256b8', case6) +testfilefixup(case6, b'abcdefg', [b'1357', b'a1c5e7g']) +testfilefixup(case6, b'abcdef', case6) +testfilefixup(case6, b'', [b'1357', b'157']) +testfilefixup(case6, b'0123456789', [b'1357', b'0123456789']) + +# input case 7: change an empty file +case7 = [b''] + +testfilefixup(case7, b'1', case7)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-absorb-phase.t Mon Aug 20 09:48:08 2018 -0700 @@ -0,0 +1,30 @@ + $ cat >> $HGRCPATH << EOF + > [extensions] + > absorb= + > drawdag=$RUNTESTDIR/drawdag.py + > EOF + + $ hg init + $ hg debugdrawdag <<'EOS' + > C + > | + > B + > | + > A + > EOS + + $ hg phase -r A --public -q + $ hg phase -r C --secret --force -q + + $ hg update C -q + $ printf B1 > B + + $ hg absorb -q + + $ hg log -G -T '{desc} {phase}' + @ C secret + | + o B draft + | + o A public +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-absorb-rename.t Mon Aug 20 09:48:08 2018 -0700 @@ -0,0 +1,359 @@ + $ cat >> $HGRCPATH << EOF + > [diff] + > git=1 + > [extensions] + > absorb= + > EOF + + $ sedi() { # workaround check-code + > pattern="$1" + > shift + > for i in "$@"; do + > sed "$pattern" "$i" > "$i".tmp + > mv "$i".tmp "$i" + > done + > } + +rename a to b, then b to a + + $ hg init repo1 + $ cd repo1 + + $ echo 1 > a + $ hg ci -A a -m 1 + $ hg mv a b + $ echo 2 >> b + $ hg ci -m 2 + $ hg mv b a + $ echo 3 >> a + $ hg ci -m 3 + + $ hg annotate -ncf a + 0 eff892de26ec a: 1 + 1 bf56e1f4f857 b: 2 + 2 0b888b00216c a: 3 + + $ sedi 's/$/a/' a + $ hg absorb -pq + showing changes for a + @@ -0,3 +0,3 @@ + eff892d -1 + bf56e1f -2 + 0b888b0 -3 + eff892d +1a + bf56e1f +2a + 0b888b0 +3a + + $ hg status + + $ hg annotate -ncf a + 0 5d1c5620e6f2 a: 1a + 1 9a14ffe67ae9 b: 2a + 2 9191d121a268 a: 3a + +when the first changeset is public + + $ hg phase --public -r 0 + + $ sedi 's/a/A/' a + + $ hg absorb -pq + showing changes for a + @@ -0,3 +0,3 @@ + -1a + 9a14ffe -2a + 9191d12 -3a + +1A + 9a14ffe +2A + 9191d12 +3A + + $ hg diff + diff --git a/a b/a + --- a/a + +++ b/a + @@ -1,3 +1,3 @@ + -1a + +1A + 2A + 3A + +copy a to b + + $ cd .. + $ hg init repo2 + $ cd repo2 + + $ echo 1 > a + $ hg ci -A a -m 1 + $ hg cp a b + $ echo 2 >> b + $ hg ci -m 2 + + $ hg log -T '{rev}:{node|short} {desc}\n' + 1:17b72129ab68 2 + 0:eff892de26ec 1 + + $ sedi 's/$/a/' a + $ sedi 's/$/b/' b + + $ hg absorb -pq + showing changes for a + @@ -0,1 +0,1 @@ + eff892d -1 + eff892d +1a + showing changes for b + @@ -0,2 +0,2 @@ + -1 + 17b7212 -2 + +1b + 17b7212 +2b + + $ hg diff + diff --git a/b b/b + --- a/b + +++ b/b + @@ -1,2 +1,2 @@ + -1 + +1b + 2b + +copy b to a + + $ cd .. + $ hg init repo3 + $ cd repo3 + + $ echo 1 > b + $ hg ci -A b -m 1 + $ hg cp b a + $ echo 2 >> a + $ hg ci -m 2 + + $ hg log -T '{rev}:{node|short} {desc}\n' + 1:e62c256d8b24 2 + 0:55105f940d5c 1 + + $ sedi 's/$/a/' a + $ sedi 's/$/a/' b + + $ hg absorb -pq + showing changes for a + @@ -0,2 +0,2 @@ + -1 + e62c256 -2 + +1a + e62c256 +2a + showing changes for b + @@ -0,1 +0,1 @@ + 55105f9 -1 + 55105f9 +1a + + $ hg diff + diff --git a/a b/a + --- a/a + +++ b/a + @@ -1,2 +1,2 @@ + -1 + +1a + 2a + +"move" b to both a and c, follow a - sorted alphabetically + + $ cd .. 
+ $ hg init repo4 + $ cd repo4 + + $ echo 1 > b + $ hg ci -A b -m 1 + $ hg cp b a + $ hg cp b c + $ hg rm b + $ echo 2 >> a + $ echo 3 >> c + $ hg commit -m cp + + $ hg log -T '{rev}:{node|short} {desc}\n' + 1:366daad8e679 cp + 0:55105f940d5c 1 + + $ sedi 's/$/a/' a + $ sedi 's/$/c/' c + + $ hg absorb -pq + showing changes for a + @@ -0,2 +0,2 @@ + 55105f9 -1 + 366daad -2 + 55105f9 +1a + 366daad +2a + showing changes for c + @@ -0,2 +0,2 @@ + -1 + 366daad -3 + +1c + 366daad +3c + + $ hg log -G -p -T '{rev}:{node|short} {desc}\n' + @ 1:70606019f91b cp + | diff --git a/b b/a + | rename from b + | rename to a + | --- a/b + | +++ b/a + | @@ -1,1 +1,2 @@ + | 1a + | +2a + | diff --git a/b b/c + | copy from b + | copy to c + | --- a/b + | +++ b/c + | @@ -1,1 +1,2 @@ + | -1a + | +1 + | +3c + | + o 0:bfb67c3539c1 1 + diff --git a/b b/b + new file mode 100644 + --- /dev/null + +++ b/b + @@ -0,0 +1,1 @@ + +1a + +run absorb again would apply the change to c + + $ hg absorb -pq + showing changes for c + @@ -0,1 +0,1 @@ + 7060601 -1 + 7060601 +1c + + $ hg log -G -p -T '{rev}:{node|short} {desc}\n' + @ 1:8bd536cce368 cp + | diff --git a/b b/a + | rename from b + | rename to a + | --- a/b + | +++ b/a + | @@ -1,1 +1,2 @@ + | 1a + | +2a + | diff --git a/b b/c + | copy from b + | copy to c + | --- a/b + | +++ b/c + | @@ -1,1 +1,2 @@ + | -1a + | +1c + | +3c + | + o 0:bfb67c3539c1 1 + diff --git a/b b/b + new file mode 100644 + --- /dev/null + +++ b/b + @@ -0,0 +1,1 @@ + +1a + +"move" b to a, c and d, follow d if a gets renamed to e, and c is deleted + + $ cd .. + $ hg init repo5 + $ cd repo5 + + $ echo 1 > b + $ hg ci -A b -m 1 + $ hg cp b a + $ hg cp b c + $ hg cp b d + $ hg rm b + $ echo 2 >> a + $ echo 3 >> c + $ echo 4 >> d + $ hg commit -m cp + $ hg mv a e + $ hg rm c + $ hg commit -m mv + + $ hg log -T '{rev}:{node|short} {desc}\n' + 2:49911557c471 mv + 1:7bc3d43ede83 cp + 0:55105f940d5c 1 + + $ sedi 's/$/e/' e + $ sedi 's/$/d/' d + + $ hg absorb -pq + showing changes for d + @@ -0,2 +0,2 @@ + 55105f9 -1 + 7bc3d43 -4 + 55105f9 +1d + 7bc3d43 +4d + showing changes for e + @@ -0,2 +0,2 @@ + -1 + 7bc3d43 -2 + +1e + 7bc3d43 +2e + + $ hg diff + diff --git a/e b/e + --- a/e + +++ b/e + @@ -1,2 +1,2 @@ + -1 + +1e + 2e + + $ hg log -G -p -T '{rev}:{node|short} {desc}\n' + @ 2:34be9b0c786e mv + | diff --git a/c b/c + | deleted file mode 100644 + | --- a/c + | +++ /dev/null + | @@ -1,2 +0,0 @@ + | -1 + | -3 + | diff --git a/a b/e + | rename from a + | rename to e + | + o 1:13e56db5948d cp + | diff --git a/b b/a + | rename from b + | rename to a + | --- a/b + | +++ b/a + | @@ -1,1 +1,2 @@ + | -1d + | +1 + | +2e + | diff --git a/b b/c + | copy from b + | copy to c + | --- a/b + | +++ b/c + | @@ -1,1 +1,2 @@ + | -1d + | +1 + | +3 + | diff --git a/b b/d + | copy from b + | copy to d + | --- a/b + | +++ b/d + | @@ -1,1 +1,2 @@ + | 1d + | +4d + | + o 0:0037613a5dc6 1 + diff --git a/b b/b + new file mode 100644 + --- /dev/null + +++ b/b + @@ -0,0 +1,1 @@ + +1d +
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-absorb-strip.t	Mon Aug 20 09:48:08 2018 -0700
@@ -0,0 +1,45 @@
+Do not strip innocent children. See https://bitbucket.org/facebook/hg-experimental/issues/6/hg-absorb-merges-diverged-commits
+
+  $ cat >> $HGRCPATH << EOF
+  > [extensions]
+  > absorb=
+  > drawdag=$RUNTESTDIR/drawdag.py
+  > EOF
+
+  $ hg init
+  $ hg debugdrawdag << EOF
+  > E
+  > |
+  > D F
+  > |/
+  > C
+  > |
+  > B
+  > |
+  > A
+  > EOF
+
+  $ hg up E -q
+  $ echo 1 >> B
+  $ echo 2 >> D
+  $ hg absorb
+  saved backup bundle to * (glob)
+  2 of 2 chunk(s) applied
+
+  $ hg log -G -T '{desc}'
+  @  E
+  |
+  o  D
+  |
+  o  C
+  |
+  o  B
+  |
+  | o  F
+  | |
+  | o  C
+  | |
+  | o  B
+  |/
+  o  A
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-absorb.t Mon Aug 20 09:48:08 2018 -0700 @@ -0,0 +1,451 @@ + $ cat >> $HGRCPATH << EOF + > [extensions] + > absorb= + > EOF + + $ sedi() { # workaround check-code + > pattern="$1" + > shift + > for i in "$@"; do + > sed "$pattern" "$i" > "$i".tmp + > mv "$i".tmp "$i" + > done + > } + + $ hg init repo1 + $ cd repo1 + +Do not crash with empty repo: + + $ hg absorb + abort: no changeset to change + [255] + +Make some commits: + + $ for i in 1 2 3 4 5; do + > echo $i >> a + > hg commit -A a -m "commit $i" -q + > done + + $ hg annotate a + 0: 1 + 1: 2 + 2: 3 + 3: 4 + 4: 5 + +Change a few lines: + + $ cat > a <<EOF + > 1a + > 2b + > 3 + > 4d + > 5e + > EOF + +Preview absorb changes: + + $ hg absorb --print-changes --dry-run + showing changes for a + @@ -0,2 +0,2 @@ + 4ec16f8 -1 + 5c5f952 -2 + 4ec16f8 +1a + 5c5f952 +2b + @@ -3,2 +3,2 @@ + ad8b8b7 -4 + 4f55fa6 -5 + ad8b8b7 +4d + 4f55fa6 +5e + +Run absorb: + + $ hg absorb + saved backup bundle to * (glob) + 2 of 2 chunk(s) applied + $ hg annotate a + 0: 1a + 1: 2b + 2: 3 + 3: 4d + 4: 5e + +Delete a few lines and related commits will be removed if they will be empty: + + $ cat > a <<EOF + > 2b + > 4d + > EOF + $ hg absorb + saved backup bundle to * (glob) + 3 of 3 chunk(s) applied + $ hg annotate a + 1: 2b + 2: 4d + $ hg log -T '{rev} {desc}\n' -Gp + @ 2 commit 4 + | diff -r 1cae118c7ed8 -r 58a62bade1c6 a + | --- a/a Thu Jan 01 00:00:00 1970 +0000 + | +++ b/a Thu Jan 01 00:00:00 1970 +0000 + | @@ -1,1 +1,2 @@ + | 2b + | +4d + | + o 1 commit 2 + | diff -r 84add69aeac0 -r 1cae118c7ed8 a + | --- a/a Thu Jan 01 00:00:00 1970 +0000 + | +++ b/a Thu Jan 01 00:00:00 1970 +0000 + | @@ -0,0 +1,1 @@ + | +2b + | + o 0 commit 1 + + +Non 1:1 map changes will be ignored: + + $ echo 1 > a + $ hg absorb + nothing applied + [1] + +Insertaions: + + $ cat > a << EOF + > insert before 2b + > 2b + > 4d + > insert aftert 4d + > EOF + $ hg absorb -q + $ hg status + $ hg annotate a + 1: insert before 2b + 1: 2b + 2: 4d + 2: insert aftert 4d + +Bookmarks are moved: + + $ hg bookmark -r 1 b1 + $ hg bookmark -r 2 b2 + $ hg bookmark ba + $ hg bookmarks + b1 1:b35060a57a50 + b2 2:946e4bc87915 + * ba 2:946e4bc87915 + $ sedi 's/insert/INSERT/' a + $ hg absorb -q + $ hg status + $ hg bookmarks + b1 1:a4183e9b3d31 + b2 2:c9b20c925790 + * ba 2:c9b20c925790 + +Non-mofified files are ignored: + + $ touch b + $ hg commit -A b -m b + $ touch c + $ hg add c + $ hg rm b + $ hg absorb + nothing applied + [1] + $ sedi 's/INSERT/Insert/' a + $ hg absorb + saved backup bundle to * (glob) + 2 of 2 chunk(s) applied + $ hg status + A c + R b + +Public commits will not be changed: + + $ hg phase -p 1 + $ sedi 's/Insert/insert/' a + $ hg absorb -pn + showing changes for a + @@ -0,1 +0,1 @@ + -Insert before 2b + +insert before 2b + @@ -3,1 +3,1 @@ + 85b4e0e -Insert aftert 4d + 85b4e0e +insert aftert 4d + $ hg absorb + saved backup bundle to * (glob) + 1 of 2 chunk(s) applied + $ hg diff -U 0 + diff -r 1c8eadede62a a + --- a/a Thu Jan 01 00:00:00 1970 +0000 + +++ b/a * (glob) + @@ -1,1 +1,1 @@ + -Insert before 2b + +insert before 2b + $ hg annotate a + 1: Insert before 2b + 1: 2b + 2: 4d + 2: insert aftert 4d + +Make working copy clean: + + $ hg revert -q -C a b + $ hg forget c + $ rm c + $ hg status + +Merge commit will not be changed: + + $ echo 1 > m1 + $ hg commit -A m1 -m m1 + $ hg bookmark -q -i m1 + $ hg update -q '.^' + $ echo 2 > m2 + $ hg commit -q -A m2 -m m2 + $ hg merge -q m1 + $ hg commit -m merge + $ hg bookmark -d m1 + $ hg 
log -G -T '{rev} {desc} {phase}\n' + @ 6 merge draft + |\ + | o 5 m2 draft + | | + o | 4 m1 draft + |/ + o 3 b draft + | + o 2 commit 4 draft + | + o 1 commit 2 public + | + o 0 commit 1 public + + $ echo 2 >> m1 + $ echo 2 >> m2 + $ hg absorb + abort: no changeset to change + [255] + $ hg revert -q -C m1 m2 + +Use a new repo: + + $ cd .. + $ hg init repo2 + $ cd repo2 + +Make some commits to multiple files: + + $ for f in a b; do + > for i in 1 2; do + > echo $f line $i >> $f + > hg commit -A $f -m "commit $f $i" -q + > done + > done + +Use pattern to select files to be fixed up: + + $ sedi 's/line/Line/' a b + $ hg status + M a + M b + $ hg absorb a + saved backup bundle to * (glob) + 1 of 1 chunk(s) applied + $ hg status + M b + $ hg absorb --exclude b + nothing applied + [1] + $ hg absorb b + saved backup bundle to * (glob) + 1 of 1 chunk(s) applied + $ hg status + $ cat a b + a Line 1 + a Line 2 + b Line 1 + b Line 2 + +Test config option absorb.max-stack-size: + + $ sedi 's/Line/line/' a b + $ hg log -T '{rev}:{node} {desc}\n' + 3:712d16a8f445834e36145408eabc1d29df05ec09 commit b 2 + 2:74cfa6294160149d60adbf7582b99ce37a4597ec commit b 1 + 1:28f10dcf96158f84985358a2e5d5b3505ca69c22 commit a 2 + 0:f9a81da8dc53380ed91902e5b82c1b36255a4bd0 commit a 1 + $ hg --config absorb.max-stack-size=1 absorb -pn + absorb: only the recent 1 changesets will be analysed + showing changes for a + @@ -0,2 +0,2 @@ + -a Line 1 + -a Line 2 + +a line 1 + +a line 2 + showing changes for b + @@ -0,2 +0,2 @@ + -b Line 1 + 712d16a -b Line 2 + +b line 1 + 712d16a +b line 2 + +Test obsolete markers creation: + + $ cat >> $HGRCPATH << EOF + > [experimental] + > evolution=createmarkers + > [absorb] + > add-noise=1 + > EOF + + $ hg --config absorb.max-stack-size=3 absorb + absorb: only the recent 3 changesets will be analysed + 2 of 2 chunk(s) applied + $ hg log -T '{rev}:{node|short} {desc} {get(extras, "absorb_source")}\n' + 6:3dfde4199b46 commit b 2 712d16a8f445834e36145408eabc1d29df05ec09 + 5:99cfab7da5ff commit b 1 74cfa6294160149d60adbf7582b99ce37a4597ec + 4:fec2b3bd9e08 commit a 2 28f10dcf96158f84985358a2e5d5b3505ca69c22 + 0:f9a81da8dc53 commit a 1 + $ hg absorb + 1 of 1 chunk(s) applied + $ hg log -T '{rev}:{node|short} {desc} {get(extras, "absorb_source")}\n' + 10:e1c8c1e030a4 commit b 2 3dfde4199b4610ea6e3c6fa9f5bdad8939d69524 + 9:816c30955758 commit b 1 99cfab7da5ffdaf3b9fc6643b14333e194d87f46 + 8:5867d584106b commit a 2 fec2b3bd9e0834b7cb6a564348a0058171aed811 + 7:8c76602baf10 commit a 1 f9a81da8dc53380ed91902e5b82c1b36255a4bd0 + +Executable files: + + $ cat >> $HGRCPATH << EOF + > [diff] + > git=True + > EOF + $ cd .. + $ hg init repo3 + $ cd repo3 + +#if execbit + $ echo > foo.py + $ chmod +x foo.py + $ hg add foo.py + $ hg commit -mfoo +#else + $ hg import -q --bypass - <<EOF + > # HG changeset patch + > foo + > + > diff --git a/foo.py b/foo.py + > new file mode 100755 + > --- /dev/null + > +++ b/foo.py + > @@ -0,0 +1,1 @@ + > + + > EOF + $ hg up -q +#endif + + $ echo bla > foo.py + $ hg absorb --dry-run --print-changes + showing changes for foo.py + @@ -0,1 +0,1 @@ + 99b4ae7 - + 99b4ae7 +bla + $ hg absorb + 1 of 1 chunk(s) applied + $ hg diff -c . + diff --git a/foo.py b/foo.py + new file mode 100755 + --- /dev/null + +++ b/foo.py + @@ -0,0 +1,1 @@ + +bla + $ hg diff + +Remove lines may delete changesets: + + $ cd .. 
+ $ hg init repo4 + $ cd repo4 + $ cat > a <<EOF + > 1 + > 2 + > EOF + $ hg commit -m a12 -A a + $ cat > b <<EOF + > 1 + > 2 + > EOF + $ hg commit -m b12 -A b + $ echo 3 >> b + $ hg commit -m b3 + $ echo 4 >> b + $ hg commit -m b4 + $ echo 1 > b + $ echo 3 >> a + $ hg absorb -pn + showing changes for a + @@ -2,0 +2,1 @@ + bfafb49 +3 + showing changes for b + @@ -1,3 +1,0 @@ + 1154859 -2 + 30970db -3 + a393a58 -4 + $ hg absorb -v | grep became + bfafb49242db: 1 file(s) changed, became 1a2de97fc652 + 115485984805: 2 file(s) changed, became 0c930dfab74c + 30970dbf7b40: became empty and was dropped + a393a58b9a85: became empty and was dropped + $ hg log -T '{rev} {desc}\n' -Gp + @ 5 b12 + | diff --git a/b b/b + | new file mode 100644 + | --- /dev/null + | +++ b/b + | @@ -0,0 +1,1 @@ + | +1 + | + o 4 a12 + diff --git a/a b/a + new file mode 100644 + --- /dev/null + +++ b/a + @@ -0,0 +1,3 @@ + +1 + +2 + +3 + + +Use revert to make the current change and its parent disappear. +This should move us to the non-obsolete ancestor. + + $ cd .. + $ hg init repo5 + $ cd repo5 + $ cat > a <<EOF + > 1 + > 2 + > EOF + $ hg commit -m a12 -A a + $ hg id + bfafb49242db tip + $ echo 3 >> a + $ hg commit -m a123 a + $ echo 4 >> a + $ hg commit -m a1234 a + $ hg id + 82dbe7fd19f0 tip + $ hg revert -r 0 a + $ hg absorb -pn + showing changes for a + @@ -2,2 +2,0 @@ + f1c23dd -3 + 82dbe7f -4 + $ hg absorb --verbose + f1c23dd5d08d: became empty and was dropped + 82dbe7fd19f0: became empty and was dropped + a: 1 of 1 chunk(s) applied + $ hg id + bfafb49242db tip
--- a/tests/test-add.t	Sun Aug 19 13:27:02 2018 +0900
+++ b/tests/test-add.t	Mon Aug 20 09:48:08 2018 -0700
@@ -12,6 +12,9 @@
   $ hg forget a
   $ hg add
   adding a
+  $ hg forget a
+  $ hg add --color debug
+  [addremove.added ui.status|adding a]
   $ hg st
   A a
   $ mkdir dir
--- a/tests/test-addremove.t	Sun Aug 19 13:27:02 2018 +0900
+++ b/tests/test-addremove.t	Mon Aug 20 09:48:08 2018 -0700
@@ -69,6 +69,12 @@
   removing c
   adding d
   recording removal of a as rename to b (100% similar)
+  $ hg addremove -ns 50 --color debug
+  [addremove.removed ui.status|removing a]
+  [addremove.added ui.status|adding b]
+  [addremove.removed ui.status|removing c]
+  [addremove.added ui.status|adding d]
+  [ ui.status|recording removal of a as rename to b (100% similar)]
  $ hg addremove -s 50
  removing a
  adding b
--- a/tests/test-alias.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-alias.t Mon Aug 20 09:48:08 2018 -0700 @@ -651,81 +651,15 @@ $ hg --invalid root hg: option --invalid not recognized - Mercurial Distributed SCM - - basic commands: - - add add the specified files on the next commit - annotate show changeset information by line for each file - clone make a copy of an existing repository - commit commit the specified files or all outstanding changes - diff diff repository (or selected files) - export dump the header and diffs for one or more changesets - forget forget the specified files on the next commit - init create a new repository in the given directory - log show revision history of entire repository or files - merge merge another revision into working directory - pull pull changes from the specified source - push push changes to the specified destination - remove remove the specified files on the next commit - serve start stand-alone webserver - status show changed files in the working directory - summary summarize working directory state - update update working directory (or switch revisions) - - (use 'hg help' for the full list of commands or 'hg -v' for details) + (use 'hg help -v' for a list of global options) [255] $ hg --invalid mylog hg: option --invalid not recognized - Mercurial Distributed SCM - - basic commands: - - add add the specified files on the next commit - annotate show changeset information by line for each file - clone make a copy of an existing repository - commit commit the specified files or all outstanding changes - diff diff repository (or selected files) - export dump the header and diffs for one or more changesets - forget forget the specified files on the next commit - init create a new repository in the given directory - log show revision history of entire repository or files - merge merge another revision into working directory - pull pull changes from the specified source - push push changes to the specified destination - remove remove the specified files on the next commit - serve start stand-alone webserver - status show changed files in the working directory - summary summarize working directory state - update update working directory (or switch revisions) - - (use 'hg help' for the full list of commands or 'hg -v' for details) + (use 'hg help -v' for a list of global options) [255] $ hg --invalid blank hg: option --invalid not recognized - Mercurial Distributed SCM - - basic commands: - - add add the specified files on the next commit - annotate show changeset information by line for each file - clone make a copy of an existing repository - commit commit the specified files or all outstanding changes - diff diff repository (or selected files) - export dump the header and diffs for one or more changesets - forget forget the specified files on the next commit - init create a new repository in the given directory - log show revision history of entire repository or files - merge merge another revision into working directory - pull pull changes from the specified source - push push changes to the specified destination - remove remove the specified files on the next commit - serve start stand-alone webserver - status show changed files in the working directory - summary summarize working directory state - update update working directory (or switch revisions) - - (use 'hg help' for the full list of commands or 'hg -v' for details) + (use 'hg help -v' for a list of global options) [255] environment variable changes in alias commands
--- a/tests/test-amend.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-amend.t Mon Aug 20 09:48:08 2018 -0700 @@ -331,3 +331,37 @@ ? missing_content2_content2-untracked ? missing_content2_content3-untracked ? missing_missing_content3-untracked + +========================================== +Test history-editing-backup config option| +========================================== + $ hg init $TESTTMP/repo4 + $ cd $TESTTMP/repo4 + $ echo a>a + $ hg ci -Aqma + $ echo oops>b + $ hg ci -Aqm "b" + $ echo partiallyfixed > b + +#if obsstore-off + $ hg amend + saved backup bundle to $TESTTMP/repo4/.hg/strip-backup/95e899acf2ce-f11cb050-amend.hg +When history-editing-backup config option is set: + $ cat << EOF >> $HGRCPATH + > [ui] + > history-editing-backup = False + > EOF + $ echo fixed > b + $ hg amend + +#else + $ hg amend +When history-editing-backup config option is set: + $ cat << EOF >> $HGRCPATH + > [ui] + > history-editing-backup = False + > EOF + $ echo fixed > b + $ hg amend + +#endif
--- a/tests/test-bad-extension.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-bad-extension.t Mon Aug 20 09:48:08 2018 -0700 @@ -72,23 +72,56 @@ $ hg --config extensions.badexts=showbadexts.py showbadexts 2>&1 | grep '^BADEXTS' BADEXTS: badext badext2 +#if no-extraextensions show traceback for ImportError of hgext.name if devel.debug.extensions is set $ (hg help help --traceback --debug --config devel.debug.extensions=yes 2>&1) \ > | grep -v '^ ' \ > | egrep 'extension..[^p]|^Exception|Traceback|ImportError|not import' + debug.extensions: loading extensions + debug.extensions: - processing 5 entries + debug.extensions: - loading extension: 'gpg' + debug.extensions: > 'gpg' extension loaded in * (glob) + debug.extensions: - validating extension tables: 'gpg' + debug.extensions: - invoking registered callbacks: 'gpg' + debug.extensions: > callbacks completed in * (glob) + debug.extensions: - loading extension: 'badext' *** failed to import extension badext from $TESTTMP/badext.py: bit bucket overflow Traceback (most recent call last): Exception: bit bucket overflow - could not import hgext.badext2 (No module named *badext2): trying hgext3rd.badext2 (glob) + debug.extensions: - loading extension: 'baddocext' + debug.extensions: > 'baddocext' extension loaded in * (glob) + debug.extensions: - validating extension tables: 'baddocext' + debug.extensions: - invoking registered callbacks: 'baddocext' + debug.extensions: > callbacks completed in * (glob) + debug.extensions: - loading extension: 'badext2' + debug.extensions: - could not import hgext.badext2 (No module named badext2): trying hgext3rd.badext2 Traceback (most recent call last): ImportError: No module named *badext2 (glob) - could not import hgext3rd.badext2 (No module named *badext2): trying badext2 (glob) + debug.extensions: - could not import hgext3rd.badext2 (No module named badext2): trying badext2 Traceback (most recent call last): ImportError: No module named *badext2 (glob) *** failed to import extension badext2: No module named badext2 Traceback (most recent call last): ImportError: No module named badext2 + debug.extensions: > loaded 2 extensions, total time * (glob) + debug.extensions: - loading configtable attributes + debug.extensions: - executing uisetup hooks + debug.extensions: - running uisetup for 'gpg' + debug.extensions: > uisetup for 'gpg' took * (glob) + debug.extensions: - running uisetup for 'baddocext' + debug.extensions: > uisetup for 'baddocext' took * (glob) + debug.extensions: - executing extsetup hooks + debug.extensions: - running extsetup for 'gpg' + debug.extensions: > extsetup for 'gpg' took * (glob) + debug.extensions: - running extsetup for 'baddocext' + debug.extensions: > extsetup for 'baddocext' took * (glob) + debug.extensions: - executing remaining aftercallbacks + debug.extensions: > remaining aftercallbacks completed in * (glob) + debug.extensions: - loading extension registration objects + debug.extensions: > extension registration object loading took * (glob) + debug.extensions: extension loading complete +#endif confirm that there's no crash when an extension's documentation is bad
--- a/tests/test-check-code.t	Sun Aug 19 13:27:02 2018 +0900
+++ b/tests/test-check-code.t	Mon Aug 20 09:48:08 2018 -0700
@@ -22,7 +22,7 @@
   >>> commands = []
   >>> with open('mercurial/debugcommands.py', 'rb') as fh:
   ...     for line in fh:
-  ...         m = re.match("^@command\('([a-z]+)", line)
+  ...         m = re.match(b"^@command\('([a-z]+)", line)
   ...         if m:
   ...             commands.append(m.group(1))
   >>> scommands = list(sorted(commands))
--- a/tests/test-check-py3-compat.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-check-py3-compat.t Mon Aug 20 09:48:08 2018 -0700 @@ -26,17 +26,23 @@ $ testrepohg files 'set:(**.py) - grep(pygments)' \ > -X hgdemandimport/demandimportpy2.py \ > -X hgext/fsmonitor/pywatchman \ + > -X mercurial/thirdparty/cbor \ > | sed 's|\\|/|g' | xargs $PYTHON3 contrib/check-py3-compat.py \ > | sed 's/[0-9][0-9]*)$/*)/' - hgext/convert/transport.py: error importing: <*Error> No module named 'svn.client' (error at transport.py:*) (glob) - mercurial/cffi/bdiff.py: error importing: <ImportError> cannot import name '_bdiff' (error at bdiff.py:*) - mercurial/cffi/bdiffbuild.py: error importing: <ImportError> No module named 'cffi' (error at bdiffbuild.py:*) - mercurial/cffi/mpatch.py: error importing: <ImportError> cannot import name '_mpatch' (error at mpatch.py:*) - mercurial/cffi/mpatchbuild.py: error importing: <ImportError> No module named 'cffi' (error at mpatchbuild.py:*) - mercurial/cffi/osutilbuild.py: error importing: <ImportError> No module named 'cffi' (error at osutilbuild.py:*) - mercurial/scmwindows.py: error importing: <*Error> No module named 'msvcrt' (error at win32.py:*) (glob) - mercurial/win32.py: error importing: <*Error> No module named 'msvcrt' (error at win32.py:*) (glob) - mercurial/windows.py: error importing: <*Error> No module named 'msvcrt' (error at windows.py:*) (glob) + contrib/python-zstandard/setup.py not using absolute_import + contrib/python-zstandard/setup_zstd.py not using absolute_import + contrib/python-zstandard/tests/common.py not using absolute_import + contrib/python-zstandard/tests/test_buffer_util.py not using absolute_import + contrib/python-zstandard/tests/test_compressor.py not using absolute_import + contrib/python-zstandard/tests/test_compressor_fuzzing.py not using absolute_import + contrib/python-zstandard/tests/test_data_structures.py not using absolute_import + contrib/python-zstandard/tests/test_data_structures_fuzzing.py not using absolute_import + contrib/python-zstandard/tests/test_decompressor.py not using absolute_import + contrib/python-zstandard/tests/test_decompressor_fuzzing.py not using absolute_import + contrib/python-zstandard/tests/test_estimate_sizes.py not using absolute_import + contrib/python-zstandard/tests/test_module_attributes.py not using absolute_import + contrib/python-zstandard/tests/test_train_dictionary.py not using absolute_import + setup.py not using absolute_import #endif
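For context on the "not using absolute_import" lines above: contrib/check-py3-compat.py flags files missing Mercurial's Python 2/3 import convention, and the vendored python-zstandard sources are third-party code, so their lines are simply accepted as expected output rather than fixed. For Mercurial's own code, passing the check takes one declaration:

    # With this future import, a bare "import foo" is always an absolute
    # import under Python 2, matching Python 3 semantics.
    from __future__ import absolute_import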
--- a/tests/test-clone.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-clone.t Mon Aug 20 09:48:08 2018 -0700 @@ -47,6 +47,7 @@ checklink (symlink !) checklink-target (symlink !) checknoexec (execbit !) + manifestfulltextcache rbc-names-v1 rbc-revs-v1 @@ -641,7 +642,7 @@ $ mkdir a $ chmod 000 a $ hg clone a b - abort: repository a not found! + abort: Permission denied: '$TESTTMP/fail/a/.hg' [255] Inaccessible destination @@ -664,7 +665,7 @@ $ mkfifo a $ hg clone a b - abort: repository a not found! + abort: $ENOTDIR$: '$TESTTMP/fail/a/.hg' [255] $ rm a
--- a/tests/test-completion.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-completion.t Mon Aug 20 09:48:08 2018 -0700 @@ -98,6 +98,7 @@ debugknown debuglabelcomplete debuglocks + debugmanifestfulltextcache debugmergestate debugnamecomplete debugobsolete @@ -273,7 +274,7 @@ debugdiscovery: old, nonheads, rev, ssh, remotecmd, insecure debugdownload: output debugextensions: template - debugfileset: rev, all-files + debugfileset: rev, all-files, show-matcher, show-stage debugformat: template debugfsinfo: debuggetbundle: head, common, type @@ -284,6 +285,7 @@ debugknown: debuglabelcomplete: debuglocks: force-lock, force-wlock, set-lock, set-wlock + debugmanifestfulltextcache: clear, add debugmergestate: debugnamecomplete: debugobsolete: flags, record-parents, rev, exclusive, index, delete, date, user, template
--- a/tests/test-conflict.t	Sun Aug 19 13:27:02 2018 +0900
+++ b/tests/test-conflict.t	Mon Aug 20 09:48:08 2018 -0700
@@ -58,7 +58,7 @@
   # To mark files as resolved:  hg resolve --mark FILE
 
   # To continue:    hg commit
-  # To abort:       hg update --clean . (warning: this will discard uncommitted changes)
+  # To abort:       hg merge --abort
 
   $ cat a
--- a/tests/test-contrib-perf.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-contrib-perf.t Mon Aug 20 09:48:08 2018 -0700 @@ -55,6 +55,8 @@ benchmark parsing bookmarks from disk to memory perfbranchmap benchmark the update of a branchmap + perfbranchmapload + benchmark reading the branchmap perfbundleread Benchmark reading of bundle files. perfcca (no help text available) @@ -82,6 +84,8 @@ (no help text available) perfheads (no help text available) perfindex (no help text available) + perflinelogedits + (no help text available) perfloadmarkers benchmark the time to parse the on-disk markers for a repo perflog (no help text available) @@ -156,6 +160,7 @@ #endif $ hg perfheads $ hg perfindex + $ hg perflinelogedits -n 1 $ hg perfloadmarkers $ hg perflog $ hg perflookup 2
--- a/tests/test-convert-bzr-merges.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-convert-bzr-merges.t Mon Aug 20 09:48:08 2018 -0700 @@ -83,20 +83,17 @@ $ hg -R hg2hg out source-hg -T compact comparing with source-hg searching for changes - 5[tip]:4,3 6bd55e826939 2009-10-10 08:00 +0100 foo - (octopus merge fixup) - -XXX: The manifest lines should probably agree, to avoid changing the hash when -converting hg -> hg + no changes found + [1] $ hg -R source-hg log --debug -r tip - changeset: 5:b209510f11b2c987f920749cd8e352aa4b3230f2 + changeset: 5:6bd55e8269392769783345686faf7ff7b3b0215d branch: source tag: tip phase: draft parent: 4:1dc38c377bb35eeea4fa955056fbe4440d54a743 parent: 3:4aaba1bfb426b8941bbf63f9dd52301152695164 - manifest: 5:1109e42bdcbd1f51baa69bc91079011d77057dbb + manifest: 4:daa315d56a98ba20811fdd0d9d575861f65cfa8c user: Foo Bar <foo.bar@example.com> date: Sat Oct 10 08:00:04 2009 +0100 extra: branch=source
--- a/tests/test-convert-filemap.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-convert-filemap.t Mon Aug 20 09:48:08 2018 -0700 @@ -780,7 +780,7 @@ converting... 0 3 $ hg -R .-hg log -G -T '{shortest(node)} {desc}\n{files % "- {file}\n"}\n' - o e9ed 3 + o bbfe 3 |\ | o 33a0 2 | | - f
--- a/tests/test-convert-svn-branches.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-convert-svn-branches.t Mon Aug 20 09:48:08 2018 -0700 @@ -85,8 +85,8 @@ $ hg branches newbranch 11:a6d7cc050ad1 default 10:6e2b33404495 - old 9:93c4b0f99529 - old2 8:b52884d7bead (inactive) + old 9:1b494af68c0b + old2 8:5be40b8dcbf6 (inactive) $ hg tags -q tip $ cd ..
--- a/tests/test-convert-svn-encoding.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-convert-svn-encoding.t Mon Aug 20 09:48:08 2018 -0700 @@ -52,6 +52,7 @@ 5 init projA source: svn:afeb9c47-92ff-4c0c-9f72-e1f6eb8ac9af/trunk@1 converting: 0/6 revisions (0.00%) + reusing manifest from p1 (no file change) committing changelog updating the branch cache 4 hello @@ -118,6 +119,7 @@ converting: 4/6 revisions (66.67%) reparent to file:/*/$TESTTMP/svn-repo/branches/branch%C3%A9 (glob) scanning paths: /branches/branch\xc3\xa9 0/1 paths (0.00%) (esc) + reusing manifest from p1 (no file change) committing changelog updating the branch cache 0 branch to branch?e @@ -125,6 +127,7 @@ converting: 5/6 revisions (83.33%) reparent to file:/*/$TESTTMP/svn-repo/branches/branch%C3%A9e (glob) scanning paths: /branches/branch\xc3\xa9e 0/1 paths (0.00%) (esc) + reusing manifest from p1 (no file change) committing changelog updating the branch cache reparent to file:/*/$TESTTMP/svn-repo (glob)
--- a/tests/test-convert.t	Sun Aug 19 13:27:02 2018 +0900
+++ b/tests/test-convert.t	Mon Aug 20 09:48:08 2018 -0700
@@ -533,9 +533,11 @@
 
 test bogus URL
 
+#if no-msys
   $ hg convert -q bzr+ssh://foobar@selenic.com/baz baz
   abort: bzr+ssh://foobar@selenic.com/baz: missing or unsupported repository
   [255]
+#endif
 
 test revset converted() lookup
--- a/tests/test-debugcommands.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-debugcommands.t Mon Aug 20 09:48:08 2018 -0700 @@ -15,6 +15,39 @@ adding a $ hg ci -Am make-it-full #if reporevlogstore + $ hg debugrevlog -c + format : 1 + flags : inline + + revisions : 3 + merges : 0 ( 0.00%) + normal : 3 (100.00%) + revisions : 3 + empty : 0 ( 0.00%) + text : 0 (100.00%) + delta : 0 (100.00%) + snapshot : 3 (100.00%) + lvl-0 : 3 (100.00%) + deltas : 0 ( 0.00%) + revision size : 191 + snapshot : 191 (100.00%) + lvl-0 : 191 (100.00%) + deltas : 0 ( 0.00%) + + chunks : 3 + 0x75 (u) : 3 (100.00%) + chunks size : 191 + 0x75 (u) : 191 (100.00%) + + avg chain length : 0 + max chain length : 0 + max chain reach : 67 + compression ratio : 0 + + uncompressed data size (min/max/avg) : 57 / 66 / 62 + full revision size (min/max/avg) : 58 / 67 / 63 + inter-snapshot size (min/max/avg) : 0 / 0 / 0 + delta size (min/max/avg) : 0 / 0 / 0 $ hg debugrevlog -m format : 1 flags : inline, generaldelta @@ -23,10 +56,15 @@ merges : 0 ( 0.00%) normal : 3 (100.00%) revisions : 3 - full : 3 (100.00%) + empty : 1 (33.33%) + text : 1 (100.00%) + delta : 0 ( 0.00%) + snapshot : 2 (66.67%) + lvl-0 : 2 (66.67%) deltas : 0 ( 0.00%) revision size : 88 - full : 88 (100.00%) + snapshot : 88 (100.00%) + lvl-0 : 88 (100.00%) deltas : 0 ( 0.00%) chunks : 3 @@ -42,7 +80,41 @@ compression ratio : 0 uncompressed data size (min/max/avg) : 0 / 43 / 28 - full revision size (min/max/avg) : 0 / 44 / 29 + full revision size (min/max/avg) : 44 / 44 / 44 + inter-snapshot size (min/max/avg) : 0 / 0 / 0 + delta size (min/max/avg) : 0 / 0 / 0 + $ hg debugrevlog a + format : 1 + flags : inline, generaldelta + + revisions : 1 + merges : 0 ( 0.00%) + normal : 1 (100.00%) + revisions : 1 + empty : 0 ( 0.00%) + text : 0 (100.00%) + delta : 0 (100.00%) + snapshot : 1 (100.00%) + lvl-0 : 1 (100.00%) + deltas : 0 ( 0.00%) + revision size : 3 + snapshot : 3 (100.00%) + lvl-0 : 3 (100.00%) + deltas : 0 ( 0.00%) + + chunks : 1 + 0x75 (u) : 1 (100.00%) + chunks size : 3 + 0x75 (u) : 3 (100.00%) + + avg chain length : 0 + max chain length : 0 + max chain reach : 3 + compression ratio : 0 + + uncompressed data size (min/max/avg) : 2 / 2 / 2 + full revision size (min/max/avg) : 3 / 3 / 3 + inter-snapshot size (min/max/avg) : 0 / 0 / 0 delta size (min/max/avg) : 0 / 0 / 0 #endif @@ -411,6 +483,7 @@ $ ls -r .hg/cache/* .hg/cache/rbc-revs-v1 .hg/cache/rbc-names-v1 + .hg/cache/manifestfulltextcache .hg/cache/branch2-served Test debugcolor
--- a/tests/test-diff-color.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-diff-color.t Mon Aug 20 09:48:08 2018 -0700 @@ -22,7 +22,7 @@ > c > EOF $ hg ci -Am adda - adding a + \x1b[0;32madding a\x1b[0m (esc) $ cat > a <<EOF > c > c @@ -218,7 +218,7 @@ $ hg init sub $ echo b > sub/b $ hg -R sub commit -Am 'create sub' - adding b + \x1b[0;32madding b\x1b[0m (esc) $ echo 'sub = sub' > .hgsub $ hg add .hgsub $ hg commit -m 'add subrepo sub'
--- a/tests/test-extension.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-extension.t Mon Aug 20 09:48:08 2018 -0700 @@ -1255,8 +1255,10 @@ > pass > EOF $ hg --config extensions.path=./path.py help foo > /dev/null - abort: no such help topic: foo - (try 'hg help --keyword foo') + abort: no such help topic: foo (no-windows !) + (try 'hg help --keyword foo') (no-windows !) + \x1b[0;31mabort: no such help topic: foo\x1b[0m (esc) (windows !) + \x1b[0;31m(try 'hg help --keyword foo')\x1b[0m (esc) (windows !) [255] $ cat > throw.py <<EOF @@ -1540,6 +1542,7 @@ reposetup() for $TESTTMP/reposetup-test/src reposetup() for $TESTTMP/reposetup-test/src (chg !) +#if no-extraextensions $ hg --cwd src debugextensions reposetup() for $TESTTMP/reposetup-test/src dodo (untested!) @@ -1547,6 +1550,7 @@ mq reposetuptest (untested!) strip +#endif $ hg clone -U src clone-dst1 reposetup() for $TESTTMP/reposetup-test/src @@ -1683,6 +1687,7 @@ *** failed to import extension deprecatedcmd from $TESTTMP/deprecated/deprecatedcmd.py: missing attributes: norepo, optionalrepo, inferrepo *** (use @command decorator to register 'deprecatedcmd') hg: unknown command 'deprecatedcmd' + (use 'hg help' for a list of commands) [255] the extension shouldn't be loaded at all so the mq works:
--- a/tests/test-fileset.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-fileset.t Mon Aug 20 09:48:08 2018 -0700 @@ -18,13 +18,19 @@ $ fileset -v a1 (symbol 'a1') + * matcher: + <patternmatcher patterns='(?:a1$)'> a1 $ fileset -v 'a*' (symbol 'a*') + * matcher: + <patternmatcher patterns='(?:a[^/]*$)'> a1 a2 $ fileset -v '"re:a\d"' (string 're:a\\d') + * matcher: + <patternmatcher patterns='(?:a\\d)'> a1 a2 $ fileset -v '!re:"a\d"' @@ -32,6 +38,10 @@ (kindpat (symbol 're') (string 'a\\d'))) + * matcher: + <predicatenmatcher + pred=<not + <patternmatcher patterns='(?:a\\d)'>>> b1 b2 $ fileset -v 'path:a1 or glob:b?' @@ -42,10 +52,12 @@ (kindpat (symbol 'glob') (symbol 'b?'))) + * matcher: + <patternmatcher patterns='(?:a1(?:/|$)|b.$)'> a1 b1 b2 - $ fileset -v 'a1 or a2' + $ fileset -v --no-show-matcher 'a1 or a2' (or (symbol 'a1') (symbol 'a2')) @@ -97,6 +109,15 @@ None)) hg: parse error: can't use negate operator in this context [255] + $ fileset -p parsed 'a, b, c' + * parsed: + (list + (symbol 'a') + (symbol 'b') + (symbol 'c')) + hg: parse error: can't use a list in this context + (see 'hg help "filesets.x or y"') + [255] $ fileset '"path":.' hg: parse error: not a symbol @@ -114,6 +135,183 @@ hg: parse error: invalid pattern kind: foo [255] +Show parsed tree at stages: + + $ fileset -p unknown a + abort: invalid stage name: unknown + [255] + + $ fileset -p parsed 'path:a1 or glob:b?' + * parsed: + (or + (kindpat + (symbol 'path') + (symbol 'a1')) + (kindpat + (symbol 'glob') + (symbol 'b?'))) + a1 + b1 + b2 + + $ fileset -p all -s 'a1 or a2 or (grep("b") & clean())' + * parsed: + (or + (symbol 'a1') + (symbol 'a2') + (group + (and + (func + (symbol 'grep') + (string 'b')) + (func + (symbol 'clean') + None)))) + * analyzed: + (or + (symbol 'a1') + (symbol 'a2') + (and + (func + (symbol 'grep') + (string 'b')) + (withstatus + (func + (symbol 'clean') + None) + (string 'clean')))) + * optimized: + (or + (patterns + (symbol 'a1') + (symbol 'a2')) + (and + (withstatus + (func + (symbol 'clean') + None) + (string 'clean')) + (func + (symbol 'grep') + (string 'b')))) + * matcher: + <unionmatcher matchers=[ + <patternmatcher patterns='(?:a1$|a2$)'>, + <intersectionmatcher + m1=<predicatenmatcher pred=clean>, + m2=<predicatenmatcher pred=grep('b')>>]> + a1 + a2 + b1 + b2 + +Union of basic patterns: + + $ fileset -p optimized -s -r. 'a1 or a2 or path:b1' + * optimized: + (patterns + (symbol 'a1') + (symbol 'a2') + (kindpat + (symbol 'path') + (symbol 'b1'))) + * matcher: + <patternmatcher patterns='(?:a1$|a2$|b1(?:/|$))'> + a1 + a2 + b1 + +OR expression should be reordered by weight: + + $ fileset -p optimized -s -r. 
'grep("a") or a1 or grep("b") or b2' + * optimized: + (or + (patterns + (symbol 'a1') + (symbol 'b2')) + (func + (symbol 'grep') + (string 'a')) + (func + (symbol 'grep') + (string 'b'))) + * matcher: + <unionmatcher matchers=[ + <patternmatcher patterns='(?:a1$|b2$)'>, + <predicatenmatcher pred=grep('a')>, + <predicatenmatcher pred=grep('b')>]> + a1 + a2 + b1 + b2 + +Use differencematcher for 'x and not y': + + $ fileset -p optimized -s 'a* and not a1' + * optimized: + (minus + (symbol 'a*') + (symbol 'a1')) + * matcher: + <differencematcher + m1=<patternmatcher patterns='(?:a[^/]*$)'>, + m2=<patternmatcher patterns='(?:a1$)'>> + a2 + + $ fileset -p optimized -s '!binary() and a*' + * optimized: + (minus + (symbol 'a*') + (func + (symbol 'binary') + None)) + * matcher: + <differencematcher + m1=<patternmatcher patterns='(?:a[^/]*$)'>, + m2=<predicatenmatcher pred=binary>> + a1 + a2 + +'x - y' is rewritten to 'x and not y' first so the operands can be reordered: + + $ fileset -p analyzed -p optimized -s 'a* - a1' + * analyzed: + (and + (symbol 'a*') + (not + (symbol 'a1'))) + * optimized: + (minus + (symbol 'a*') + (symbol 'a1')) + * matcher: + <differencematcher + m1=<patternmatcher patterns='(?:a[^/]*$)'>, + m2=<patternmatcher patterns='(?:a1$)'>> + a2 + + $ fileset -p analyzed -p optimized -s 'binary() - a*' + * analyzed: + (and + (func + (symbol 'binary') + None) + (not + (symbol 'a*'))) + * optimized: + (and + (not + (symbol 'a*')) + (func + (symbol 'binary') + None)) + * matcher: + <intersectionmatcher + m1=<predicatenmatcher + pred=<not + <patternmatcher patterns='(?:a[^/]*$)'>>>, + m2=<predicatenmatcher pred=binary>> + Test files status $ rm a1 @@ -180,6 +378,156 @@ b2 c1 +Test insertion of status hints + + $ fileset -p optimized 'added()' + * optimized: + (withstatus + (func + (symbol 'added') + None) + (string 'added')) + c1 + + $ fileset -p optimized 'a* & removed()' + * optimized: + (and + (symbol 'a*') + (withstatus + (func + (symbol 'removed') + None) + (string 'removed'))) + a2 + + $ fileset -p optimized 'a* - removed()' + * optimized: + (minus + (symbol 'a*') + (withstatus + (func + (symbol 'removed') + None) + (string 'removed'))) + a1 + + $ fileset -p analyzed -p optimized '(added() + removed()) - a*' + * analyzed: + (and + (withstatus + (or + (func + (symbol 'added') + None) + (func + (symbol 'removed') + None)) + (string 'added removed')) + (not + (symbol 'a*'))) + * optimized: + (and + (not + (symbol 'a*')) + (withstatus + (or + (func + (symbol 'added') + None) + (func + (symbol 'removed') + None)) + (string 'added removed'))) + c1 + + $ fileset -p optimized 'a* + b* + added() + unknown()' + * optimized: + (withstatus + (or + (patterns + (symbol 'a*') + (symbol 'b*')) + (func + (symbol 'added') + None) + (func + (symbol 'unknown') + None)) + (string 'added unknown')) + a1 + a2 + b1 + b2 + c1 + c3 + + $ fileset -p analyzed -p optimized 'removed() & missing() & a*' + * analyzed: + (and + (withstatus + (and + (func + (symbol 'removed') + None) + (func + (symbol 'missing') + None)) + (string 'removed missing')) + (symbol 'a*')) + * optimized: + (and + (symbol 'a*') + (withstatus + (and + (func + (symbol 'removed') + None) + (func + (symbol 'missing') + None)) + (string 'removed missing'))) + + $ fileset -p optimized 'clean() & revs(0, added())' + * optimized: + (and + (withstatus + (func + (symbol 'clean') + None) + (string 'clean')) + (func + (symbol 'revs') + (list + (symbol '0') + (withstatus + (func + (symbol 'added') + None) + (string 'added'))))) + b1 + + $ fileset 
-p optimized 'clean() & status(null, 0, b* & added())' + * optimized: + (and + (withstatus + (func + (symbol 'clean') + None) + (string 'clean')) + (func + (symbol 'status') + (list + (symbol 'null') + (symbol '0') + (and + (symbol 'b*') + (withstatus + (func + (symbol 'added') + None) + (string 'added')))))) + b1 + Test files properties >>> open('bin', 'wb').write(b'\0a') and None @@ -194,6 +542,19 @@ $ fileset 'binary()' bin + $ fileset -p optimized -s 'binary() and b*' + * optimized: + (and + (symbol 'b*') + (func + (symbol 'binary') + None)) + * matcher: + <intersectionmatcher + m1=<patternmatcher patterns='(?:b[^/]*$)'>, + m2=<predicatenmatcher pred=binary>> + bin + $ fileset 'grep("b{1}")' .hgignore b1 @@ -231,7 +592,7 @@ [255] $ fileset '(1k, 2k)' hg: parse error: can't use a list in this context - (see hg help "filesets.x or y") + (see 'hg help "filesets.x or y"') [255] $ fileset 'size(1k)' 1k
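A note on the optimized trees above: cheap basic patterns are folded into a single patternmatcher, while expensive predicates such as grep() or binary() sit behind it in a unionmatcher or intersectionmatcher, so they only run when the patterns alone cannot decide. A rough standalone sketch of why that ordering pays off (plain Python; illustrative only, not the Mercurial matcher API):

    import re

    def makematcher(patterns, predicates):
        # Fold the basic patterns into one compiled regex, mirroring
        # <patternmatcher patterns='(?:a1$|b2$)'> above; costly predicates
        # run only when no pattern decided the question first.
        rx = re.compile('|'.join('(?:%s$)' % p for p in patterns))
        def matches(name, data):
            return bool(rx.match(name)) or any(p(data) for p in predicates)
        return matches

    m = makematcher(['a1', 'b2'], [lambda data: b'a' in data])
    assert m('a1', b'')     # decided by the regex alone
    assert m('zz', b'xax')  # falls through to the grep-like predicate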
--- a/tests/test-fix.t	Sun Aug 19 13:27:02 2018 +0900
+++ b/tests/test-fix.t	Mon Aug 20 09:48:08 2018 -0700
@@ -502,12 +502,13 @@
 
   $ cd ..
 
-When a fixer prints to stderr, we assume that it has failed. We should show the
-error messages to the user, and we should not let the failing fixer affect the
-file it was fixing (many code formatters might emit error messages on stderr
-and nothing on stdout, which would cause us the clear the file). We show the
-user which fixer failed and which revision, but we assume that the fixer will
-print the filename if it is relevant.
+When a fixer prints to stderr, we don't assume that it has failed. We show the
+error messages to the user, and we still let the fixer affect the file it was
+fixing if its exit code is zero. Some code formatters might emit error messages
+on stderr and nothing on stdout, which would cause us to clear the file, except
+that they also exit with a non-zero code. We show the user which fixer emitted
+the stderr, and which revision, but we assume that the fixer will print the
+filename if it is relevant (since the issue may be non-specific).
 
   $ hg init showstderr
   $ cd showstderr
@@ -515,17 +516,37 @@
   $ printf "hello\n" > hello.txt
   $ hg add
   adding hello.txt
-  $ cat >> $TESTTMP/cmd.sh <<'EOF'
+  $ cat > $TESTTMP/fail.sh <<'EOF'
   > printf 'HELLO\n'
   > printf "$@: some\nerror" >&2
+  > exit 0 # success despite the stderr output
   > EOF
-  $ hg --config "fix.fail:command=sh $TESTTMP/cmd.sh {rootpath}" \
+  $ hg --config "fix.fail:command=sh $TESTTMP/fail.sh {rootpath}" \
   > --config "fix.fail:fileset=hello.txt" \
   > fix --working-dir
   [wdir] fail: hello.txt: some
   [wdir] fail: error
   $ cat hello.txt
-  hello
+  HELLO
+
+  $ printf "goodbye\n" > hello.txt
+  $ cat > $TESTTMP/work.sh <<'EOF'
+  > printf 'GOODBYE\n'
+  > printf "$@: some\nerror\n" >&2
+  > exit 42 # failure despite the stdout output
+  > EOF
+  $ hg --config "fix.fail:command=sh $TESTTMP/work.sh {rootpath}" \
+  > --config "fix.fail:fileset=hello.txt" \
+  > fix --working-dir
+  [wdir] fail: hello.txt: some
+  [wdir] fail: error
+  $ cat hello.txt
+  goodbye
+
+  $ hg --config "fix.fail:command=exit 42" \
+  > --config "fix.fail:fileset=hello.txt" \
+  > fix --working-dir
+  [wdir] fail: exited with status 42
 
   $ cd ..
 
@@ -830,9 +851,9 @@
 
   $ hg fix -r 0:2
   $ hg log --graph --template '{node|shortest} {files}'
-  o  3801 bar.whole
+  o  b4e2 bar.whole
   |
-  o  38cc
+  o  59f4
   |
   | @  bc05 bar.whole
   | |
--- a/tests/test-fncache.t	Sun Aug 19 13:27:02 2018 +0900
+++ b/tests/test-fncache.t	Mon Aug 20 09:48:08 2018 -0700
@@ -88,6 +88,7 @@
   .hg/00manifest.i
   .hg/cache
   .hg/cache/branch2-served
+  .hg/cache/manifestfulltextcache
   .hg/cache/rbc-names-v1
   .hg/cache/rbc-revs-v1
   .hg/data
@@ -121,6 +122,7 @@
   .hg/00changelog.i
   .hg/cache
   .hg/cache/branch2-served
+  .hg/cache/manifestfulltextcache
   .hg/cache/rbc-names-v1
   .hg/cache/rbc-revs-v1
   .hg/dirstate
--- a/tests/test-generaldelta.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-generaldelta.t Mon Aug 20 09:48:08 2018 -0700 @@ -267,7 +267,7 @@ 51 4 3 50 prev 356 594 611 1.02862 611 0 0.00000 52 4 4 51 p1 58 640 669 1.04531 669 0 0.00000 53 5 1 -1 base 0 0 0 0.00000 0 0 0.00000 - 54 5 2 53 p1 376 640 376 0.58750 376 0 0.00000 + 54 6 1 -1 base 369 640 369 0.57656 369 0 0.00000 $ hg clone --pull source-repo --config experimental.maxdeltachainspan=2800 relax-chain --config format.generaldelta=yes requesting all changes adding changesets @@ -333,7 +333,7 @@ 51 2 13 17 p1 58 594 739 1.24411 2781 2042 2.76319 52 5 1 -1 base 369 640 369 0.57656 369 0 0.00000 53 6 1 -1 base 0 0 0 0.00000 0 0 0.00000 - 54 6 2 53 p1 376 640 376 0.58750 376 0 0.00000 + 54 7 1 -1 base 369 640 369 0.57656 369 0 0.00000 $ hg clone --pull source-repo --config experimental.maxdeltachainspan=0 noconst-chain --config format.generaldelta=yes requesting all changes adding changesets @@ -399,4 +399,4 @@ 51 2 13 17 p1 58 594 739 1.24411 2642 1903 2.57510 52 2 14 51 p1 58 640 797 1.24531 2700 1903 2.38770 53 4 1 -1 base 0 0 0 0.00000 0 0 0.00000 - 54 4 2 53 p1 376 640 376 0.58750 376 0 0.00000 + 54 5 1 -1 base 369 640 369 0.57656 369 0 0.00000
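The rev 54 rows above change from a "p1" delta to a fresh "base" (full snapshot) because, with experimental.maxdeltachainspan set, a delta is refused once the chain it would extend spans too large a region of the revlog. A simplified sketch of that distance check (illustrative only; the real logic in mercurial/revlog.py also weighs compression ratio and chain length):

    def exceedsspan(index, base, rev, maxspan):
        # index[r] = (offset, compressedlength) for revision r; the span is
        # the distance from the start of the chain base to the end of the
        # candidate revision. maxspan=0 disables the check entirely.
        startofbase = index[base][0]
        endofrev = index[rev][0] + index[rev][1]
        return bool(maxspan) and (endofrev - startofbase) > maxspan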
--- a/tests/test-glog-beautifygraph.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-glog-beautifygraph.t Mon Aug 20 09:48:08 2018 -0700 @@ -80,52 +80,8 @@ > hg commit -Aqd "$rev 0" -m "($rev) $msg" > } - $ cat > printrevset.py <<EOF - > from __future__ import absolute_import - > from mercurial import ( - > cmdutil, - > commands, - > extensions, - > logcmdutil, - > revsetlang, - > smartset, - > ) - > - > from mercurial.utils import ( - > stringutil, - > ) - > - > def logrevset(repo, pats, opts): - > revs = logcmdutil._initialrevs(repo, opts) - > if not revs: - > return None - > match, pats, slowpath = logcmdutil._makematcher(repo, revs, pats, opts) - > return logcmdutil._makerevset(repo, match, pats, slowpath, opts) - > - > def uisetup(ui): - > def printrevset(orig, repo, pats, opts): - > revs, filematcher = orig(repo, pats, opts) - > if opts.get(b'print_revset'): - > expr = logrevset(repo, pats, opts) - > if expr: - > tree = revsetlang.parse(expr) - > tree = revsetlang.analyze(tree) - > else: - > tree = [] - > ui = repo.ui - > ui.write(b'%r\n' % (opts.get(b'rev', []),)) - > ui.write(revsetlang.prettyformat(tree) + b'\n') - > ui.write(stringutil.prettyrepr(revs) + b'\n') - > revs = smartset.baseset() # display no revisions - > return revs, filematcher - > extensions.wrapfunction(logcmdutil, 'getrevs', printrevset) - > aliases, entry = cmdutil.findcmd(b'log', commands.table) - > entry[1].append((b'', b'print-revset', False, - > b'print generated revset and exit (DEPRECATED)')) - > EOF - $ echo "[extensions]" >> $HGRCPATH - $ echo "printrevset=`pwd`/printrevset.py" >> $HGRCPATH + $ echo "printrevset=$TESTDIR/printrevset.py" >> $HGRCPATH $ echo "beautifygraph=" >> $HGRCPATH Set a default of narrow-text UTF-8. @@ -2043,7 +1999,7 @@ <spanset- 0:7>, <matchfiles patterns=[], include=['set:copied()'] exclude=[], default='relpath', rev=2147483647>> $ testlog -r "sort(file('set:copied()'), -rev)" - ["sort(file('set:copied()'), -rev)"] + ['sort(file(\'set:copied()\'), -rev)'] [] <filteredset <fullreposet- 0:7>,
--- a/tests/test-glog.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-glog.t Mon Aug 20 09:48:08 2018 -0700 @@ -81,49 +81,8 @@ > hg commit -Aqd "$rev 0" -m "($rev) $msg" > } - $ cat > printrevset.py <<EOF - > from __future__ import absolute_import - > from mercurial import ( - > cmdutil, - > commands, - > extensions, - > logcmdutil, - > revsetlang, - > smartset, - > ) - > from mercurial.utils import stringutil - > - > def logrevset(repo, pats, opts): - > revs = logcmdutil._initialrevs(repo, opts) - > if not revs: - > return None - > match, pats, slowpath = logcmdutil._makematcher(repo, revs, pats, opts) - > return logcmdutil._makerevset(repo, match, pats, slowpath, opts) - > - > def uisetup(ui): - > def printrevset(orig, repo, pats, opts): - > revs, filematcher = orig(repo, pats, opts) - > if opts.get(b'print_revset'): - > expr = logrevset(repo, pats, opts) - > if expr: - > tree = revsetlang.parse(expr) - > tree = revsetlang.analyze(tree) - > else: - > tree = [] - > ui = repo.ui - > ui.write(b'%r\n' % (opts.get(b'rev', []),)) - > ui.write(revsetlang.prettyformat(tree) + b'\n') - > ui.write(stringutil.prettyrepr(revs) + b'\n') - > revs = smartset.baseset() # display no revisions - > return revs, filematcher - > extensions.wrapfunction(logcmdutil, 'getrevs', printrevset) - > aliases, entry = cmdutil.findcmd(b'log', commands.table) - > entry[1].append((b'', b'print-revset', False, - > b'print generated revset and exit (DEPRECATED)')) - > EOF - $ echo "[extensions]" >> $HGRCPATH - $ echo "printrevset=`pwd`/printrevset.py" >> $HGRCPATH + $ echo "printrevset=$TESTDIR/printrevset.py" >> $HGRCPATH $ hg init repo $ cd repo @@ -1890,7 +1849,7 @@ <spanset- 0:7>, <matchfiles patterns=[], include=['set:copied()'] exclude=[], default='relpath', rev=2147483647>> $ testlog -r "sort(file('set:copied()'), -rev)" - ["sort(file('set:copied()'), -rev)"] + ['sort(file(\'set:copied()\'), -rev)'] [] <filteredset <fullreposet- 0:7>,
--- a/tests/test-graft.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-graft.t Mon Aug 20 09:48:08 2018 -0700 @@ -237,7 +237,7 @@ # To mark files as resolved: hg resolve --mark FILE # To continue: hg graft --continue - # To abort: hg update --clean . (warning: this will discard uncommitted changes) + # To abort: hg graft --abort Commit while interrupted should fail: @@ -699,8 +699,24 @@ summary: 2 ... grafts of grafts unfortunately can't - $ hg graft -q 13 + $ hg graft -q 13 --debug + scanning for duplicate grafts + grafting 13:7a4785234d87 "2" + searching for copies back to rev 12 + unmatched files in other (from topological common ancestor): + g + unmatched files new in both: + b + resolving manifests + branchmerge: True, force: True, partial: False + ancestor: b592ea63bb0c, local: 7e61b508e709+, remote: 7a4785234d87 + starting 4 threads for background file closing (?) + committing files: + b warning: can't find ancestor for 'b' copied from 'a'! + reusing manifest form p1 (listed files actually unchanged) + committing changelog + updating the branch cache $ hg log -r 'destination(13)' All copies of a cset $ hg log -r 'origin(13) or destination(origin(13))' @@ -731,7 +747,7 @@ date: Thu Jan 01 00:00:00 1970 +0000 summary: 2 - changeset: 22:d1cb6591fa4b + changeset: 22:3a4e92d81b97 branch: dev tag: tip user: foo @@ -743,8 +759,8 @@ $ hg graft 'origin(13) or destination(origin(13))' skipping ancestor revision 21:7e61b508e709 - skipping ancestor revision 22:d1cb6591fa4b - skipping revision 2:5c095ad7e90f (already grafted to 22:d1cb6591fa4b) + skipping ancestor revision 22:3a4e92d81b97 + skipping revision 2:5c095ad7e90f (already grafted to 22:3a4e92d81b97) grafting 7:ef0ef43d49e7 "2" warning: can't find ancestor for 'b' copied from 'a'! grafting 13:7a4785234d87 "2" @@ -758,7 +774,7 @@ $ hg graft 19 0 6 skipping ungraftable merge revision 6 skipping ancestor revision 0:68795b066622 - skipping already grafted revision 19:9627f653b421 (22:d1cb6591fa4b also has origin 2:5c095ad7e90f) + skipping already grafted revision 19:9627f653b421 (22:3a4e92d81b97 also has origin 2:5c095ad7e90f) [255] $ hg graft 19 0 6 --force skipping ungraftable merge revision 6 @@ -773,12 +789,12 @@ $ hg ci -m 28 $ hg backout 28 reverting a - changeset 29:53177ba928f6 backs out changeset 28:50a516bb8b57 + changeset 29:9d95e865b00c backs out changeset 28:cc20d29aec8d $ hg graft 28 - skipping ancestor revision 28:50a516bb8b57 + skipping ancestor revision 28:cc20d29aec8d [255] $ hg graft 28 --force - grafting 28:50a516bb8b57 "28" + grafting 28:cc20d29aec8d "28" merging a $ cat a abc @@ -788,7 +804,7 @@ $ echo def > a $ hg ci -m 31 $ hg graft 28 --force --tool internal:fail - grafting 28:50a516bb8b57 "28" + grafting 28:cc20d29aec8d "28" abort: unresolved conflicts, can't continue (use 'hg resolve' and 'hg graft --continue') [255] @@ -801,7 +817,7 @@ (no more unresolved files) continue: hg graft --continue $ hg graft -c - grafting 28:50a516bb8b57 "28" + grafting 28:cc20d29aec8d "28" $ cat a abc @@ -822,8 +838,8 @@ $ hg tag -f something $ hg graft -qr 27 $ hg graft -f 27 - grafting 27:ed6c7e54e319 "28" - note: graft of 27:ed6c7e54e319 created no changes to commit + grafting 27:17d42b8f5d50 "28" + note: graft of 27:17d42b8f5d50 created no changes to commit $ cd ..
--- a/tests/test-grep.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-grep.t Mon Aug 20 09:48:08 2018 -0700 @@ -18,7 +18,7 @@ pattern error $ hg grep '**test**' - grep: invalid match pattern: nothing to repeat + grep: invalid match pattern: nothing to repeat* (glob) [1] simple @@ -491,3 +491,17 @@ ] $ cd .. + +test -rMULTIREV with --all-files + + $ cd sng + $ hg rm um + $ hg commit -m "deletes um" + $ hg grep -r "0:2" "unmod" --all-files + um:0:unmod + um:1:unmod + $ hg grep -r "0:2" "unmod" --all-files um + um:0:unmod + um:1:unmod + $ cd .. +
--- a/tests/test-hardlinks.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-hardlinks.t Mon Aug 20 09:48:08 2018 -0700 @@ -241,6 +241,7 @@ 2 r4/.hg/cache/checkisexec (execbit !) ? r4/.hg/cache/checklink-target (glob) (symlink !) 2 r4/.hg/cache/checknoexec (execbit !) + 2 r4/.hg/cache/manifestfulltextcache 2 r4/.hg/cache/rbc-names-v1 2 r4/.hg/cache/rbc-revs-v1 2 r4/.hg/dirstate @@ -291,6 +292,7 @@ 2 r4/.hg/cache/checkisexec (execbit !) 2 r4/.hg/cache/checklink-target (symlink !) 2 r4/.hg/cache/checknoexec (execbit !) + 2 r4/.hg/cache/manifestfulltextcache 2 r4/.hg/cache/rbc-names-v1 2 r4/.hg/cache/rbc-revs-v1 1 r4/.hg/dirstate
--- a/tests/test-help.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-help.t Mon Aug 20 09:48:08 2018 -0700 @@ -652,29 +652,7 @@ $ hg skjdfks hg: unknown command 'skjdfks' - Mercurial Distributed SCM - - basic commands: - - add add the specified files on the next commit - annotate show changeset information by line for each file - clone make a copy of an existing repository - commit commit the specified files or all outstanding changes - diff diff repository (or selected files) - export dump the header and diffs for one or more changesets - forget forget the specified files on the next commit - init create a new repository in the given directory - log show revision history of entire repository or files - merge merge another revision into working directory - pull pull changes from the specified source - push push changes to the specified destination - remove remove the specified files on the next commit - serve start stand-alone webserver - status show changed files in the working directory - summary summarize working directory state - update update working directory (or switch revisions) - - (use 'hg help' for the full list of commands or 'hg -v' for details) + (use 'hg help' for a list of commands) [255] Typoed command gives suggestion @@ -966,6 +944,9 @@ debuginstall test Mercurial installation debugknown test whether node ids are known to a repo debuglocks show or modify state of locks + debugmanifestfulltextcache + show, clear or amend the contents of the manifest fulltext + cache debugmergestate print merge state debugnamecomplete @@ -1500,8 +1481,10 @@ $ hg help -c commit > /dev/null $ hg help -e -c commit > /dev/null $ hg help -e commit > /dev/null - abort: no such help topic: commit - (try 'hg help --keyword commit') + abort: no such help topic: commit (no-windows !) + (try 'hg help --keyword commit') (no-windows !) + \x1b[0;31mabort: no such help topic: commit\x1b[0m (esc) (windows !) + \x1b[0;31m(try 'hg help --keyword commit')\x1b[0m (esc) (windows !) [255] Test keyword search help @@ -1848,18 +1831,26 @@ This implies premerge. Therefore, files aren't dumped, if premerge runs successfully. Use :forcedump to forcibly write files out. + (actual capabilities: binary, symlink) + ":fail" Rather than attempting to merge files that were modified on both branches, it marks them as unresolved. The resolve command must be used to resolve these conflicts. + (actual capabilities: binary, symlink) + ":forcedump" Creates three versions of the files as same as :dump, but omits premerge. + (actual capabilities: binary, symlink) + ":local" Uses the local 'p1()' version of files as the merged version. + (actual capabilities: binary, symlink) + ":merge" Uses the internal non-interactive simple merge algorithm for merging files. It will fail if there are any conflicts and leave markers in the @@ -1883,10 +1874,14 @@ ":other" Uses the other 'p2()' version of files as the merged version. + (actual capabilities: binary, symlink) + ":prompt" Asks the user which of the local 'p1()' or the other 'p2()' version to keep as the merged version. + (actual capabilities: binary, symlink) + ":tagmerge" Uses the internal tag merge algorithm (experimental). @@ -1896,7 +1891,8 @@ markers are inserted. Internal tools are always available and do not require a GUI but will by - default not handle symlinks or binary files. + default not handle symlinks or binary files. See next section for detail + about "actual capabilities" described above. 
Choosing a merge tool ===================== @@ -1911,8 +1907,7 @@ must be executable by the shell. 3. If the filename of the file to be merged matches any of the patterns in the merge-patterns configuration section, the first usable merge tool - corresponding to a matching pattern is used. Here, binary capabilities - of the merge tool are not considered. + corresponding to a matching pattern is used. 4. If ui.merge is set it will be considered next. If the value is not the name of a configured tool, the specified value is used and must be executable by the shell. Otherwise the named tool is used if it is @@ -1925,6 +1920,21 @@ internal ":merge" is used. 8. Otherwise, ":prompt" is used. + For historical reason, Mercurial assumes capabilities of internal merge + tools as below while examining rules above, regardless of actual + capabilities of them. + + step specified via binary symlink + ---------------------------------- + 1. --tool o o + 2. HGMERGE o o + 3. merge-patterns o (*) x (*) + 4. ui.merge x (*) x (*) + + If "merge.strict-capability-check" configuration is true, Mercurial checks + capabilities of internal merge tools strictly in (*) cases above. It is + false by default for backward compatibility. + Note: After selecting a merge program, Mercurial will by default attempt to merge the files using a simple merge algorithm first. Only if it
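The help text above introduces the "merge.strict-capability-check" option. For reference, a minimal hgrc sketch of enabling it persistently (equivalent to the --config overrides used by the tests later in this changeset):

    [merge]
    strict-capability-check = true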
--- a/tests/test-http.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-http.t Mon Aug 20 09:48:08 2018 -0700 @@ -476,7 +476,7 @@ #endif ... and also keep partial clones and pulls working - $ hg clone http://localhost:$HGPORT1 --rev 0 test-partial-clone + $ hg clone http://localhost:$HGPORT1 --rev 0 test/partial/clone adding changesets adding manifests adding file changes @@ -484,7 +484,7 @@ new changesets 8b6053c928fe updating to branch default 4 files updated, 0 files merged, 0 files removed, 0 files unresolved - $ hg pull -R test-partial-clone + $ hg pull -R test/partial/clone pulling from http://localhost:$HGPORT1/ searching for changes adding changesets @@ -494,6 +494,13 @@ new changesets 5fed3813f7f5:56f9bc90cce6 (run 'hg update' to get a working copy) + $ hg clone -U -r 0 test/partial/clone test/another/clone + adding changesets + adding manifests + adding file changes + added 1 changesets with 4 changes to 4 files + new changesets 8b6053c928fe + corrupt cookies file should yield a warning $ cat > $TESTTMP/cookies.txt << EOF
--- a/tests/test-inherit-mode.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-inherit-mode.t Mon Aug 20 09:48:08 2018 -0700 @@ -69,6 +69,7 @@ 00600 ./.hg/00changelog.i 00770 ./.hg/cache/ 00660 ./.hg/cache/branch2-served + 00660 ./.hg/cache/manifestfulltextcache 00660 ./.hg/cache/rbc-names-v1 00660 ./.hg/cache/rbc-revs-v1 00660 ./.hg/dirstate
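The leading octal column above is the permission mode of each entry. A hedged sketch of reproducing that column (assuming the current directory is the repository created by the test; its real helper may differ):

    import os
    import stat

    # S_IMODE() strips the file-type bits, keeping the permission bits.
    for name in sorted(os.listdir('.hg/cache')):
        st = os.lstat(os.path.join('.hg/cache', name))
        print('%05o ./.hg/cache/%s' % (stat.S_IMODE(st.st_mode), name))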
--- a/tests/test-lfconvert.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-lfconvert.t Mon Aug 20 09:48:08 2018 -0700 @@ -101,6 +101,7 @@ largefiles revlogv1 store + testonly-simplestore (reposimplestore !) "lfconvert" includes a newline at the end of the standin files. $ cat .hglf/large .hglf/sub/maybelarge.dat
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-linelog.py Mon Aug 20 09:48:08 2018 -0700 @@ -0,0 +1,193 @@ +from __future__ import absolute_import, print_function + +import difflib +import random +import unittest + +from mercurial import linelog + +vecratio = 3 # number of replacelines / number of replacelines_vec +maxlinenum = 0xffffff +maxb1 = 0xffffff +maxdeltaa = 10 +maxdeltab = 10 + +def _genedits(seed, endrev): + lines = [] + random.seed(seed) + rev = 0 + for rev in range(0, endrev): + n = len(lines) + a1 = random.randint(0, n) + a2 = random.randint(a1, min(n, a1 + maxdeltaa)) + b1 = random.randint(0, maxb1) + b2 = random.randint(b1, b1 + maxdeltab) + usevec = not bool(random.randint(0, vecratio)) + if usevec: + blines = [(random.randint(0, rev), random.randint(0, maxlinenum)) + for _ in range(b1, b2)] + else: + blines = [(rev, bidx) for bidx in range(b1, b2)] + lines[a1:a2] = blines + yield lines, rev, a1, a2, b1, b2, blines, usevec + +class linelogtests(unittest.TestCase): + def testlinelogencodedecode(self): + program = [linelog._eof(0, 0), + linelog._jge(41, 42), + linelog._jump(0, 43), + linelog._eof(0, 0), + linelog._jl(44, 45), + linelog._line(46, 47), + ] + ll = linelog.linelog(program, maxrev=100) + enc = ll.encode() + # round-trips okay + self.assertEqual(linelog.linelog.fromdata(enc)._program, ll._program) + self.assertEqual(linelog.linelog.fromdata(enc), ll) + # This encoding matches the encoding used by hg-experimental's + # linelog file, or is supposed to if it doesn't. + self.assertEqual(enc, (b'\x00\x00\x01\x90\x00\x00\x00\x06' + b'\x00\x00\x00\xa4\x00\x00\x00*' + b'\x00\x00\x00\x00\x00\x00\x00+' + b'\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\xb1\x00\x00\x00-' + b'\x00\x00\x00\xba\x00\x00\x00/')) + + def testsimpleedits(self): + ll = linelog.linelog() + # Initial revision: add lines 0, 1, and 2 + ll.replacelines(1, 0, 0, 0, 3) + self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(1)], + [(1, 0), + (1, 1), + (1, 2), + ]) + # Replace line 1 with a new line + ll.replacelines(2, 1, 2, 1, 2) + self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(2)], + [(1, 0), + (2, 1), + (1, 2), + ]) + # delete a line out of 2 + ll.replacelines(3, 1, 2, 0, 0) + self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(3)], + [(1, 0), + (1, 2), + ]) + # annotation of 1 is unchanged + self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(1)], + [(1, 0), + (1, 1), + (1, 2), + ]) + ll.annotate(3) # set internal state to revision 3 + start = ll.getoffset(0) + end = ll.getoffset(1) + self.assertEqual(ll.getalllines(start, end), [ + (1, 0), + (2, 1), + (1, 1), + ]) + self.assertEqual(ll.getalllines(), [ + (1, 0), + (2, 1), + (1, 1), + (1, 2), + ]) + + def testparseclinelogfile(self): + # This data is what the replacements in testsimpleedits + # produce when fed to the original linelog.c implementation. 
+ data = (b'\x00\x00\x00\x0c\x00\x00\x00\x0f' + b'\x00\x00\x00\x00\x00\x00\x00\x02' + b'\x00\x00\x00\x05\x00\x00\x00\x06' + b'\x00\x00\x00\x06\x00\x00\x00\x00' + b'\x00\x00\x00\x00\x00\x00\x00\x07' + b'\x00\x00\x00\x06\x00\x00\x00\x02' + b'\x00\x00\x00\x00\x00\x00\x00\x00' + b'\x00\x00\x00\t\x00\x00\x00\t' + b'\x00\x00\x00\x00\x00\x00\x00\x0c' + b'\x00\x00\x00\x08\x00\x00\x00\x05' + b'\x00\x00\x00\x06\x00\x00\x00\x01' + b'\x00\x00\x00\x00\x00\x00\x00\x05' + b'\x00\x00\x00\x0c\x00\x00\x00\x05' + b'\x00\x00\x00\n\x00\x00\x00\x01' + b'\x00\x00\x00\x00\x00\x00\x00\t') + llc = linelog.linelog.fromdata(data) + self.assertEqual([(l.rev, l.linenum) for l in llc.annotate(1)], + [(1, 0), + (1, 1), + (1, 2), + ]) + self.assertEqual([(l.rev, l.linenum) for l in llc.annotate(2)], + [(1, 0), + (2, 1), + (1, 2), + ]) + self.assertEqual([(l.rev, l.linenum) for l in llc.annotate(3)], + [(1, 0), + (1, 2), + ]) + # Check we emit the same bytecode. + ll = linelog.linelog() + # Initial revision: add lines 0, 1, and 2 + ll.replacelines(1, 0, 0, 0, 3) + # Replace line 1 with a new line + ll.replacelines(2, 1, 2, 1, 2) + # delete a line out of 2 + ll.replacelines(3, 1, 2, 0, 0) + diff = '\n ' + '\n '.join(difflib.unified_diff( + ll.debugstr().splitlines(), llc.debugstr().splitlines(), + 'python', 'c', lineterm='')) + self.assertEqual(ll._program, llc._program, 'Program mismatch: ' + diff) + # Done as a secondary step so we get a better result if the + # program is where the mismatch is. + self.assertEqual(ll, llc) + self.assertEqual(ll.encode(), data) + + def testanothersimplecase(self): + ll = linelog.linelog() + ll.replacelines(3, 0, 0, 0, 2) + ll.replacelines(4, 0, 2, 0, 0) + self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(4)], + []) + self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(3)], + [(3, 0), (3, 1)]) + # rev 2 is empty because contents were only ever introduced in rev 3 + self.assertEqual([(l.rev, l.linenum) for l in ll.annotate(2)], + []) + + def testrandomedits(self): + # Inspired by original linelog tests. + seed = random.random() + numrevs = 2000 + ll = linelog.linelog() + # Populate linelog + for lines, rev, a1, a2, b1, b2, blines, usevec in _genedits( + seed, numrevs): + if usevec: + ll.replacelines_vec(rev, a1, a2, blines) + else: + ll.replacelines(rev, a1, a2, b1, b2) + ar = ll.annotate(rev) + self.assertEqual(ll.annotateresult, lines) + # Verify we can get back these states by annotating each rev + for lines, rev, a1, a2, b1, b2, blines, usevec in _genedits( + seed, numrevs): + ar = ll.annotate(rev) + self.assertEqual([(l.rev, l.linenum) for l in ar], lines) + + def testinfinitebadprogram(self): + ll = linelog.linelog.fromdata( + b'\x00\x00\x00\x00\x00\x00\x00\x02' # header + b'\x00\x00\x00\x00\x00\x00\x00\x01' # JUMP to self + ) + with self.assertRaises(linelog.LineLogError): + # should not be an infinite loop and raise + ll.annotate(1) + +if __name__ == '__main__': + import silenttestrunner + silenttestrunner.main(__name__)
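The core API the new test exercises, condensed into a sketch that mirrors testsimpleedits above: replacelines(rev, a1, a2, b1, b2) replaces lines [a1, a2) of the current content with lines [b1, b2) attributed to rev, and annotate(rev) checks out that revision's view:

    from mercurial import linelog

    ll = linelog.linelog()
    ll.replacelines(1, 0, 0, 0, 3)  # rev 1 introduces lines 0, 1, 2
    ll.replacelines(2, 1, 2, 1, 2)  # rev 2 rewrites line 1
    for line in ll.annotate(2):
        print((line.rev, line.linenum))  # (1, 0), (2, 1), (1, 2)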
--- a/tests/test-match.py Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-match.py Mon Aug 20 09:48:08 2018 -0700 @@ -6,14 +6,816 @@ from mercurial import ( match as matchmod, + util, ) +class BaseMatcherTests(unittest.TestCase): + + def testVisitdir(self): + m = matchmod.basematcher(b'', b'') + self.assertTrue(m.visitdir(b'.')) + self.assertTrue(m.visitdir(b'dir')) + + def testVisitchildrenset(self): + m = matchmod.basematcher(b'', b'') + self.assertEqual(m.visitchildrenset(b'.'), b'this') + self.assertEqual(m.visitchildrenset(b'dir'), b'this') + +class AlwaysMatcherTests(unittest.TestCase): + + def testVisitdir(self): + m = matchmod.alwaysmatcher(b'', b'') + self.assertEqual(m.visitdir(b'.'), b'all') + self.assertEqual(m.visitdir(b'dir'), b'all') + + def testVisitchildrenset(self): + m = matchmod.alwaysmatcher(b'', b'') + self.assertEqual(m.visitchildrenset(b'.'), b'all') + self.assertEqual(m.visitchildrenset(b'dir'), b'all') + class NeverMatcherTests(unittest.TestCase): def testVisitdir(self): - m = matchmod.nevermatcher('', '') - self.assertFalse(m.visitdir('.')) - self.assertFalse(m.visitdir('dir')) + m = matchmod.nevermatcher(b'', b'') + self.assertFalse(m.visitdir(b'.')) + self.assertFalse(m.visitdir(b'dir')) + + def testVisitchildrenset(self): + m = matchmod.nevermatcher(b'', b'') + self.assertEqual(m.visitchildrenset(b'.'), set()) + self.assertEqual(m.visitchildrenset(b'dir'), set()) + +class PredicateMatcherTests(unittest.TestCase): + # predicatematcher does not currently define either of these methods, so + # this is equivalent to BaseMatcherTests. + + def testVisitdir(self): + m = matchmod.predicatematcher(b'', b'', lambda *a: False) + self.assertTrue(m.visitdir(b'.')) + self.assertTrue(m.visitdir(b'dir')) + + def testVisitchildrenset(self): + m = matchmod.predicatematcher(b'', b'', lambda *a: False) + self.assertEqual(m.visitchildrenset(b'.'), b'this') + self.assertEqual(m.visitchildrenset(b'dir'), b'this') + +class PatternMatcherTests(unittest.TestCase): + + def testVisitdirPrefix(self): + m = matchmod.match(b'x', b'', patterns=[b'path:dir/subdir']) + assert isinstance(m, matchmod.patternmatcher) + self.assertTrue(m.visitdir(b'.')) + self.assertTrue(m.visitdir(b'dir')) + self.assertEqual(m.visitdir(b'dir/subdir'), b'all') + # OPT: This should probably be 'all' if its parent is? + self.assertTrue(m.visitdir(b'dir/subdir/x')) + self.assertFalse(m.visitdir(b'folder')) + + def testVisitchildrensetPrefix(self): + m = matchmod.match(b'x', b'', patterns=[b'path:dir/subdir']) + assert isinstance(m, matchmod.patternmatcher) + self.assertEqual(m.visitchildrenset(b'.'), b'this') + self.assertEqual(m.visitchildrenset(b'dir'), b'this') + self.assertEqual(m.visitchildrenset(b'dir/subdir'), b'all') + # OPT: This should probably be 'all' if its parent is? + self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), b'this') + self.assertEqual(m.visitchildrenset(b'folder'), set()) + + def testVisitdirRootfilesin(self): + m = matchmod.match(b'x', b'', patterns=[b'rootfilesin:dir/subdir']) + assert isinstance(m, matchmod.patternmatcher) + self.assertTrue(m.visitdir(b'.')) + self.assertFalse(m.visitdir(b'dir/subdir/x')) + self.assertFalse(m.visitdir(b'folder')) + # FIXME: These should probably be True. 
+ self.assertFalse(m.visitdir(b'dir')) + self.assertFalse(m.visitdir(b'dir/subdir')) + + def testVisitchildrensetRootfilesin(self): + m = matchmod.match(b'x', b'', patterns=[b'rootfilesin:dir/subdir']) + assert isinstance(m, matchmod.patternmatcher) + self.assertEqual(m.visitchildrenset(b'.'), b'this') + self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), set()) + self.assertEqual(m.visitchildrenset(b'folder'), set()) + # FIXME: These should probably be {'subdir'} and 'this', respectively, + # or at least 'this' and 'this'. + self.assertEqual(m.visitchildrenset(b'dir'), set()) + self.assertEqual(m.visitchildrenset(b'dir/subdir'), set()) + + def testVisitdirGlob(self): + m = matchmod.match(b'x', b'', patterns=[b'glob:dir/z*']) + assert isinstance(m, matchmod.patternmatcher) + self.assertTrue(m.visitdir(b'.')) + self.assertTrue(m.visitdir(b'dir')) + self.assertFalse(m.visitdir(b'folder')) + # OPT: these should probably be False. + self.assertTrue(m.visitdir(b'dir/subdir')) + self.assertTrue(m.visitdir(b'dir/subdir/x')) + + def testVisitchildrensetGlob(self): + m = matchmod.match(b'x', b'', patterns=[b'glob:dir/z*']) + assert isinstance(m, matchmod.patternmatcher) + self.assertEqual(m.visitchildrenset(b'.'), b'this') + self.assertEqual(m.visitchildrenset(b'folder'), set()) + self.assertEqual(m.visitchildrenset(b'dir'), b'this') + # OPT: these should probably be set(). + self.assertEqual(m.visitchildrenset(b'dir/subdir'), b'this') + self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), b'this') + +class IncludeMatcherTests(unittest.TestCase): + + def testVisitdirPrefix(self): + m = matchmod.match(b'x', b'', include=[b'path:dir/subdir']) + assert isinstance(m, matchmod.includematcher) + self.assertTrue(m.visitdir(b'.')) + self.assertTrue(m.visitdir(b'dir')) + self.assertEqual(m.visitdir(b'dir/subdir'), b'all') + # OPT: This should probably be 'all' if its parent is? + self.assertTrue(m.visitdir(b'dir/subdir/x')) + self.assertFalse(m.visitdir(b'folder')) + + def testVisitchildrensetPrefix(self): + m = matchmod.match(b'x', b'', include=[b'path:dir/subdir']) + assert isinstance(m, matchmod.includematcher) + self.assertEqual(m.visitchildrenset(b'.'), {b'dir'}) + self.assertEqual(m.visitchildrenset(b'dir'), {b'subdir'}) + self.assertEqual(m.visitchildrenset(b'dir/subdir'), b'all') + # OPT: This should probably be 'all' if its parent is? 
+ self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), b'this') + self.assertEqual(m.visitchildrenset(b'folder'), set()) + + def testVisitdirRootfilesin(self): + m = matchmod.match(b'x', b'', include=[b'rootfilesin:dir/subdir']) + assert isinstance(m, matchmod.includematcher) + self.assertTrue(m.visitdir(b'.')) + self.assertTrue(m.visitdir(b'dir')) + self.assertTrue(m.visitdir(b'dir/subdir')) + self.assertFalse(m.visitdir(b'dir/subdir/x')) + self.assertFalse(m.visitdir(b'folder')) + + def testVisitchildrensetRootfilesin(self): + m = matchmod.match(b'x', b'', include=[b'rootfilesin:dir/subdir']) + assert isinstance(m, matchmod.includematcher) + self.assertEqual(m.visitchildrenset(b'.'), {b'dir'}) + self.assertEqual(m.visitchildrenset(b'dir'), {b'subdir'}) + self.assertEqual(m.visitchildrenset(b'dir/subdir'), b'this') + self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), set()) + self.assertEqual(m.visitchildrenset(b'folder'), set()) + + def testVisitdirGlob(self): + m = matchmod.match(b'x', b'', include=[b'glob:dir/z*']) + assert isinstance(m, matchmod.includematcher) + self.assertTrue(m.visitdir(b'.')) + self.assertTrue(m.visitdir(b'dir')) + self.assertFalse(m.visitdir(b'folder')) + # OPT: these should probably be False. + self.assertTrue(m.visitdir(b'dir/subdir')) + self.assertTrue(m.visitdir(b'dir/subdir/x')) + + def testVisitchildrensetGlob(self): + m = matchmod.match(b'x', b'', include=[b'glob:dir/z*']) + assert isinstance(m, matchmod.includematcher) + self.assertEqual(m.visitchildrenset(b'.'), {b'dir'}) + self.assertEqual(m.visitchildrenset(b'folder'), set()) + self.assertEqual(m.visitchildrenset(b'dir'), b'this') + # OPT: these should probably be set(). + self.assertEqual(m.visitchildrenset(b'dir/subdir'), b'this') + self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), b'this') + +class ExactMatcherTests(unittest.TestCase): + + def testVisitdir(self): + m = matchmod.match(b'x', b'', patterns=[b'dir/subdir/foo.txt'], + exact=True) + assert isinstance(m, matchmod.exactmatcher) + self.assertTrue(m.visitdir(b'.')) + self.assertTrue(m.visitdir(b'dir')) + self.assertTrue(m.visitdir(b'dir/subdir')) + self.assertFalse(m.visitdir(b'dir/subdir/foo.txt')) + self.assertFalse(m.visitdir(b'dir/foo')) + self.assertFalse(m.visitdir(b'dir/subdir/x')) + self.assertFalse(m.visitdir(b'folder')) + + def testVisitchildrenset(self): + m = matchmod.match(b'x', b'', patterns=[b'dir/subdir/foo.txt'], + exact=True) + assert isinstance(m, matchmod.exactmatcher) + self.assertEqual(m.visitchildrenset(b'.'), {b'dir'}) + self.assertEqual(m.visitchildrenset(b'dir'), {b'subdir'}) + self.assertEqual(m.visitchildrenset(b'dir/subdir'), b'this') + self.assertEqual(m.visitchildrenset(b'dir/subdir/x'), set()) + self.assertEqual(m.visitchildrenset(b'dir/subdir/foo.txt'), set()) + self.assertEqual(m.visitchildrenset(b'folder'), set()) + +class DifferenceMatcherTests(unittest.TestCase): + + def testVisitdirM2always(self): + m1 = matchmod.alwaysmatcher(b'', b'') + m2 = matchmod.alwaysmatcher(b'', b'') + dm = matchmod.differencematcher(m1, m2) + # dm should be equivalent to a nevermatcher. 
+ self.assertFalse(dm.visitdir(b'.')) + self.assertFalse(dm.visitdir(b'dir')) + self.assertFalse(dm.visitdir(b'dir/subdir')) + self.assertFalse(dm.visitdir(b'dir/subdir/z')) + self.assertFalse(dm.visitdir(b'dir/foo')) + self.assertFalse(dm.visitdir(b'dir/subdir/x')) + self.assertFalse(dm.visitdir(b'folder')) + + def testVisitchildrensetM2always(self): + m1 = matchmod.alwaysmatcher(b'', b'') + m2 = matchmod.alwaysmatcher(b'', b'') + dm = matchmod.differencematcher(m1, m2) + # dm should be equivalent to a nevermatcher. + self.assertEqual(dm.visitchildrenset(b'.'), set()) + self.assertEqual(dm.visitchildrenset(b'dir'), set()) + self.assertEqual(dm.visitchildrenset(b'dir/subdir'), set()) + self.assertEqual(dm.visitchildrenset(b'dir/subdir/z'), set()) + self.assertEqual(dm.visitchildrenset(b'dir/foo'), set()) + self.assertEqual(dm.visitchildrenset(b'dir/subdir/x'), set()) + self.assertEqual(dm.visitchildrenset(b'folder'), set()) + + def testVisitdirM2never(self): + m1 = matchmod.alwaysmatcher(b'', b'') + m2 = matchmod.nevermatcher(b'', b'') + dm = matchmod.differencematcher(m1, m2) + # dm should be equivalent to a alwaysmatcher. OPT: if m2 is a + # nevermatcher, we could return 'all' for these. + # + # We're testing Equal-to-True instead of just 'assertTrue' since + # assertTrue does NOT verify that it's a bool, just that it's truthy. + # While we may want to eventually make these return 'all', they should + # not currently do so. + self.assertEqual(dm.visitdir(b'.'), True) + self.assertEqual(dm.visitdir(b'dir'), True) + self.assertEqual(dm.visitdir(b'dir/subdir'), True) + self.assertEqual(dm.visitdir(b'dir/subdir/z'), True) + self.assertEqual(dm.visitdir(b'dir/foo'), True) + self.assertEqual(dm.visitdir(b'dir/subdir/x'), True) + self.assertEqual(dm.visitdir(b'folder'), True) + + def testVisitchildrensetM2never(self): + m1 = matchmod.alwaysmatcher(b'', b'') + m2 = matchmod.nevermatcher(b'', b'') + dm = matchmod.differencematcher(m1, m2) + # dm should be equivalent to a alwaysmatcher. + self.assertEqual(dm.visitchildrenset(b'.'), b'all') + self.assertEqual(dm.visitchildrenset(b'dir'), b'all') + self.assertEqual(dm.visitchildrenset(b'dir/subdir'), b'all') + self.assertEqual(dm.visitchildrenset(b'dir/subdir/z'), b'all') + self.assertEqual(dm.visitchildrenset(b'dir/foo'), b'all') + self.assertEqual(dm.visitchildrenset(b'dir/subdir/x'), b'all') + self.assertEqual(dm.visitchildrenset(b'folder'), b'all') + + def testVisitdirM2SubdirPrefix(self): + m1 = matchmod.alwaysmatcher(b'', b'') + m2 = matchmod.match(b'', b'', patterns=[b'path:dir/subdir']) + dm = matchmod.differencematcher(m1, m2) + self.assertEqual(dm.visitdir(b'.'), True) + self.assertEqual(dm.visitdir(b'dir'), True) + self.assertFalse(dm.visitdir(b'dir/subdir')) + # OPT: We should probably return False for these; we don't because + # patternmatcher.visitdir() (our m2) doesn't return 'all' for subdirs of + # an 'all' pattern, just True. + self.assertEqual(dm.visitdir(b'dir/subdir/z'), True) + self.assertEqual(dm.visitdir(b'dir/subdir/x'), True) + # OPT: We could return 'all' for these. 
+ self.assertEqual(dm.visitdir(b'dir/foo'), True)
+ self.assertEqual(dm.visitdir(b'folder'), True)
+
+ def testVisitchildrensetM2SubdirPrefix(self):
+ m1 = matchmod.alwaysmatcher(b'', b'')
+ m2 = matchmod.match(b'', b'', patterns=[b'path:dir/subdir'])
+ dm = matchmod.differencematcher(m1, m2)
+ self.assertEqual(dm.visitchildrenset(b'.'), b'this')
+ self.assertEqual(dm.visitchildrenset(b'dir'), b'this')
+ self.assertEqual(dm.visitchildrenset(b'dir/subdir'), set())
+ self.assertEqual(dm.visitchildrenset(b'dir/foo'), b'all')
+ self.assertEqual(dm.visitchildrenset(b'folder'), b'all')
+ # OPT: We should probably return set() for these; we don't because
+ # patternmatcher.visitdir() (our m2) doesn't return 'all' for subdirs of
+ # an 'all' pattern, just 'this'.
+ self.assertEqual(dm.visitchildrenset(b'dir/subdir/z'), b'this')
+ self.assertEqual(dm.visitchildrenset(b'dir/subdir/x'), b'this')
+
+ # We're using includematcher instead of patterns because it behaves slightly
+ # better (giving narrower results) than patternmatcher.
+ def testVisitdirIncludeInclude(self):
+ m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+ m2 = matchmod.match(b'', b'', include=[b'rootfilesin:dir'])
+ dm = matchmod.differencematcher(m1, m2)
+ self.assertEqual(dm.visitdir(b'.'), True)
+ self.assertEqual(dm.visitdir(b'dir'), True)
+ self.assertEqual(dm.visitdir(b'dir/subdir'), True)
+ self.assertFalse(dm.visitdir(b'dir/foo'))
+ self.assertFalse(dm.visitdir(b'folder'))
+ # OPT: We should probably return False for these; we don't because
+ # patternmatcher.visitdir() (our m2) doesn't return 'all' for subdirs of
+ # an 'all' pattern, just True.
+ self.assertEqual(dm.visitdir(b'dir/subdir/z'), True)
+ self.assertEqual(dm.visitdir(b'dir/subdir/x'), True)
+
+ def testVisitchildrensetIncludeInclude(self):
+ m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+ m2 = matchmod.match(b'', b'', include=[b'rootfilesin:dir'])
+ dm = matchmod.differencematcher(m1, m2)
+ self.assertEqual(dm.visitchildrenset(b'.'), {b'dir'})
+ self.assertEqual(dm.visitchildrenset(b'dir'), {b'subdir'})
+ self.assertEqual(dm.visitchildrenset(b'dir/subdir'), b'all')
+ self.assertEqual(dm.visitchildrenset(b'dir/foo'), set())
+ self.assertEqual(dm.visitchildrenset(b'folder'), set())
+ # OPT: We should probably return set() for these; we don't because
+ # patternmatcher.visitdir() (our m2) doesn't return 'all' for subdirs of
+ # an 'all' pattern, just 'this'.
+ self.assertEqual(dm.visitchildrenset(b'dir/subdir/z'), b'this')
+ self.assertEqual(dm.visitchildrenset(b'dir/subdir/x'), b'this')
+
+class IntersectionMatcherTests(unittest.TestCase):
+
+ def testVisitdirM2always(self):
+ m1 = matchmod.alwaysmatcher(b'', b'')
+ m2 = matchmod.alwaysmatcher(b'', b'')
+ im = matchmod.intersectmatchers(m1, m2)
+ # im should be equivalent to a alwaysmatcher.
+ self.assertEqual(im.visitdir(b'.'), b'all')
+ self.assertEqual(im.visitdir(b'dir'), b'all')
+ self.assertEqual(im.visitdir(b'dir/subdir'), b'all')
+ self.assertEqual(im.visitdir(b'dir/subdir/z'), b'all')
+ self.assertEqual(im.visitdir(b'dir/foo'), b'all')
+ self.assertEqual(im.visitdir(b'dir/subdir/x'), b'all')
+ self.assertEqual(im.visitdir(b'folder'), b'all')
+
+ def testVisitchildrensetM2always(self):
+ m1 = matchmod.alwaysmatcher(b'', b'')
+ m2 = matchmod.alwaysmatcher(b'', b'')
+ im = matchmod.intersectmatchers(m1, m2)
+ # im should be equivalent to a alwaysmatcher. 
+ self.assertEqual(im.visitchildrenset(b'.'), b'all')
+ self.assertEqual(im.visitchildrenset(b'dir'), b'all')
+ self.assertEqual(im.visitchildrenset(b'dir/subdir'), b'all')
+ self.assertEqual(im.visitchildrenset(b'dir/subdir/z'), b'all')
+ self.assertEqual(im.visitchildrenset(b'dir/foo'), b'all')
+ self.assertEqual(im.visitchildrenset(b'dir/subdir/x'), b'all')
+ self.assertEqual(im.visitchildrenset(b'folder'), b'all')
+
+ def testVisitdirM2never(self):
+ m1 = matchmod.alwaysmatcher(b'', b'')
+ m2 = matchmod.nevermatcher(b'', b'')
+ im = matchmod.intersectmatchers(m1, m2)
+ # im should be equivalent to a nevermatcher.
+ self.assertFalse(im.visitdir(b'.'))
+ self.assertFalse(im.visitdir(b'dir'))
+ self.assertFalse(im.visitdir(b'dir/subdir'))
+ self.assertFalse(im.visitdir(b'dir/subdir/z'))
+ self.assertFalse(im.visitdir(b'dir/foo'))
+ self.assertFalse(im.visitdir(b'dir/subdir/x'))
+ self.assertFalse(im.visitdir(b'folder'))
+
+ def testVisitchildrensetM2never(self):
+ m1 = matchmod.alwaysmatcher(b'', b'')
+ m2 = matchmod.nevermatcher(b'', b'')
+ im = matchmod.intersectmatchers(m1, m2)
+ # im should be equivalent to a nevermatcher.
+ self.assertEqual(im.visitchildrenset(b'.'), set())
+ self.assertEqual(im.visitchildrenset(b'dir'), set())
+ self.assertEqual(im.visitchildrenset(b'dir/subdir'), set())
+ self.assertEqual(im.visitchildrenset(b'dir/subdir/z'), set())
+ self.assertEqual(im.visitchildrenset(b'dir/foo'), set())
+ self.assertEqual(im.visitchildrenset(b'dir/subdir/x'), set())
+ self.assertEqual(im.visitchildrenset(b'folder'), set())
+
+ def testVisitdirM2SubdirPrefix(self):
+ m1 = matchmod.alwaysmatcher(b'', b'')
+ m2 = matchmod.match(b'', b'', patterns=[b'path:dir/subdir'])
+ im = matchmod.intersectmatchers(m1, m2)
+ self.assertEqual(im.visitdir(b'.'), True)
+ self.assertEqual(im.visitdir(b'dir'), True)
+ self.assertEqual(im.visitdir(b'dir/subdir'), b'all')
+ self.assertFalse(im.visitdir(b'dir/foo'))
+ self.assertFalse(im.visitdir(b'folder'))
+ # OPT: We should probably return 'all' for these; we don't because
+ # patternmatcher.visitdir() (our m2) doesn't return 'all' for subdirs of
+ # an 'all' pattern, just True.
+ self.assertEqual(im.visitdir(b'dir/subdir/z'), True)
+ self.assertEqual(im.visitdir(b'dir/subdir/x'), True)
+
+ def testVisitchildrensetM2SubdirPrefix(self):
+ m1 = matchmod.alwaysmatcher(b'', b'')
+ m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+ im = matchmod.intersectmatchers(m1, m2)
+ self.assertEqual(im.visitchildrenset(b'.'), {b'dir'})
+ self.assertEqual(im.visitchildrenset(b'dir'), {b'subdir'})
+ self.assertEqual(im.visitchildrenset(b'dir/subdir'), b'all')
+ self.assertEqual(im.visitchildrenset(b'dir/foo'), set())
+ self.assertEqual(im.visitchildrenset(b'folder'), set())
+ # OPT: We should probably return 'all' for these
+ self.assertEqual(im.visitchildrenset(b'dir/subdir/z'), b'this')
+ self.assertEqual(im.visitchildrenset(b'dir/subdir/x'), b'this')
+
+ # We're using includematcher instead of patterns because it behaves slightly
+ # better (giving narrower results) than patternmatcher. 
+ def testVisitdirIncludeInclude(self):
+ m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+ m2 = matchmod.match(b'', b'', include=[b'rootfilesin:dir'])
+ im = matchmod.intersectmatchers(m1, m2)
+ self.assertEqual(im.visitdir(b'.'), True)
+ self.assertEqual(im.visitdir(b'dir'), True)
+ self.assertFalse(im.visitdir(b'dir/subdir'))
+ self.assertFalse(im.visitdir(b'dir/foo'))
+ self.assertFalse(im.visitdir(b'folder'))
+ self.assertFalse(im.visitdir(b'dir/subdir/z'))
+ self.assertFalse(im.visitdir(b'dir/subdir/x'))
+
+ def testVisitchildrensetIncludeInclude(self):
+ m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+ m2 = matchmod.match(b'', b'', include=[b'rootfilesin:dir'])
+ im = matchmod.intersectmatchers(m1, m2)
+ self.assertEqual(im.visitchildrenset(b'.'), {b'dir'})
+ self.assertEqual(im.visitchildrenset(b'dir'), b'this')
+ self.assertEqual(im.visitchildrenset(b'dir/subdir'), set())
+ self.assertEqual(im.visitchildrenset(b'dir/foo'), set())
+ self.assertEqual(im.visitchildrenset(b'folder'), set())
+ self.assertEqual(im.visitchildrenset(b'dir/subdir/z'), set())
+ self.assertEqual(im.visitchildrenset(b'dir/subdir/x'), set())
+
+ # We're using includematcher instead of patterns because it behaves slightly
+ # better (giving narrower results) than patternmatcher.
+ def testVisitdirIncludeInclude2(self):
+ m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+ m2 = matchmod.match(b'', b'', include=[b'path:folder'])
+ im = matchmod.intersectmatchers(m1, m2)
+ # FIXME: is True correct here?
+ self.assertEqual(im.visitdir(b'.'), True)
+ self.assertFalse(im.visitdir(b'dir'))
+ self.assertFalse(im.visitdir(b'dir/subdir'))
+ self.assertFalse(im.visitdir(b'dir/foo'))
+ self.assertFalse(im.visitdir(b'folder'))
+ self.assertFalse(im.visitdir(b'dir/subdir/z'))
+ self.assertFalse(im.visitdir(b'dir/subdir/x'))
+
+ def testVisitchildrensetIncludeInclude2(self):
+ m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+ m2 = matchmod.match(b'', b'', include=[b'path:folder'])
+ im = matchmod.intersectmatchers(m1, m2)
+ # FIXME: is set() correct here?
+ self.assertEqual(im.visitchildrenset(b'.'), set())
+ self.assertEqual(im.visitchildrenset(b'dir'), set())
+ self.assertEqual(im.visitchildrenset(b'dir/subdir'), set())
+ self.assertEqual(im.visitchildrenset(b'dir/foo'), set())
+ self.assertEqual(im.visitchildrenset(b'folder'), set())
+ self.assertEqual(im.visitchildrenset(b'dir/subdir/z'), set())
+ self.assertEqual(im.visitchildrenset(b'dir/subdir/x'), set())
+
+ # We're using includematcher instead of patterns because it behaves slightly
+ # better (giving narrower results) than patternmatcher.
+ def testVisitdirIncludeInclude3(self):
+ m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x'])
+ m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+ im = matchmod.intersectmatchers(m1, m2)
+ self.assertEqual(im.visitdir(b'.'), True)
+ self.assertEqual(im.visitdir(b'dir'), True)
+ self.assertEqual(im.visitdir(b'dir/subdir'), True)
+ self.assertFalse(im.visitdir(b'dir/foo'))
+ self.assertFalse(im.visitdir(b'folder'))
+ self.assertFalse(im.visitdir(b'dir/subdir/z'))
+ # OPT: this should probably be 'all' not True. 
+ self.assertEqual(im.visitdir(b'dir/subdir/x'), True) + + def testVisitchildrensetIncludeInclude3(self): + m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x']) + m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir']) + im = matchmod.intersectmatchers(m1, m2) + self.assertEqual(im.visitchildrenset(b'.'), {b'dir'}) + self.assertEqual(im.visitchildrenset(b'dir'), {b'subdir'}) + self.assertEqual(im.visitchildrenset(b'dir/subdir'), {b'x'}) + self.assertEqual(im.visitchildrenset(b'dir/foo'), set()) + self.assertEqual(im.visitchildrenset(b'folder'), set()) + self.assertEqual(im.visitchildrenset(b'dir/subdir/z'), set()) + # OPT: this should probably be 'all' not 'this'. + self.assertEqual(im.visitchildrenset(b'dir/subdir/x'), b'this') + + # We're using includematcher instead of patterns because it behaves slightly + # better (giving narrower results) than patternmatcher. + def testVisitdirIncludeInclude4(self): + m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x']) + m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir/z']) + im = matchmod.intersectmatchers(m1, m2) + # OPT: these next three could probably be False as well. + self.assertEqual(im.visitdir(b'.'), True) + self.assertEqual(im.visitdir(b'dir'), True) + self.assertEqual(im.visitdir(b'dir/subdir'), True) + self.assertFalse(im.visitdir(b'dir/foo')) + self.assertFalse(im.visitdir(b'folder')) + self.assertFalse(im.visitdir(b'dir/subdir/z')) + self.assertFalse(im.visitdir(b'dir/subdir/x')) + + def testVisitchildrensetIncludeInclude4(self): + m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x']) + m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir/z']) + im = matchmod.intersectmatchers(m1, m2) + # OPT: these next two could probably be set() as well. + self.assertEqual(im.visitchildrenset(b'.'), {b'dir'}) + self.assertEqual(im.visitchildrenset(b'dir'), {b'subdir'}) + self.assertEqual(im.visitchildrenset(b'dir/subdir'), set()) + self.assertEqual(im.visitchildrenset(b'dir/foo'), set()) + self.assertEqual(im.visitchildrenset(b'folder'), set()) + self.assertEqual(im.visitchildrenset(b'dir/subdir/z'), set()) + self.assertEqual(im.visitchildrenset(b'dir/subdir/x'), set()) + +class UnionMatcherTests(unittest.TestCase): + + def testVisitdirM2always(self): + m1 = matchmod.alwaysmatcher(b'', b'') + m2 = matchmod.alwaysmatcher(b'', b'') + um = matchmod.unionmatcher([m1, m2]) + # um should be equivalent to a alwaysmatcher. + self.assertEqual(um.visitdir(b'.'), b'all') + self.assertEqual(um.visitdir(b'dir'), b'all') + self.assertEqual(um.visitdir(b'dir/subdir'), b'all') + self.assertEqual(um.visitdir(b'dir/subdir/z'), b'all') + self.assertEqual(um.visitdir(b'dir/foo'), b'all') + self.assertEqual(um.visitdir(b'dir/subdir/x'), b'all') + self.assertEqual(um.visitdir(b'folder'), b'all') + + def testVisitchildrensetM2always(self): + m1 = matchmod.alwaysmatcher(b'', b'') + m2 = matchmod.alwaysmatcher(b'', b'') + um = matchmod.unionmatcher([m1, m2]) + # um should be equivalent to a alwaysmatcher. 
+ self.assertEqual(um.visitchildrenset(b'.'), b'all') + self.assertEqual(um.visitchildrenset(b'dir'), b'all') + self.assertEqual(um.visitchildrenset(b'dir/subdir'), b'all') + self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'all') + self.assertEqual(um.visitchildrenset(b'dir/foo'), b'all') + self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'all') + self.assertEqual(um.visitchildrenset(b'folder'), b'all') + + def testVisitdirM1never(self): + m1 = matchmod.nevermatcher(b'', b'') + m2 = matchmod.alwaysmatcher(b'', b'') + um = matchmod.unionmatcher([m1, m2]) + # um should be equivalent to a alwaysmatcher. + self.assertEqual(um.visitdir(b'.'), b'all') + self.assertEqual(um.visitdir(b'dir'), b'all') + self.assertEqual(um.visitdir(b'dir/subdir'), b'all') + self.assertEqual(um.visitdir(b'dir/subdir/z'), b'all') + self.assertEqual(um.visitdir(b'dir/foo'), b'all') + self.assertEqual(um.visitdir(b'dir/subdir/x'), b'all') + self.assertEqual(um.visitdir(b'folder'), b'all') + + def testVisitchildrensetM1never(self): + m1 = matchmod.nevermatcher(b'', b'') + m2 = matchmod.alwaysmatcher(b'', b'') + um = matchmod.unionmatcher([m1, m2]) + # um should be equivalent to a alwaysmatcher. + self.assertEqual(um.visitchildrenset(b'.'), b'all') + self.assertEqual(um.visitchildrenset(b'dir'), b'all') + self.assertEqual(um.visitchildrenset(b'dir/subdir'), b'all') + self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'all') + self.assertEqual(um.visitchildrenset(b'dir/foo'), b'all') + self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'all') + self.assertEqual(um.visitchildrenset(b'folder'), b'all') + + def testVisitdirM2never(self): + m1 = matchmod.alwaysmatcher(b'', b'') + m2 = matchmod.nevermatcher(b'', b'') + um = matchmod.unionmatcher([m1, m2]) + # um should be equivalent to a alwaysmatcher. + self.assertEqual(um.visitdir(b'.'), b'all') + self.assertEqual(um.visitdir(b'dir'), b'all') + self.assertEqual(um.visitdir(b'dir/subdir'), b'all') + self.assertEqual(um.visitdir(b'dir/subdir/z'), b'all') + self.assertEqual(um.visitdir(b'dir/foo'), b'all') + self.assertEqual(um.visitdir(b'dir/subdir/x'), b'all') + self.assertEqual(um.visitdir(b'folder'), b'all') + + def testVisitchildrensetM2never(self): + m1 = matchmod.alwaysmatcher(b'', b'') + m2 = matchmod.nevermatcher(b'', b'') + um = matchmod.unionmatcher([m1, m2]) + # um should be equivalent to a alwaysmatcher. 
+ self.assertEqual(um.visitchildrenset(b'.'), b'all')
+ self.assertEqual(um.visitchildrenset(b'dir'), b'all')
+ self.assertEqual(um.visitchildrenset(b'dir/subdir'), b'all')
+ self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'all')
+ self.assertEqual(um.visitchildrenset(b'dir/foo'), b'all')
+ self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'all')
+ self.assertEqual(um.visitchildrenset(b'folder'), b'all')
+
+ def testVisitdirM2SubdirPrefix(self):
+ m1 = matchmod.alwaysmatcher(b'', b'')
+ m2 = matchmod.match(b'', b'', patterns=[b'path:dir/subdir'])
+ um = matchmod.unionmatcher([m1, m2])
+ self.assertEqual(um.visitdir(b'.'), b'all')
+ self.assertEqual(um.visitdir(b'dir'), b'all')
+ self.assertEqual(um.visitdir(b'dir/subdir'), b'all')
+ self.assertEqual(um.visitdir(b'dir/foo'), b'all')
+ self.assertEqual(um.visitdir(b'folder'), b'all')
+ self.assertEqual(um.visitdir(b'dir/subdir/z'), b'all')
+ self.assertEqual(um.visitdir(b'dir/subdir/x'), b'all')
+
+ def testVisitchildrensetM2SubdirPrefix(self):
+ m1 = matchmod.alwaysmatcher(b'', b'')
+ m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+ um = matchmod.unionmatcher([m1, m2])
+ self.assertEqual(um.visitchildrenset(b'.'), b'all')
+ self.assertEqual(um.visitchildrenset(b'dir'), b'all')
+ self.assertEqual(um.visitchildrenset(b'dir/subdir'), b'all')
+ self.assertEqual(um.visitchildrenset(b'dir/foo'), b'all')
+ self.assertEqual(um.visitchildrenset(b'folder'), b'all')
+ self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'all')
+ self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'all')
+
+ # We're using includematcher instead of patterns because it behaves slightly
+ # better (giving narrower results) than patternmatcher.
+ def testVisitdirIncludeInclude(self):
+ m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+ m2 = matchmod.match(b'', b'', include=[b'rootfilesin:dir'])
+ um = matchmod.unionmatcher([m1, m2])
+ self.assertEqual(um.visitdir(b'.'), True)
+ self.assertEqual(um.visitdir(b'dir'), True)
+ self.assertEqual(um.visitdir(b'dir/subdir'), b'all')
+ self.assertFalse(um.visitdir(b'dir/foo'))
+ self.assertFalse(um.visitdir(b'folder'))
+ # OPT: These two should probably be 'all' not True.
+ self.assertEqual(um.visitdir(b'dir/subdir/z'), True)
+ self.assertEqual(um.visitdir(b'dir/subdir/x'), True)
+
+ def testVisitchildrensetIncludeInclude(self):
+ m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
+ m2 = matchmod.match(b'', b'', include=[b'rootfilesin:dir'])
+ um = matchmod.unionmatcher([m1, m2])
+ self.assertEqual(um.visitchildrenset(b'.'), {b'dir'})
+ self.assertEqual(um.visitchildrenset(b'dir'), b'this')
+ self.assertEqual(um.visitchildrenset(b'dir/subdir'), b'all')
+ self.assertEqual(um.visitchildrenset(b'dir/foo'), set())
+ self.assertEqual(um.visitchildrenset(b'folder'), set())
+ # OPT: These next two could be 'all' instead of 'this'.
+ self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'this')
+ self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'this')
+
+ # We're using includematcher instead of patterns because it behaves slightly
+ # better (giving narrower results) than patternmatcher. 
+ def testVisitdirIncludeInclude2(self): + m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir']) + m2 = matchmod.match(b'', b'', include=[b'path:folder']) + um = matchmod.unionmatcher([m1, m2]) + self.assertEqual(um.visitdir(b'.'), True) + self.assertEqual(um.visitdir(b'dir'), True) + self.assertEqual(um.visitdir(b'dir/subdir'), b'all') + self.assertFalse(um.visitdir(b'dir/foo')) + self.assertEqual(um.visitdir(b'folder'), b'all') + # OPT: These should probably be 'all' not True. + self.assertEqual(um.visitdir(b'dir/subdir/z'), True) + self.assertEqual(um.visitdir(b'dir/subdir/x'), True) + + def testVisitchildrensetIncludeInclude2(self): + m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir']) + m2 = matchmod.match(b'', b'', include=[b'path:folder']) + um = matchmod.unionmatcher([m1, m2]) + self.assertEqual(um.visitchildrenset(b'.'), {b'folder', b'dir'}) + self.assertEqual(um.visitchildrenset(b'dir'), {b'subdir'}) + self.assertEqual(um.visitchildrenset(b'dir/subdir'), b'all') + self.assertEqual(um.visitchildrenset(b'dir/foo'), set()) + self.assertEqual(um.visitchildrenset(b'folder'), b'all') + # OPT: These next two could be 'all' instead of 'this'. + self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'this') + self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'this') + + # We're using includematcher instead of patterns because it behaves slightly + # better (giving narrower results) than patternmatcher. + def testVisitdirIncludeInclude3(self): + m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x']) + m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir']) + um = matchmod.unionmatcher([m1, m2]) + self.assertEqual(um.visitdir(b'.'), True) + self.assertEqual(um.visitdir(b'dir'), True) + self.assertEqual(um.visitdir(b'dir/subdir'), b'all') + self.assertFalse(um.visitdir(b'dir/foo')) + self.assertFalse(um.visitdir(b'folder')) + self.assertEqual(um.visitdir(b'dir/subdir/x'), b'all') + # OPT: this should probably be 'all' not True. + self.assertEqual(um.visitdir(b'dir/subdir/z'), True) + + def testVisitchildrensetIncludeInclude3(self): + m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x']) + m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir']) + um = matchmod.unionmatcher([m1, m2]) + self.assertEqual(um.visitchildrenset(b'.'), {b'dir'}) + self.assertEqual(um.visitchildrenset(b'dir'), {b'subdir'}) + self.assertEqual(um.visitchildrenset(b'dir/subdir'), b'all') + self.assertEqual(um.visitchildrenset(b'dir/foo'), set()) + self.assertEqual(um.visitchildrenset(b'folder'), set()) + self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'all') + # OPT: this should probably be 'all' not 'this'. + self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'this') + + # We're using includematcher instead of patterns because it behaves slightly + # better (giving narrower results) than patternmatcher. + def testVisitdirIncludeInclude4(self): + m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x']) + m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir/z']) + um = matchmod.unionmatcher([m1, m2]) + # OPT: these next three could probably be False as well. 
+ self.assertEqual(um.visitdir(b'.'), True) + self.assertEqual(um.visitdir(b'dir'), True) + self.assertEqual(um.visitdir(b'dir/subdir'), True) + self.assertFalse(um.visitdir(b'dir/foo')) + self.assertFalse(um.visitdir(b'folder')) + self.assertEqual(um.visitdir(b'dir/subdir/z'), b'all') + self.assertEqual(um.visitdir(b'dir/subdir/x'), b'all') + + def testVisitchildrensetIncludeInclude4(self): + m1 = matchmod.match(b'', b'', include=[b'path:dir/subdir/x']) + m2 = matchmod.match(b'', b'', include=[b'path:dir/subdir/z']) + um = matchmod.unionmatcher([m1, m2]) + self.assertEqual(um.visitchildrenset(b'.'), {b'dir'}) + self.assertEqual(um.visitchildrenset(b'dir'), {b'subdir'}) + self.assertEqual(um.visitchildrenset(b'dir/subdir'), {b'x', b'z'}) + self.assertEqual(um.visitchildrenset(b'dir/foo'), set()) + self.assertEqual(um.visitchildrenset(b'folder'), set()) + self.assertEqual(um.visitchildrenset(b'dir/subdir/z'), b'all') + self.assertEqual(um.visitchildrenset(b'dir/subdir/x'), b'all') + +class SubdirMatcherTests(unittest.TestCase): + + def testVisitdir(self): + m = matchmod.match(b'', b'', include=[b'path:dir/subdir']) + sm = matchmod.subdirmatcher(b'dir', m) + + self.assertEqual(sm.visitdir(b'.'), True) + self.assertEqual(sm.visitdir(b'subdir'), b'all') + # OPT: These next two should probably be 'all' not True. + self.assertEqual(sm.visitdir(b'subdir/x'), True) + self.assertEqual(sm.visitdir(b'subdir/z'), True) + self.assertFalse(sm.visitdir(b'foo')) + + def testVisitchildrenset(self): + m = matchmod.match(b'', b'', include=[b'path:dir/subdir']) + sm = matchmod.subdirmatcher(b'dir', m) + + self.assertEqual(sm.visitchildrenset(b'.'), {b'subdir'}) + self.assertEqual(sm.visitchildrenset(b'subdir'), b'all') + # OPT: These next two should probably be 'all' not 'this'. + self.assertEqual(sm.visitchildrenset(b'subdir/x'), b'this') + self.assertEqual(sm.visitchildrenset(b'subdir/z'), b'this') + self.assertEqual(sm.visitchildrenset(b'foo'), set()) + +class PrefixdirMatcherTests(unittest.TestCase): + + def testVisitdir(self): + m = matchmod.match(util.localpath(b'root/d'), b'e/f', + [b'../a.txt', b'b.txt']) + pm = matchmod.prefixdirmatcher(b'root', b'd/e/f', b'd', m) + + # `m` elides 'd' because it's part of the root, and the rest of the + # patterns are relative. + self.assertEqual(bool(m(b'a.txt')), False) + self.assertEqual(bool(m(b'b.txt')), False) + self.assertEqual(bool(m(b'e/a.txt')), True) + self.assertEqual(bool(m(b'e/b.txt')), False) + self.assertEqual(bool(m(b'e/f/b.txt')), True) + + # The prefix matcher re-adds 'd' to the paths, so they need to be + # specified when using the prefixdirmatcher. 
+ self.assertEqual(bool(pm(b'a.txt')), False) + self.assertEqual(bool(pm(b'b.txt')), False) + self.assertEqual(bool(pm(b'd/e/a.txt')), True) + self.assertEqual(bool(pm(b'd/e/b.txt')), False) + self.assertEqual(bool(pm(b'd/e/f/b.txt')), True) + + self.assertEqual(m.visitdir(b'.'), True) + self.assertEqual(m.visitdir(b'e'), True) + self.assertEqual(m.visitdir(b'e/f'), True) + self.assertEqual(m.visitdir(b'e/f/g'), False) + + self.assertEqual(pm.visitdir(b'.'), True) + self.assertEqual(pm.visitdir(b'd'), True) + self.assertEqual(pm.visitdir(b'd/e'), True) + self.assertEqual(pm.visitdir(b'd/e/f'), True) + self.assertEqual(pm.visitdir(b'd/e/f/g'), False) + + def testVisitchildrenset(self): + m = matchmod.match(util.localpath(b'root/d'), b'e/f', + [b'../a.txt', b'b.txt']) + pm = matchmod.prefixdirmatcher(b'root', b'd/e/f', b'd', m) + + # OPT: visitchildrenset could possibly return {'e'} and {'f'} for these + # next two, respectively; patternmatcher does not have this + # optimization. + self.assertEqual(m.visitchildrenset(b'.'), b'this') + self.assertEqual(m.visitchildrenset(b'e'), b'this') + self.assertEqual(m.visitchildrenset(b'e/f'), b'this') + self.assertEqual(m.visitchildrenset(b'e/f/g'), set()) + + # OPT: visitchildrenset could possibly return {'d'}, {'e'}, and {'f'} + # for these next three, respectively; patternmatcher does not have this + # optimization. + self.assertEqual(pm.visitchildrenset(b'.'), b'this') + self.assertEqual(pm.visitchildrenset(b'd'), b'this') + self.assertEqual(pm.visitchildrenset(b'd/e'), b'this') + self.assertEqual(pm.visitchildrenset(b'd/e/f'), b'this') + self.assertEqual(pm.visitchildrenset(b'd/e/f/g'), set()) if __name__ == '__main__': silenttestrunner.main(__name__)
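Condensing the contract these tests pin down: visitchildrenset() returns b'all' (every path beneath matches), b'this' (visit this directory without a narrower hint), a set of child directory names worth descending into, or set() (prune the directory entirely). A sketch mirroring the IncludeMatcherTests above:

    from mercurial import match as matchmod

    m = matchmod.match(b'', b'', include=[b'path:dir/subdir'])
    print(m.visitchildrenset(b'.'))           # {b'dir'}
    print(m.visitchildrenset(b'dir'))         # {b'subdir'}
    print(m.visitchildrenset(b'dir/subdir'))  # b'all'
    print(m.visitchildrenset(b'folder'))      # set()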
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-merge-no-file-change.t Mon Aug 20 09:48:08 2018 -0700 @@ -0,0 +1,379 @@
+ $ cat <<'EOF' >> "$HGRCPATH"
+ > [extensions]
+ > convert =
+ > [templates]
+ > l = '{rev}:{node|short} p={p1rev},{p2rev} m={manifest} f={files|json}'
+ > EOF
+
+ $ check_convert_identity () {
+ > hg convert -q "$1" "$1.converted"
+ > hg outgoing -q -R "$1.converted" "$1"
+ > if [ "$?" != 1 ]; then
+ > echo '*** BUG: hash changes on convert ***'
+ > hg log -R "$1.converted" -GTl
+ > fi
+ > }
+
+Files added at both parents:
+
+ $ hg init added-both
+ $ cd added-both
+ $ touch a b c
+ $ hg ci -qAm0 a
+ $ hg ci -qAm1 b
+ $ hg up -q 0
+ $ hg ci -qAm2 c
+
+ $ hg merge
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci --debug -m merge
+ committing files:
+ b
+ not reusing manifest (no file change in changelog, but manifest differs)
+ committing manifest
+ committing changelog
+ updating the branch cache
+ committed changeset 3:7aa8a293f5d97377037afc21e871e036e718d659
+ $ hg log -GTl
+ @ 3:7aa8a293f5d9 p=2,1 m=3:8667461869a1 f=[]
+ |\
+ | o 2:e0ea47086fce p=0,-1 m=2:b2e5b07f9374 f=["c"]
+ | |
+ o | 1:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"]
+ |/
+ o 0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"]
+
+
+ $ cd ..
+ $ check_convert_identity added-both
+
+Files added at both parents, but the other removed at the merge:
+(In this case, ctx.files() after the commit contains the removed file "b", but
+its manifest does not differ from p1.)
+
+ $ hg init added-both-removed-at-merge
+ $ cd added-both-removed-at-merge
+ $ touch a b c
+ $ hg ci -qAm0 a
+ $ hg ci -qAm1 b
+ $ hg up -q 0
+ $ hg ci -qAm2 c
+
+ $ hg merge
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg rm -f b
+ $ hg ci --debug -m merge
+ committing files:
+ committing manifest
+ committing changelog
+ updating the branch cache
+ committed changeset 3:915745f3ca3d9d699925269474c2d0a9526e8dfa
+ $ hg log -GTl
+ @ 3:915745f3ca3d p=2,1 m=3:8e9cf3456921 f=["b"]
+ |\
+ | o 2:e0ea47086fce p=0,-1 m=2:b2e5b07f9374 f=["c"]
+ | |
+ o | 1:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"]
+ |/
+ o 0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"]
+
+
+ $ cd ..
+ $ check_convert_identity added-both-removed-at-merge
+
+An identical file added at both parents:
+
+ $ hg init added-identical
+ $ cd added-identical
+ $ touch a b
+ $ hg ci -qAm0 a
+ $ hg ci -qAm1 b
+ $ hg up -q 0
+ $ touch b
+ $ hg ci -qAm2 b
+
+ $ hg merge
+ 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
+ (branch merge, don't forget to commit)
+ $ hg ci --debug -m merge
+ reusing manifest from p1 (no file change)
+ committing changelog
+ updating the branch cache
+ committed changeset 3:de26182cd210f0c3fb175ca7616704ab963d3024
+ $ hg log -GTl
+ @ 3:de26182cd210 p=2,1 m=1:686dbf0aeca4 f=[]
+ |\
+ | o 2:f00991f11eca p=0,-1 m=1:686dbf0aeca4 f=["b"]
+ | |
+ o | 1:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"]
+ |/
+ o 0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"]
+
+
+ $ cd ..
+ $ check_convert_identity added-identical
+
+#if execbit
+
+An identical file added at both parents, but the flag differs. 
Take local: + + $ hg init flag-change-take-p1 + $ cd flag-change-take-p1 + $ touch a b + $ hg ci -qAm0 a + $ hg ci -qAm1 b + $ hg up -q 0 + $ touch b + $ chmod +x b + $ hg ci -qAm2 b + + $ hg merge + warning: cannot merge flags for b without common ancestor - keeping local flags + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ chmod +x b + $ hg ci --debug -m merge + committing files: + b + reusing manifest form p1 (listed files actually unchanged) + committing changelog + updating the branch cache + committed changeset 3:c8d50407916ef8a5a97cb6e36ca9bc844a6ee13e + $ hg log -GTl + @ 3:c8d50407916e p=2,1 m=2:36b69ba4b24b f=[] + |\ + | o 2:99451f16b3f5 p=0,-1 m=2:36b69ba4b24b f=["b"] + | | + o | 1:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"] + |/ + o 0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"] + + $ hg files -vr3 + 0 a + 0 x b + + $ cd .. + $ check_convert_identity flag-change-take-p1 + +An identical file added at both parents, but the flag differs. Take other: + + $ hg init flag-change-take-p2 + $ cd flag-change-take-p2 + $ touch a b + $ hg ci -qAm0 a + $ hg ci -qAm1 b + $ hg up -q 0 + $ touch b + $ chmod +x b + $ hg ci -qAm2 b + + $ hg merge + warning: cannot merge flags for b without common ancestor - keeping local flags + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ chmod -x b + $ hg ci --debug -m merge + committing files: + b + committing manifest + committing changelog + updating the branch cache + committed changeset 3:06a62a687d87c7d8944743dee1ee9d8c66b3f6e3 + $ hg log -GTl + @ 3:06a62a687d87 p=2,1 m=3:2a315ba1aa45 f=["b"] + |\ + | o 2:99451f16b3f5 p=0,-1 m=2:36b69ba4b24b f=["b"] + | | + o | 1:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"] + |/ + o 0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"] + + $ hg files -vr3 + 0 a + 0 b + + $ cd .. + $ check_convert_identity flag-change-take-p2 + +#endif + +An identical file added at both parents, one more file added at p2: + + $ hg init added-some-p2 + $ cd added-some-p2 + $ touch a b c + $ hg ci -qAm0 a + $ hg ci -qAm1 b + $ hg ci -qAm2 c + $ hg up -q 0 + $ touch b + $ hg ci -qAm3 b + + $ hg merge + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ hg ci --debug -m merge + committing files: + c + not reusing manifest (no file change in changelog, but manifest differs) + committing manifest + committing changelog + updating the branch cache + committed changeset 4:f7fbc4e4d9a8fde03ba475adad675578c8bf472d + $ hg log -GTl + @ 4:f7fbc4e4d9a8 p=3,2 m=3:92acd5bfd716 f=[] + |\ + | o 3:e9d9f3cc981f p=0,-1 m=1:686dbf0aeca4 f=["b"] + | | + o | 2:93c5529a4ec7 p=1,-1 m=2:ae25a31b30b3 f=["c"] + | | + o | 1:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"] + |/ + o 0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"] + + + $ cd .. + $ check_convert_identity added-some-p2 + +An identical file added at both parents, one more file added at p1: +(In this case, p1 manifest is reused at the merge commit, which means the +manifest DAG does not have the same shape as the changelog.) 
+ + $ hg init added-some-p1 + $ cd added-some-p1 + $ touch a b + $ hg ci -qAm0 a + $ hg ci -qAm1 b + $ hg up -q 0 + $ touch b c + $ hg ci -qAm2 b + $ hg ci -qAm3 c + + $ hg merge + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ hg ci --debug -m merge + reusing manifest from p1 (no file change) + committing changelog + updating the branch cache + committed changeset 4:a9f0f589a913f5a149dc10dfbd5af726977c36c4 + $ hg log -GTl + @ 4:a9f0f589a913 p=3,1 m=2:ae25a31b30b3 f=[] + |\ + | o 3:b8dc385241b5 p=2,-1 m=2:ae25a31b30b3 f=["c"] + | | + | o 2:f00991f11eca p=0,-1 m=1:686dbf0aeca4 f=["b"] + | | + o | 1:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"] + |/ + o 0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"] + + + $ cd .. + $ check_convert_identity added-some-p1 + +A file added at p2, a named branch created at p1: + + $ hg init named-branch-p1 + $ cd named-branch-p1 + $ touch a b + $ hg ci -qAm0 a + $ hg ci -qAm1 b + $ hg up -q 0 + $ hg branch -q foo + $ hg ci -m2 + + $ hg merge default + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ hg ci --debug -m merge + committing files: + b + not reusing manifest (no file change in changelog, but manifest differs) + committing manifest + committing changelog + updating the branch cache + committed changeset 3:fb97d83b02fd072295cfc2171f21b7d38509bfd7 + $ hg log -GT'{l} branch={branch}' + @ 3:fb97d83b02fd p=2,1 m=2:9091c64f4ea1 f=[] branch=foo + |\ + | o 2:a3a9fa6587e5 p=0,-1 m=0:8515d4bfda76 f=[] branch=foo + | | + o | 1:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"] branch=default + |/ + o 0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"] branch=default + + + $ cd .. + $ check_convert_identity named-branch-p1 + +A file added at p1, a named branch created at p2: +(In this case, p1 manifest is reused at the merge commit, which means the +manifest DAG does not have the same shape as the changelog.) + + $ hg init named-branch-p2 + $ cd named-branch-p2 + $ touch a b + $ hg ci -qAm0 a + $ hg branch -q foo + $ hg ci -m1 + $ hg up -q 0 + $ hg ci -qAm1 b + + $ hg merge foo + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ hg ci --debug -m merge + reusing manifest from p1 (no file change) + committing changelog + updating the branch cache + committed changeset 3:036823e24692218324d4af43b07ff89f8a000096 + $ hg log -GT'{l} branch={branch}' + @ 3:036823e24692 p=2,1 m=1:686dbf0aeca4 f=[] branch=default + |\ + | o 2:64d01526d4c2 p=0,-1 m=1:686dbf0aeca4 f=["b"] branch=default + | | + o | 1:da38c8e00727 p=0,-1 m=0:8515d4bfda76 f=[] branch=foo + |/ + o 0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"] branch=default + + + $ cd .. 
+ $ check_convert_identity named-branch-p2 + +A file changed once at both parents, but amended to have identical content: + + $ hg init amend-p1 + $ cd amend-p1 + $ touch a + $ hg ci -qAm0 a + $ echo foo > a + $ hg ci -m1 + $ hg up -q 0 + $ echo bar > a + $ hg ci -qm2 + $ echo foo > a + $ hg ci -qm3 --amend + + $ hg merge + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ hg ci --debug -m merge + reusing manifest from p1 (no file change) + committing changelog + updating the branch cache + committed changeset 3:314e5bc5adf5c58ea571efabe33eedba20a201aa + $ hg log -GT'{l} branch={branch}' + @ 3:314e5bc5adf5 p=2,1 m=1:d33ea248bd73 f=[] branch=default + |\ + | o 2:de9c64f226a3 p=0,-1 m=1:d33ea248bd73 f=["a"] branch=default + | | + o | 1:6a74aec01b3c p=0,-1 m=1:d33ea248bd73 f=["a"] branch=default + |/ + o 0:487a0a245cea p=-1,-1 m=0:8515d4bfda76 f=["a"] branch=default + + + $ cd .. + $ check_convert_identity amend-p1
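A hedged sketch (assuming it is run from inside one of the repositories built above) of checking the property this test file is about: a merge commit that printed "reusing manifest from p1 (no file change)" shares its manifest node with its first parent:

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui.load(), b'.')
    ctx = repo[b'tip']
    # rev() of a missing second parent is -1 (nullrev).
    if ctx.p2().rev() != -1 and ctx.manifestnode() == ctx.p1().manifestnode():
        print('merge commit reused the p1 manifest')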
--- a/tests/test-merge-tools.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-merge-tools.t Mon Aug 20 09:48:08 2018 -0700 @@ -1701,6 +1701,35 @@ 0 files updated, 1 files merged, 0 files removed, 0 files unresolved (branch merge, don't forget to commit) $ hg update -C 1 > /dev/null + +#else + +Match the non-portable filename commits above for test stability + + $ hg import --bypass -q - << EOF + > # HG changeset patch + > revision 5 + > + > diff --git a/"; exit 1; echo " b/"; exit 1; echo " + > new file mode 100644 + > --- /dev/null + > +++ b/"; exit 1; echo " + > @@ -0,0 +1,1 @@ + > +revision 5 + > EOF + + $ hg import --bypass -q - << EOF + > # HG changeset patch + > revision 6 + > + > diff --git a/"; exit 1; echo " b/"; exit 1; echo " + > new file mode 100644 + > --- /dev/null + > +++ b/"; exit 1; echo " + > @@ -0,0 +1,1 @@ + > +revision 6 + > EOF + #endif Merge post-processing @@ -1737,14 +1766,64 @@ # hg resolve --list U f -#if symlink +missingbinary is a merge-tool that doesn't exist: + + $ echo "missingbinary.executable=doesnotexist" >> .hg/hgrc + $ beforemerge + [merge-tools] + false.whatever= + true.priority=1 + true.executable=cat + missingbinary.executable=doesnotexist + # hg update -C 1 + $ hg merge -y -r 2 --config ui.merge=missingbinary + couldn't find merge tool missingbinary (for pattern f) + merging f + couldn't find merge tool missingbinary (for pattern f) + revision 1 + space + revision 0 + space + revision 2 + space + 0 files updated, 1 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + + $ hg update -q -C 1 + $ rm f internal merge cannot handle symlinks and shouldn't try: - $ hg update -q -C 1 - $ rm f +#if symlink + $ ln -s symlink f $ hg commit -qm 'f is symlink' + +#else + + $ hg import --bypass -q - << EOF + > # HG changeset patch + > f is symlink + > + > diff --git a/f b/f + > old mode 100644 + > new mode 120000 + > --- a/f + > +++ b/f + > @@ -1,2 +1,1 @@ + > -revision 1 + > -space + > +symlink + > \ No newline at end of file + > EOF + +Resolve 'other [destination] changed f which local [working copy] deleted' prompt + $ hg up -q -C --config ui.interactive=True << EOF + > c + > EOF + +#endif + $ hg merge -r 2 --tool internal:merge merging f warning: internal :merge cannot merge symlinks for f @@ -1753,8 +1832,6 @@ use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon [1] -#endif - Verify naming of temporary files and that extension is preserved: $ hg update -q -C 1 @@ -1782,6 +1859,86 @@ 0 files updated, 1 files merged, 0 files removed, 0 files unresolved (branch merge, don't forget to commit) +Binary files capability checking + + $ hg update -q -C 0 + $ python <<EOF + > with open('b', 'wb') as fp: + > fp.write(b'\x00\x01\x02\x03') + > EOF + $ hg add b + $ hg commit -qm "add binary file (#1)" + + $ hg update -q -C 0 + $ python <<EOF + > with open('b', 'wb') as fp: + > fp.write(b'\x03\x02\x01\x00') + > EOF + $ hg add b + $ hg commit -qm "add binary file (#2)" + +By default, binary files capability of internal merge tools is not +checked strictly. + +(for merge-patterns, chosen unintentionally) + + $ hg merge 9 \ + > --config merge-patterns.b=:merge-other \ + > --config merge-patterns.re:[a-z]=:other + warning: check merge-patterns configurations, if ':merge-other' for binary file 'b' is unintentional + (see 'hg help merge-tools' for binary files capability) + merging b + warning: b looks like a binary file. 
+  0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+  use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+  [1]
+  $ hg merge --abort -q
+
+(for ui.merge, ignored unintentionally)
+
+  $ hg merge 9 \
+  > --config ui.merge=:other
+  tool :other (for pattern b) can't handle binary
+  tool true can't handle binary
+  tool false can't handle binary
+  no tool found to merge b
+  keep (l)ocal [working copy], take (o)ther [merge rev], or leave (u)nresolved for b? u
+  0 files updated, 0 files merged, 0 files removed, 1 files unresolved
+  use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon
+  [1]
+  $ hg merge --abort -q
+
+With merge.strict-capability-check=true, binary files capability of
+internal merge tools is checked strictly.
+
+  $ f --hexdump b
+  b:
+  0000: 03 02 01 00 |....|
+
+(for merge-patterns)
+
+  $ hg merge 9 --config merge.strict-capability-check=true \
+  > --config merge-patterns.b=:merge-other \
+  > --config merge-patterns.re:[a-z]=:other
+  tool :merge-other (for pattern b) can't handle binary
+  0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ f --hexdump b
+  b:
+  0000: 00 01 02 03 |....|
+  $ hg merge --abort -q
+
+(for ui.merge)
+
+  $ hg merge 9 --config merge.strict-capability-check=true \
+  > --config ui.merge=:other
+  0 files updated, 1 files merged, 0 files removed, 0 files unresolved
+  (branch merge, don't forget to commit)
+  $ f --hexdump b
+  b:
+  0000: 00 01 02 03 |....|
+  $ hg merge --abort -q
+
 Check that debugpicktool examines which merge tool is chosen for
 specified file as expected
 
@@ -1790,6 +1947,7 @@
   false.whatever=
   true.priority=1
   true.executable=cat
+  missingbinary.executable=doesnotexist
   # hg update -C 1
 
 (default behavior: checking files in the working parent context)
@@ -1812,9 +1970,9 @@
 
 (-r REV causes checking files in specified revision)
 
-  $ hg manifest -r tip
+  $ hg manifest -r 8
   f.txt
-  $ hg debugpickmergetool -r tip
+  $ hg debugpickmergetool -r 8
   f.txt = true
 
 #if symlink
@@ -1824,6 +1982,36 @@
   $ hg debugpickmergetool -r 6d00b3726f6e
   f = :prompt
 
+(by default, it is assumed that no internal merge tool has symlink
+capability)
+
+  $ hg debugpickmergetool \
+  > -r 6d00b3726f6e \
+  > --config merge-patterns.f=:merge-other \
+  > --config merge-patterns.re:[f]=:merge-local \
+  > --config merge-patterns.re:[a-z]=:other
+  f = :prompt
+
+  $ hg debugpickmergetool \
+  > -r 6d00b3726f6e \
+  > --config ui.merge=:other
+  f = :prompt
+
+(with strict-capability-check=true, actual symlink capabilities are
+checked strictly)
+
+  $ hg debugpickmergetool --config merge.strict-capability-check=true \
+  > -r 6d00b3726f6e \
+  > --config merge-patterns.f=:merge-other \
+  > --config merge-patterns.re:[f]=:merge-local \
+  > --config merge-patterns.re:[a-z]=:other
+  f = :other
+
+  $ hg debugpickmergetool --config merge.strict-capability-check=true \
+  > -r 6d00b3726f6e \
+  > --config ui.merge=:other
+  f = :other
+
 #endif
 
 (--verbose shows some configurations)
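Read together, these hunks pin down the selection rule for internal tools on
binary (and symlink) files: with merge.strict-capability-check=true the tool's
actual declared capability is consulted, while without it a tool picked via
merge-patterns is trusted blindly (hg only warns that the match may be
unintentional) and a tool picked via ui.merge is assumed incapable, so hg
falls back to other tools or prompts. A toy model of that rule; this
simplification is mine, not the patch's code, which lives in
mercurial/filemerge.py:

    def internaltoolcapable(frompatterns, actuallycapable, strict):
        # Decide whether an internal merge tool may handle a binary file.
        if strict:
            # strict-capability-check: consult the tool's real capability
            # regardless of how it was selected
            return actuallycapable
        # legacy behavior: merge-patterns selections are assumed capable
        # (with a warning), ui.merge selections are assumed incapable
        return frompatterns

This explains why, above, :other is rejected for the binary file under
ui.merge without the strict check, but merges it cleanly with the check on.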
--- a/tests/test-narrow-clone-no-ellipsis.t Sun Aug 19 13:27:02 2018 +0900
+++ b/tests/test-narrow-clone-no-ellipsis.t Mon Aug 20 09:48:08 2018 -0700
@@ -30,10 +30,8 @@
   store
   testonly-simplestore (reposimplestore !)
 
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir/src/f10
-  [excludes]
+  $ hg tracked
+  I path:dir/src/f10
   $ hg update
   1 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ find * | sort
@@ -55,11 +53,9 @@
   added 40 changesets with 19 changes to 19 files
   new changesets *:* (glob)
   $ cd narrowdir
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir/tests
-  [excludes]
-  path:dir/tests/t19
+  $ hg tracked
+  I path:dir/tests
+  X path:dir/tests/t19
   $ hg update
   19 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ find * | sort
@@ -97,11 +93,9 @@
   added 40 changesets with 20 changes to 20 files
   new changesets *:* (glob)
   $ cd narrowroot
-  $ cat .hg/narrowspec
-  [includes]
-  path:.
-  [excludes]
-  path:dir/tests
+  $ hg tracked
+  I path:.
+  X path:dir/tests
   $ hg update
   20 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ find * | sort
--- a/tests/test-narrow-clone.t Sun Aug 19 13:27:02 2018 +0900
+++ b/tests/test-narrow-clone.t Mon Aug 20 09:48:08 2018 -0700
@@ -34,10 +34,8 @@
   store
   testonly-simplestore (reposimplestore !)
 
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir/src/f10
-  [excludes]
+  $ hg tracked
+  I path:dir/src/f10
   $ hg tracked
   I path:dir/src/f10
   $ hg update
@@ -69,11 +67,9 @@
   added 21 changesets with 19 changes to 19 files
   new changesets *:* (glob)
   $ cd narrowdir
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir/tests
-  [excludes]
-  path:dir/tests/t19
+  $ hg tracked
+  I path:dir/tests
+  X path:dir/tests/t19
   $ hg tracked
   I path:dir/tests
   X path:dir/tests/t19
@@ -114,11 +110,9 @@
   added 21 changesets with 20 changes to 20 files
   new changesets *:* (glob)
   $ cd narrowroot
-  $ cat .hg/narrowspec
-  [includes]
-  path:.
-  [excludes]
-  path:dir/tests
+  $ hg tracked
+  I path:.
+  X path:dir/tests
   $ hg tracked
   I path:.
   X path:dir/tests
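Both narrow clone tests now go through the hg tracked command instead of
reading the private .hg/narrowspec file; its output prefixes each pattern
with I (include) or X (exclude). A script consuming the same information
could parse it like this (format taken from the expected output above):

    def parsetracked(output):
        # Split `hg tracked` output into include/exclude pattern lists.
        includes, excludes = [], []
        for line in output.splitlines():
            if line.startswith('I '):
                includes.append(line[2:])
            elif line.startswith('X '):
                excludes.append(line[2:])
        return includes, excludes

    # parsetracked("I path:dir1\nX path:dir1/dirA\n")
    # -> (['path:dir1'], ['path:dir1/dirA'])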
--- a/tests/test-narrow-debugcommands.t Sun Aug 19 13:27:02 2018 +0900
+++ b/tests/test-narrow-debugcommands.t Mon Aug 20 09:48:08 2018 -0700
@@ -1,10 +1,10 @@
   $ . "$TESTDIR/narrow-library.sh"
   $ hg init repo
   $ cd repo
-  $ cat << EOF > .hg/narrowspec
-  > [includes]
+  $ cat << EOF > .hg/store/narrowspec
+  > [include]
   > path:foo
-  > [excludes]
+  > [exclude]
   > EOF
   $ echo treemanifest >> .hg/requires
   $ echo narrowhg-experimental >> .hg/requires
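This hunk records two format changes at once: the narrowspec now lives under
.hg/store/ rather than .hg/, and the section names lost their plural s
([include]/[exclude]). A sketch of seeding a spec by hand the way the test
does; writing this file directly is test-only plumbing, not a public API:

    import os

    def writenarrowspec(repopath, includes, excludes):
        # Layout per the test above: [include]/[exclude] sections under
        # .hg/store/narrowspec.
        spec = ['[include]'] + includes + ['[exclude]'] + excludes
        path = os.path.join(repopath, '.hg', 'store', 'narrowspec')
        with open(path, 'w') as fp:
            fp.write('\n'.join(spec) + '\n')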
--- a/tests/test-narrow-expanddirstate.t Sun Aug 19 13:27:02 2018 +0900
+++ b/tests/test-narrow-expanddirstate.t Mon Aug 20 09:48:08 2018 -0700
@@ -27,16 +27,16 @@
   $ mkdir outside
   $ echo other_contents > outside/f2
-  $ grep outside .hg/narrowspec
+  $ hg tracked | grep outside
   [1]
-  $ grep outside .hg/dirstate
+  $ hg files | grep outside
   [1]
   $ hg status
 
 `hg status` did not add outside.
-  $ grep outside .hg/narrowspec
+  $ hg tracked | grep outside
   [1]
-  $ grep outside .hg/dirstate
+  $ hg files | grep outside
   [1]
 
 Unfortunately this is not really a candidate for adding to narrowhg proper,
@@ -115,12 +115,12 @@
 `hg status` will now add outside, but not patchdir.
   $ DIRSTATEINCLUDES=path:outside hg status
   M outside/f2
-  $ grep outside .hg/narrowspec
-  path:outside
-  $ grep outside .hg/dirstate > /dev/null
-  $ grep patchdir .hg/narrowspec
+  $ hg tracked | grep outside
+  I path:outside
+  $ hg files | grep outside > /dev/null
+  $ hg tracked | grep patchdir
   [1]
-  $ grep patchdir .hg/dirstate
+  $ hg files | grep patchdir
   [1]
 
 Get rid of the modification to outside/f2.
@@ -142,9 +142,9 @@
   1 out of 1 hunks FAILED -- saving rejects to file patchdir/f3.rej
   abort: patch failed to apply
   [255]
-  $ grep patchdir .hg/narrowspec
+  $ hg tracked | grep patchdir
   [1]
-  $ grep patchdir .hg/dirstate > /dev/null
+  $ hg files | grep patchdir > /dev/null
   [1]
 
 Let's make it apply cleanly and see that it *did* expand properly
@@ -159,6 +159,6 @@
   applying $TESTTMP/foo.patch
   $ cat patchdir/f3
   patched_this
-  $ grep patchdir .hg/narrowspec
-  path:patchdir
-  $ grep patchdir .hg/dirstate > /dev/null
+  $ hg tracked | grep patchdir
+  I path:patchdir
+  $ hg files | grep patchdir > /dev/null
--- a/tests/test-narrow-patterns.t Sun Aug 19 13:27:02 2018 +0900
+++ b/tests/test-narrow-patterns.t Mon Aug 20 09:48:08 2018 -0700
@@ -88,15 +88,13 @@
   4 files updated, 0 files merged, 0 files removed, 0 files unresolved
 
   $ cd narrow
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir1
-  path:dir2
-  [excludes]
-  path:dir1/dirA
-  path:dir1/dirB
-  path:dir2/dirA
-  path:dir2/dirB
+  $ hg tracked
+  I path:dir1
+  I path:dir2
+  X path:dir1/dirA
+  X path:dir1/dirB
+  X path:dir2/dirA
+  X path:dir2/dirB
   $ hg manifest -r tip
   dir1/bar
   dir1/dirA/bar
@@ -144,14 +142,12 @@
   adding file changes
   added 9 changesets with 6 changes to 6 files
   new changesets *:* (glob)
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir1
-  path:dir2
-  [excludes]
-  path:dir1/dirB
-  path:dir2/dirA
-  path:dir2/dirB
+  $ hg tracked
+  I path:dir1
+  I path:dir2
+  X path:dir1/dirB
+  X path:dir2/dirA
+  X path:dir2/dirB
   $ find * | sort
   dir1
   dir1/bar
@@ -206,14 +202,12 @@
   adding file changes
   added 11 changesets with 7 changes to 7 files
   new changesets *:* (glob)
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir1
-  path:dir2
-  [excludes]
-  path:dir1/dirA/bar
-  path:dir1/dirB
-  path:dir2/dirA
+  $ hg tracked
+  I path:dir1
+  I path:dir2
+  X path:dir1/dirA/bar
+  X path:dir1/dirB
+  X path:dir2/dirA
   $ find * | sort
   dir1
   dir1/bar
@@ -266,14 +260,12 @@
   adding file changes
   added 13 changesets with 8 changes to 8 files
   new changesets *:* (glob)
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir1
-  path:dir2
-  [excludes]
-  path:dir1/dirA
-  path:dir1/dirA/bar
-  path:dir1/dirB
+  $ hg tracked
+  I path:dir1
+  I path:dir2
+  X path:dir1/dirA
+  X path:dir1/dirA/bar
+  X path:dir1/dirB
   $ find * | sort
   dir1
   dir1/bar
@@ -327,13 +319,11 @@
   adding file changes
   added 13 changesets with 9 changes to 9 files
   new changesets *:* (glob)
-  $ cat .hg/narrowspec
-  [includes]
-  path:dir1
-  path:dir2
-  [excludes]
-  path:dir1/dirA/bar
-  path:dir1/dirB
+  $ hg tracked
+  I path:dir1
+  I path:dir2
+  X path:dir1/dirA/bar
+  X path:dir1/dirB
   $ find * | sort
   dir1
   dir1/bar
--- a/tests/test-narrow-pull.t Sun Aug 19 13:27:02 2018 +0900
+++ b/tests/test-narrow-pull.t Mon Aug 20 09:48:08 2018 -0700
@@ -166,7 +166,6 @@
 We should also be able to unshare without breaking everything:
 
   $ hg unshare
-  devel-warn: write with no wlock: "narrowspec" at: */hgext/narrow/narrowrepo.py:* (unsharenarrowspec) (glob)
   $ hg verify
   checking changesets
   checking manifests
--- a/tests/test-parseindex2.py Sun Aug 19 13:27:02 2018 +0900
+++ b/tests/test-parseindex2.py Mon Aug 20 09:48:08 2018 -0700
@@ -8,12 +8,14 @@
 import struct
 import subprocess
 import sys
+import unittest
 
 from mercurial.node import (
     nullid,
     nullrev,
 )
 from mercurial import (
+    node as nodemod,
     policy,
     pycompat,
 )
@@ -61,9 +63,6 @@
     e[0] = offset_type(0, type)
     index[0] = tuple(e)
 
-    # add the magic null revision at -1
-    index.append((0, 0, 0, -1, -1, -1, -1, nullid))
-
     return index, cache
 
 data_inlined = (
@@ -132,88 +131,92 @@
                          stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
     return p.communicate()  # returns stdout, stderr
 
-def printhexfail(testnumber, hexversion, stdout, expected):
+def hexfailmsg(testnumber, hexversion, stdout, expected):
     try:
         hexstring = hex(hexversion)
     except TypeError:
         hexstring = None
-    print("FAILED: version test #%s with Python %s and patched "
-          "sys.hexversion %r (%r):\n Expected %s but got:\n-->'%s'\n" %
-          (testnumber, sys.version_info, hexversion, hexstring, expected,
-           stdout))
-
-def testversionokay(testnumber, hexversion):
-    stdout, stderr = importparsers(hexversion)
-    if stdout:
-        printhexfail(testnumber, hexversion, stdout, expected="no stdout")
-
-def testversionfail(testnumber, hexversion):
-    stdout, stderr = importparsers(hexversion)
-    # We include versionerrortext to distinguish from other ImportErrors.
-    errtext = b"ImportError: %s" % pycompat.sysbytes(parsers.versionerrortext)
-    if errtext not in stdout:
-        printhexfail(testnumber, hexversion, stdout,
-                     expected="stdout to contain %r" % errtext)
+    return ("FAILED: version test #%s with Python %s and patched "
+            "sys.hexversion %r (%r):\n Expected %s but got:\n-->'%s'\n" %
+            (testnumber, sys.version_info, hexversion, hexstring, expected,
+             stdout))
 
 def makehex(major, minor, micro):
     return int("%x%02x%02x00" % (major, minor, micro), 16)
 
-def runversiontests():
-    """Check the version-detection logic when importing parsers."""
-    info = sys.version_info
-    major, minor, micro = info[0], info[1], info[2]
-    # Test same major-minor versions.
-    testversionokay(1, makehex(major, minor, micro))
-    testversionokay(2, makehex(major, minor, micro + 1))
-    # Test different major-minor versions.
-    testversionfail(3, makehex(major + 1, minor, micro))
-    testversionfail(4, makehex(major, minor + 1, micro))
-    testversionfail(5, "'foo'")
+class parseindex2tests(unittest.TestCase):
+
+    def assertversionokay(self, testnumber, hexversion):
+        stdout, stderr = importparsers(hexversion)
+        self.assertFalse(
+            stdout, hexfailmsg(testnumber, hexversion, stdout, 'no stdout'))
+
+    def assertversionfail(self, testnumber, hexversion):
+        stdout, stderr = importparsers(hexversion)
+        # We include versionerrortext to distinguish from other ImportErrors.
+        errtext = b"ImportError: %s" % pycompat.sysbytes(
+            parsers.versionerrortext)
+        self.assertIn(errtext, stdout,
+                      hexfailmsg(testnumber, hexversion, stdout,
+                                 expected="stdout to contain %r" % errtext))
 
-def runtest() :
-    # Only test the version-detection logic if it is present.
-    try:
-        parsers.versionerrortext
-    except AttributeError:
-        pass
-    else:
-        runversiontests()
+    def testversiondetection(self):
+        """Check the version-detection logic when importing parsers."""
+        # Only test the version-detection logic if it is present.
+        try:
+            parsers.versionerrortext
+        except AttributeError:
+            return
+        info = sys.version_info
+        major, minor, micro = info[0], info[1], info[2]
+        # Test same major-minor versions.
+        self.assertversionokay(1, makehex(major, minor, micro))
+        self.assertversionokay(2, makehex(major, minor, micro + 1))
+        # Test different major-minor versions.
+        self.assertversionfail(3, makehex(major + 1, minor, micro))
+        self.assertversionfail(4, makehex(major, minor + 1, micro))
+        self.assertversionfail(5, "'foo'")
 
-    # Check that parse_index2() raises TypeError on bad arguments.
-    try:
-        parse_index2(0, True)
-    except TypeError:
-        pass
-    else:
-        print("Expected to get TypeError.")
+    def testbadargs(self):
+        # Check that parse_index2() raises TypeError on bad arguments.
+        with self.assertRaises(TypeError):
+            parse_index2(0, True)
 
-    # Check parsers.parse_index2() on an index file against the original
-    # Python implementation of parseindex, both with and without inlined data.
-
-    py_res_1 = py_parseindex(data_inlined, True)
-    c_res_1 = parse_index2(data_inlined, True)
+    def testparseindexfile(self):
+        # Check parsers.parse_index2() on an index file against the
+        # original Python implementation of parseindex, both with and
+        # without inlined data.
 
-    py_res_2 = py_parseindex(data_non_inlined, False)
-    c_res_2 = parse_index2(data_non_inlined, False)
+        want = py_parseindex(data_inlined, True)
+        got = parse_index2(data_inlined, True)
+        self.assertEqual(want, got)  # inline data
 
-    if py_res_1 != c_res_1:
-        print("Parse index result (with inlined data) differs!")
-
-    if py_res_2 != c_res_2:
-        print("Parse index result (no inlined data) differs!")
+        want = py_parseindex(data_non_inlined, False)
+        got = parse_index2(data_non_inlined, False)
+        self.assertEqual(want, got)  # no inline data
 
-    ix = parsers.parse_index2(data_inlined, True)[0]
-    for i, r in enumerate(ix):
-        if r[7] == nullid:
-            i = -1
-        try:
-            if ix[r[7]] != i:
-                print('Reverse lookup inconsistent for %r'
-                      % r[7].encode('hex'))
-        except TypeError:
-            # pure version doesn't support this
-            break
+        ix = parsers.parse_index2(data_inlined, True)[0]
+        for i, r in enumerate(ix):
+            if r[7] == nullid:
+                i = -1
+            try:
+                self.assertEqual(
+                    ix[r[7]], i,
+                    'Reverse lookup inconsistent for %r' % nodemod.hex(r[7]))
+            except TypeError:
+                # pure version doesn't support this
+                break
 
-    print("done")
+    def testminusone(self):
+        want = (0, 0, 0, -1, -1, -1, -1, nullid)
+        index, junk = parsers.parse_index2(data_inlined, True)
+        got = index[-1]
+        self.assertEqual(want, got)  # inline data
 
-runtest()
+        index, junk = parsers.parse_index2(data_non_inlined, False)
+        got = index[-1]
+        self.assertEqual(want, got)  # no inline data
+
+if __name__ == '__main__':
+    import silenttestrunner
+    silenttestrunner.main(__name__)
--- a/tests/test-parseindex2.py.out Sun Aug 19 13:27:02 2018 +0900
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,1 +0,0 @@
-done
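The .out file can be deleted because the unittest version above no longer
prints "done"; silenttestrunner, a helper module in Mercurial's tests/
directory, stays silent on success. The conversion pattern in miniature,
with the test body invented for illustration:

    import unittest

    class exampletests(unittest.TestCase):
        def testbadargs(self):
            # assertRaises replaces the old try/except-and-print dance
            with self.assertRaises(TypeError):
                int(None)

    if __name__ == '__main__':
        import silenttestrunner  # from Mercurial's tests/ directory
        silenttestrunner.main(__name__)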
--- a/tests/test-patchbomb-bookmark.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-patchbomb-bookmark.t Mon Aug 20 09:48:08 2018 -0700 @@ -35,7 +35,7 @@ Content-Type: text/plain; charset="us-ascii" Content-Transfer-Encoding: 7bit Subject: [PATCH 0 of 2] bookmark - Message-Id: <patchbomb.347155260@*> (glob) + Message-Id: <patchbomb.347155260@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1981 00:01:00 +0000 From: test @@ -50,10 +50,10 @@ X-Mercurial-Node: accde9b8b6dce861c185d0825c1affc09a79cb26 X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 2 - Message-Id: <accde9b8b6dce861c185.347155261@*> (glob) - X-Mercurial-Series-Id: <accde9b8b6dce861c185.347155261@*> (glob) - In-Reply-To: <patchbomb.347155260@*> (glob) - References: <patchbomb.347155260@*> (glob) + Message-Id: <accde9b8b6dce861c185.347155261@test-hostname> + X-Mercurial-Series-Id: <accde9b8b6dce861c185.347155261@test-hostname> + In-Reply-To: <patchbomb.347155260@test-hostname> + References: <patchbomb.347155260@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1981 00:01:01 +0000 From: test @@ -81,10 +81,10 @@ X-Mercurial-Node: 417defd1559c396ba06a44dce8dc1c2d2d653f3f X-Mercurial-Series-Index: 2 X-Mercurial-Series-Total: 2 - Message-Id: <417defd1559c396ba06a.347155262@*> (glob) - X-Mercurial-Series-Id: <accde9b8b6dce861c185.347155261@*> (glob) - In-Reply-To: <patchbomb.347155260@*> (glob) - References: <patchbomb.347155260@*> (glob) + Message-Id: <417defd1559c396ba06a.347155262@test-hostname> + X-Mercurial-Series-Id: <accde9b8b6dce861c185.347155261@test-hostname> + In-Reply-To: <patchbomb.347155260@test-hostname> + References: <patchbomb.347155260@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1981 00:01:02 +0000 From: test @@ -145,8 +145,8 @@ X-Mercurial-Node: 8dab2639fd35f1e337ad866c372a5c44f1064e3c X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 1 - Message-Id: <8dab2639fd35f1e337ad.378691260@*> (glob) - X-Mercurial-Series-Id: <8dab2639fd35f1e337ad.378691260@*> (glob) + Message-Id: <8dab2639fd35f1e337ad.378691260@test-hostname> + X-Mercurial-Series-Id: <8dab2639fd35f1e337ad.378691260@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Fri, 01 Jan 1982 00:01:00 +0000 From: test
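The patchbomb tests can drop their (glob) wildcards because the host part of
each Message-Id is now deterministic in the test environment (test-hostname).
The identifiers follow a fixed shape visible in the expected output: a
20-hex-digit node prefix, a per-message timestamp, and the host. A sketch of
that construction, inferred from the output rather than taken from the
patchbomb source:

    def messageid(node, timestamp, host):
        # e.g. messageid('accde9b8b6dce861c185d0825c1affc09a79cb26',
        #                347155261, 'test-hostname')
        # -> '<accde9b8b6dce861c185.347155261@test-hostname>'
        return '<%s.%d@%s>' % (node[:20], timestamp, host)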
--- a/tests/test-patchbomb.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-patchbomb.t Mon Aug 20 09:48:08 2018 -0700 @@ -2,7 +2,6 @@ wildcards in test expectations due to how many things like hostnames tend to make it into outputs. As a result, you may need to perform the following regular expression substitutions: -@$HOSTNAME> -> @*> (glob) Mercurial-patchbomb/.* -> Mercurial-patchbomb/* (glob) /mixed; boundary="===+[0-9]+==" -> /mixed; boundary="===*== (glob)" --===+[0-9]+=+--$ -> --===*=-- (glob) @@ -45,8 +44,8 @@ X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 1 - Message-Id: <8580ff50825a50c8f716.60@*> (glob) - X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@*> (glob) + Message-Id: <8580ff50825a50c8f716.60@test-hostname> + X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:00 +0000 From: quux @@ -84,8 +83,8 @@ X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 1 - Message-Id: <*@*> (glob) - X-Mercurial-Series-Id: <*@*> (glob) + Message-Id: <8580ff50825a50c8f716.60@test-hostname> + X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:00 +0000 From: quux @@ -159,8 +158,8 @@ X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 1 - Message-Id: <8580ff50825a50c8f716.60@*> (glob) - X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@*> (glob) + Message-Id: <8580ff50825a50c8f716.60@test-hostname> + X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:00 +0000 From: quux @@ -197,8 +196,8 @@ X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 1 - Message-Id: <8580ff50825a50c8f716.60@*> (glob) - X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@*> (glob) + Message-Id: <8580ff50825a50c8f716.60@test-hostname> + X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:00 +0000 From: quux @@ -236,7 +235,7 @@ Content-Type: text/plain; charset="us-ascii" Content-Transfer-Encoding: 7bit Subject: [PATCH 0 of 2] test - Message-Id: <patchbomb.120@*> (glob) + Message-Id: <patchbomb.120@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:02:00 +0000 From: quux @@ -252,10 +251,10 @@ X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 2 - Message-Id: <8580ff50825a50c8f716.121@*> (glob) - X-Mercurial-Series-Id: <8580ff50825a50c8f716.121@*> (glob) - In-Reply-To: <patchbomb.120@*> (glob) - References: <patchbomb.120@*> (glob) + Message-Id: <8580ff50825a50c8f716.121@test-hostname> + X-Mercurial-Series-Id: <8580ff50825a50c8f716.121@test-hostname> + In-Reply-To: <patchbomb.120@test-hostname> + References: <patchbomb.120@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:02:01 +0000 From: quux @@ -284,10 +283,10 @@ X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9 X-Mercurial-Series-Index: 2 X-Mercurial-Series-Total: 2 - Message-Id: <97d72e5f12c7e84f8506.122@*> (glob) - X-Mercurial-Series-Id: <8580ff50825a50c8f716.121@*> (glob) - In-Reply-To: <patchbomb.120@*> (glob) - References: <patchbomb.120@*> (glob) + Message-Id: 
<97d72e5f12c7e84f8506.122@test-hostname> + X-Mercurial-Series-Id: <8580ff50825a50c8f716.121@test-hostname> + In-Reply-To: <patchbomb.120@test-hostname> + References: <patchbomb.120@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:02:02 +0000 From: quux @@ -366,7 +365,7 @@ Content-Type: multipart/mixed; boundary="===*==" (glob) MIME-Version: 1.0 Subject: test - Message-Id: <patchbomb.180@*> (glob) + Message-Id: <patchbomb.180@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:03:00 +0000 From: quux @@ -412,7 +411,7 @@ Content-Type: multipart/mixed; boundary="===*==" (glob) MIME-Version: 1.0 Subject: test - Message-Id: <patchbomb.180@*> (glob) + Message-Id: <patchbomb.180@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:03:00 +0000 From: quux @@ -439,10 +438,11 @@ CgZcySARUyA2A2LGZKiZ3Y+Lu786z4z4MWXmsrAZCsqrl1az5y21PMcjpbThzWeXGT+/nutbmvvz zXYS3BoGxdrJDIYmlimJJiZpRokmqYYmaSYWFknmSSkmhqbmliamiZYWxuYmBhbJBgZcUBNZQe5K Epm7xF/LT+RLx/a9juFTomaYO/Rgsx4rwBN+IMCUDLOKAQBrsmti + (?) --===============*==-- (glob) utf-8 patch: - $ $PYTHON -c 'fp = open("utf", "wb"); fp.write("h\xC3\xB6mma!\n"); fp.close();' + $ $PYTHON -c 'fp = open("utf", "wb"); fp.write(b"h\xC3\xB6mma!\n"); fp.close();' $ hg commit -A -d '4 0' -m 'utf-8 content' adding description adding utf @@ -454,14 +454,14 @@ displaying [PATCH] utf-8 content ... MIME-Version: 1.0 - Content-Type: text/plain; charset="us-ascii" - Content-Transfer-Encoding: 8bit + Content-Type: text/plain; charset="iso-8859-1" + Content-Transfer-Encoding: quoted-printable Subject: [PATCH] utf-8 content X-Mercurial-Node: 909a00e13e9d78b575aeee23dddbada46d5a143f X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 1 - Message-Id: <909a00e13e9d78b575ae.240@*> (glob) - X-Mercurial-Series-Id: <909a00e13e9d78b575ae.240@*> (glob) + Message-Id: <909a00e13e9d78b575ae.240@test-hostname> + X-Mercurial-Series-Id: <909a00e13e9d78b575ae.240@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:04:00 +0000 From: quux @@ -487,7 +487,7 @@ --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/utf Thu Jan 01 00:00:04 1970 +0000 @@ -0,0 +1,1 @@ - +h\xc3\xb6mma! (esc) + +h=C3=B6mma! mime encoded mbox (base64): @@ -506,8 +506,8 @@ X-Mercurial-Node: 909a00e13e9d78b575aeee23dddbada46d5a143f X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 1 - Message-Id: <909a00e13e9d78b575ae.240@*> (glob) - X-Mercurial-Series-Id: <909a00e13e9d78b575ae.240@*> (glob) + Message-Id: <909a00e13e9d78b575ae.240@test-hostname> + X-Mercurial-Series-Id: <909a00e13e9d78b575ae.240@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:04:00 +0000 From: Q <quux> @@ -526,7 +526,14 @@ QEAgLTAsMCArMSwxIEBACitow7ZtbWEhCg== - $ $PYTHON -c 'print open("mbox").read().split("\n\n")[1].decode("base64")' + >>> import base64 + >>> patch = base64.b64decode(open("mbox").read().split("\n\n")[1]) + >>> if not isinstance(patch, str): + ... import sys + ... sys.stdout.flush() + ... junk = sys.stdout.buffer.write(patch + b"\n") + ... else: + ... 
print(patch) # HG changeset patch # User test # Date 4 0 @@ -551,7 +558,7 @@ $ rm mbox mime encoded mbox (quoted-printable): - $ $PYTHON -c 'fp = open("long", "wb"); fp.write("%s\nfoo\n\nbar\n" % ("x" * 1024)); fp.close();' + $ $PYTHON -c 'fp = open("long", "wb"); fp.write(b"%s\nfoo\n\nbar\n" % (b"x" * 1024)); fp.close();' $ hg commit -A -d '4 0' -m 'long line' adding long @@ -568,8 +575,8 @@ X-Mercurial-Node: a2ea8fc83dd8b93cfd86ac97b28287204ab806e1 X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 1 - Message-Id: <a2ea8fc83dd8b93cfd86.240@*> (glob) - X-Mercurial-Series-Id: <a2ea8fc83dd8b93cfd86.240@*> (glob) + Message-Id: <a2ea8fc83dd8b93cfd86.240@test-hostname> + X-Mercurial-Series-Id: <a2ea8fc83dd8b93cfd86.240@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:04:00 +0000 From: quux @@ -622,8 +629,8 @@ X-Mercurial-Node: a2ea8fc83dd8b93cfd86ac97b28287204ab806e1 X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 1 - Message-Id: <a2ea8fc83dd8b93cfd86.240@*> (glob) - X-Mercurial-Series-Id: <a2ea8fc83dd8b93cfd86.240@*> (glob) + Message-Id: <a2ea8fc83dd8b93cfd86.240@test-hostname> + X-Mercurial-Series-Id: <a2ea8fc83dd8b93cfd86.240@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:04:00 +0000 From: quux @@ -665,7 +672,7 @@ $ rm mbox iso-8859-1 patch: - $ $PYTHON -c 'fp = open("isolatin", "wb"); fp.write("h\xF6mma!\n"); fp.close();' + $ $PYTHON -c 'fp = open("isolatin", "wb"); fp.write(b"h\xF6mma!\n"); fp.close();' $ hg commit -A -d '5 0' -m 'isolatin 8-bit encoding' adding isolatin @@ -684,8 +691,8 @@ X-Mercurial-Node: 240fb913fc1b7ff15ddb9f33e73d82bf5277c720 X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 1 - Message-Id: <240fb913fc1b7ff15ddb.300@*> (glob) - X-Mercurial-Series-Id: <240fb913fc1b7ff15ddb.300@*> (glob) + Message-Id: <240fb913fc1b7ff15ddb.300@test-hostname> + X-Mercurial-Series-Id: <240fb913fc1b7ff15ddb.300@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:05:00 +0000 From: quux @@ -732,8 +739,8 @@ X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 1 - Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob) - X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob) + Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname> + X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:00 +0000 From: quux @@ -791,7 +798,7 @@ Content-Type: text/plain; charset="us-ascii" Content-Transfer-Encoding: 7bit Subject: [PATCH 0 of 2] test - Message-Id: <patchbomb.60@*> (glob) + Message-Id: <patchbomb.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:00 +0000 From: quux @@ -811,10 +818,10 @@ X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 2 - Message-Id: <8580ff50825a50c8f716.61@*> (glob) - X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob) - In-Reply-To: <patchbomb.60@*> (glob) - References: <patchbomb.60@*> (glob) + Message-Id: <8580ff50825a50c8f716.61@test-hostname> + X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname> + In-Reply-To: <patchbomb.60@test-hostname> + References: <patchbomb.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:01 +0000 From: quux @@ -847,10 +854,10 @@ X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9 X-Mercurial-Series-Index: 2 X-Mercurial-Series-Total: 2 - Message-Id: 
<97d72e5f12c7e84f8506.62@*> (glob) - X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob) - In-Reply-To: <patchbomb.60@*> (glob) - References: <patchbomb.60@*> (glob) + Message-Id: <97d72e5f12c7e84f8506.62@test-hostname> + X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname> + In-Reply-To: <patchbomb.60@test-hostname> + References: <patchbomb.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:02 +0000 From: quux @@ -888,8 +895,8 @@ X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 1 - Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob) - X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob) + Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname> + X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:00 +0000 From: quux @@ -931,8 +938,8 @@ X-Mercurial-Node: a2ea8fc83dd8b93cfd86ac97b28287204ab806e1 X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 1 - Message-Id: <a2ea8fc83dd8b93cfd86.60@*> (glob) - X-Mercurial-Series-Id: <a2ea8fc83dd8b93cfd86.60@*> (glob) + Message-Id: <a2ea8fc83dd8b93cfd86.60@test-hostname> + X-Mercurial-Series-Id: <a2ea8fc83dd8b93cfd86.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:00 +0000 From: quux @@ -991,7 +998,7 @@ Content-Type: text/plain; charset="us-ascii" Content-Transfer-Encoding: 7bit Subject: [PATCH 0 of 3] test - Message-Id: <patchbomb.60@*> (glob) + Message-Id: <patchbomb.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:00 +0000 From: quux @@ -1006,10 +1013,10 @@ X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 3 - Message-Id: <8580ff50825a50c8f716.61@*> (glob) - X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob) - In-Reply-To: <patchbomb.60@*> (glob) - References: <patchbomb.60@*> (glob) + Message-Id: <8580ff50825a50c8f716.61@test-hostname> + X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname> + In-Reply-To: <patchbomb.60@test-hostname> + References: <patchbomb.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:01 +0000 From: quux @@ -1044,10 +1051,10 @@ X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9 X-Mercurial-Series-Index: 2 X-Mercurial-Series-Total: 3 - Message-Id: <97d72e5f12c7e84f8506.62@*> (glob) - X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob) - In-Reply-To: <patchbomb.60@*> (glob) - References: <patchbomb.60@*> (glob) + Message-Id: <97d72e5f12c7e84f8506.62@test-hostname> + X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname> + In-Reply-To: <patchbomb.60@test-hostname> + References: <patchbomb.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:02 +0000 From: quux @@ -1082,10 +1089,10 @@ X-Mercurial-Node: a2ea8fc83dd8b93cfd86ac97b28287204ab806e1 X-Mercurial-Series-Index: 3 X-Mercurial-Series-Total: 3 - Message-Id: <a2ea8fc83dd8b93cfd86.63@*> (glob) - X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob) - In-Reply-To: <patchbomb.60@*> (glob) - References: <patchbomb.60@*> (glob) + Message-Id: <a2ea8fc83dd8b93cfd86.63@test-hostname> + X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname> + In-Reply-To: <patchbomb.60@test-hostname> + References: <patchbomb.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:03 +0000 From: quux @@ -1142,8 +1149,8 
@@ X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 1 - Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob) - X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob) + Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname> + X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:00 +0000 From: quux @@ -1193,8 +1200,8 @@ X-Mercurial-Node: a2ea8fc83dd8b93cfd86ac97b28287204ab806e1 X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 1 - Message-Id: <a2ea8fc83dd8b93cfd86.60@*> (glob) - X-Mercurial-Series-Id: <a2ea8fc83dd8b93cfd86.60@*> (glob) + Message-Id: <a2ea8fc83dd8b93cfd86.60@test-hostname> + X-Mercurial-Series-Id: <a2ea8fc83dd8b93cfd86.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:00 +0000 From: quux @@ -1260,8 +1267,8 @@ X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 1 - Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob) - X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob) + Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname> + X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:00 +0000 From: quux @@ -1323,7 +1330,7 @@ Content-Type: text/plain; charset="us-ascii" Content-Transfer-Encoding: 7bit Subject: [PATCH 0 of 3] test - Message-Id: <patchbomb.60@*> (glob) + Message-Id: <patchbomb.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:00 +0000 From: quux @@ -1338,10 +1345,10 @@ X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 3 - Message-Id: <8580ff50825a50c8f716.61@*> (glob) - X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob) - In-Reply-To: <patchbomb.60@*> (glob) - References: <patchbomb.60@*> (glob) + Message-Id: <8580ff50825a50c8f716.61@test-hostname> + X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname> + In-Reply-To: <patchbomb.60@test-hostname> + References: <patchbomb.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:01 +0000 From: quux @@ -1385,10 +1392,10 @@ X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9 X-Mercurial-Series-Index: 2 X-Mercurial-Series-Total: 3 - Message-Id: <97d72e5f12c7e84f8506.62@*> (glob) - X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob) - In-Reply-To: <patchbomb.60@*> (glob) - References: <patchbomb.60@*> (glob) + Message-Id: <97d72e5f12c7e84f8506.62@test-hostname> + X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname> + In-Reply-To: <patchbomb.60@test-hostname> + References: <patchbomb.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:02 +0000 From: quux @@ -1432,10 +1439,10 @@ X-Mercurial-Node: a2ea8fc83dd8b93cfd86ac97b28287204ab806e1 X-Mercurial-Series-Index: 3 X-Mercurial-Series-Total: 3 - Message-Id: <a2ea8fc83dd8b93cfd86.63@*> (glob) - X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob) - In-Reply-To: <patchbomb.60@*> (glob) - References: <patchbomb.60@*> (glob) + Message-Id: <a2ea8fc83dd8b93cfd86.63@test-hostname> + X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname> + In-Reply-To: <patchbomb.60@test-hostname> + References: <patchbomb.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:03 +0000 From: quux @@ -1503,7 +1510,7 @@ Content-Type: 
text/plain; charset="us-ascii" Content-Transfer-Encoding: 7bit Subject: [PATCH 0 of 1] test - Message-Id: <patchbomb.60@*> (glob) + Message-Id: <patchbomb.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:00 +0000 From: quux @@ -1519,10 +1526,10 @@ X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 1 - Message-Id: <ff2c9fa2018b15fa74b3.61@*> (glob) - X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.61@*> (glob) - In-Reply-To: <patchbomb.60@*> (glob) - References: <patchbomb.60@*> (glob) + Message-Id: <ff2c9fa2018b15fa74b3.61@test-hostname> + X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.61@test-hostname> + In-Reply-To: <patchbomb.60@test-hostname> + References: <patchbomb.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:01 +0000 From: quux @@ -1556,7 +1563,7 @@ Content-Type: text/plain; charset="us-ascii" Content-Transfer-Encoding: 7bit Subject: [PATCH 0 of 1] test - Message-Id: <patchbomb.60@*> (glob) + Message-Id: <patchbomb.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:00 +0000 From: quux @@ -1573,10 +1580,10 @@ X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 1 - Message-Id: <ff2c9fa2018b15fa74b3.61@*> (glob) - X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.61@*> (glob) - In-Reply-To: <patchbomb.60@*> (glob) - References: <patchbomb.60@*> (glob) + Message-Id: <ff2c9fa2018b15fa74b3.61@test-hostname> + X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.61@test-hostname> + In-Reply-To: <patchbomb.60@test-hostname> + References: <patchbomb.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:01 +0000 From: quux @@ -1612,7 +1619,7 @@ Content-Type: text/plain; charset="us-ascii" Content-Transfer-Encoding: 7bit Subject: [PATCH 0 of 2] test - Message-Id: <patchbomb.60@*> (glob) + Message-Id: <patchbomb.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:00 +0000 From: quux @@ -1628,10 +1635,10 @@ X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 2 - Message-Id: <8580ff50825a50c8f716.61@*> (glob) - X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob) - In-Reply-To: <patchbomb.60@*> (glob) - References: <patchbomb.60@*> (glob) + Message-Id: <8580ff50825a50c8f716.61@test-hostname> + X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname> + In-Reply-To: <patchbomb.60@test-hostname> + References: <patchbomb.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:01 +0000 From: quux @@ -1660,10 +1667,10 @@ X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9 X-Mercurial-Series-Index: 2 X-Mercurial-Series-Total: 2 - Message-Id: <97d72e5f12c7e84f8506.62@*> (glob) - X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob) - In-Reply-To: <patchbomb.60@*> (glob) - References: <patchbomb.60@*> (glob) + Message-Id: <97d72e5f12c7e84f8506.62@test-hostname> + X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname> + In-Reply-To: <patchbomb.60@test-hostname> + References: <patchbomb.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:02 +0000 From: quux @@ -1699,8 +1706,8 @@ X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 1 - Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob) - 
X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob) + Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname> + X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:00 +0000 From: quux @@ -1737,8 +1744,8 @@ X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 1 - Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob) - X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob) + Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname> + X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:00 +0000 From: quux @@ -1779,8 +1786,8 @@ X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 1 - Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob) - X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob) + Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname> + X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:00 +0000 From: quux @@ -1823,7 +1830,7 @@ Content-Type: text/plain; charset="us-ascii" Content-Transfer-Encoding: 7bit Subject: [PATCH 0 of 2] test - Message-Id: <patchbomb.60@*> (glob) + Message-Id: <patchbomb.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:00 +0000 From: quux @@ -1838,10 +1845,10 @@ X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 2 - Message-Id: <8580ff50825a50c8f716.61@*> (glob) - X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob) - In-Reply-To: <patchbomb.60@*> (glob) - References: <patchbomb.60@*> (glob) + Message-Id: <8580ff50825a50c8f716.61@test-hostname> + X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname> + In-Reply-To: <patchbomb.60@test-hostname> + References: <patchbomb.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:01 +0000 From: quux @@ -1876,10 +1883,10 @@ X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9 X-Mercurial-Series-Index: 2 X-Mercurial-Series-Total: 2 - Message-Id: <97d72e5f12c7e84f8506.62@*> (glob) - X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob) - In-Reply-To: <patchbomb.60@*> (glob) - References: <patchbomb.60@*> (glob) + Message-Id: <97d72e5f12c7e84f8506.62@test-hostname> + X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname> + In-Reply-To: <patchbomb.60@test-hostname> + References: <patchbomb.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:02 +0000 From: quux @@ -1923,8 +1930,8 @@ X-Mercurial-Node: 7aead2484924c445ad8ce2613df91f52f9e502ed X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 1 - Message-Id: <7aead2484924c445ad8c.60@*> (glob) - X-Mercurial-Series-Id: <7aead2484924c445ad8c.60@*> (glob) + Message-Id: <7aead2484924c445ad8c.60@test-hostname> + X-Mercurial-Series-Id: <7aead2484924c445ad8c.60@test-hostname> In-Reply-To: <baz> References: <baz> User-Agent: Mercurial-patchbomb/* (glob) @@ -1966,8 +1973,8 @@ X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 2 - Message-Id: <8580ff50825a50c8f716.60@*> (glob) - X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@*> (glob) + Message-Id: <8580ff50825a50c8f716.60@test-hostname> + X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@test-hostname> In-Reply-To: <baz> 
References: <baz> User-Agent: Mercurial-patchbomb/* (glob) @@ -1998,8 +2005,8 @@ X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9 X-Mercurial-Series-Index: 2 X-Mercurial-Series-Total: 2 - Message-Id: <97d72e5f12c7e84f8506.61@*> (glob) - X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@*> (glob) + Message-Id: <97d72e5f12c7e84f8506.61@test-hostname> + X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@test-hostname> In-Reply-To: <baz> References: <baz> User-Agent: Mercurial-patchbomb/* (glob) @@ -2038,7 +2045,7 @@ Content-Type: text/plain; charset="us-ascii" Content-Transfer-Encoding: 7bit Subject: [PATCH 0 of 2] test - Message-Id: <patchbomb.60@*> (glob) + Message-Id: <patchbomb.60@test-hostname> In-Reply-To: <baz> References: <baz> User-Agent: Mercurial-patchbomb/* (glob) @@ -2056,10 +2063,10 @@ X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 2 - Message-Id: <8580ff50825a50c8f716.61@*> (glob) - X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob) - In-Reply-To: <patchbomb.60@*> (glob) - References: <patchbomb.60@*> (glob) + Message-Id: <8580ff50825a50c8f716.61@test-hostname> + X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname> + In-Reply-To: <patchbomb.60@test-hostname> + References: <patchbomb.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:01 +0000 From: quux @@ -2088,10 +2095,10 @@ X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9 X-Mercurial-Series-Index: 2 X-Mercurial-Series-Total: 2 - Message-Id: <97d72e5f12c7e84f8506.62@*> (glob) - X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob) - In-Reply-To: <patchbomb.60@*> (glob) - References: <patchbomb.60@*> (glob) + Message-Id: <97d72e5f12c7e84f8506.62@test-hostname> + X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname> + In-Reply-To: <patchbomb.60@test-hostname> + References: <patchbomb.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:02 +0000 From: quux @@ -2129,8 +2136,8 @@ X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 1 - Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob) - X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob) + Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname> + X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:00 +0000 From: quux @@ -2167,7 +2174,7 @@ Content-Type: text/plain; charset="us-ascii" Content-Transfer-Encoding: 7bit Subject: [PATCH 0 of 2 fooFlag] test - Message-Id: <patchbomb.60@*> (glob) + Message-Id: <patchbomb.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:00 +0000 From: quux @@ -2183,10 +2190,10 @@ X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 2 - Message-Id: <8580ff50825a50c8f716.61@*> (glob) - X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob) - In-Reply-To: <patchbomb.60@*> (glob) - References: <patchbomb.60@*> (glob) + Message-Id: <8580ff50825a50c8f716.61@test-hostname> + X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname> + In-Reply-To: <patchbomb.60@test-hostname> + References: <patchbomb.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:01 +0000 From: quux @@ -2215,10 +2222,10 @@ X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9 X-Mercurial-Series-Index: 2 X-Mercurial-Series-Total: 2 - 
Message-Id: <97d72e5f12c7e84f8506.62@*> (glob) - X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob) - In-Reply-To: <patchbomb.60@*> (glob) - References: <patchbomb.60@*> (glob) + Message-Id: <97d72e5f12c7e84f8506.62@test-hostname> + X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname> + In-Reply-To: <patchbomb.60@test-hostname> + References: <patchbomb.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:02 +0000 From: quux @@ -2256,8 +2263,8 @@ X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 1 - Message-Id: <ff2c9fa2018b15fa74b3.60@*> (glob) - X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@*> (glob) + Message-Id: <ff2c9fa2018b15fa74b3.60@test-hostname> + X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:00 +0000 From: quux @@ -2293,7 +2300,7 @@ Content-Type: text/plain; charset="us-ascii" Content-Transfer-Encoding: 7bit Subject: [PATCH 0 of 2 fooFlag barFlag] test - Message-Id: <patchbomb.60@*> (glob) + Message-Id: <patchbomb.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:00 +0000 From: quux @@ -2309,10 +2316,10 @@ X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 2 - Message-Id: <8580ff50825a50c8f716.61@*> (glob) - X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob) - In-Reply-To: <patchbomb.60@*> (glob) - References: <patchbomb.60@*> (glob) + Message-Id: <8580ff50825a50c8f716.61@test-hostname> + X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname> + In-Reply-To: <patchbomb.60@test-hostname> + References: <patchbomb.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:01 +0000 From: quux @@ -2341,10 +2348,10 @@ X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9 X-Mercurial-Series-Index: 2 X-Mercurial-Series-Total: 2 - Message-Id: <97d72e5f12c7e84f8506.62@*> (glob) - X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob) - In-Reply-To: <patchbomb.60@*> (glob) - References: <patchbomb.60@*> (glob) + Message-Id: <97d72e5f12c7e84f8506.62@test-hostname> + X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname> + In-Reply-To: <patchbomb.60@test-hostname> + References: <patchbomb.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:02 +0000 From: quux @@ -2383,8 +2390,8 @@ X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 1 - Message-Id: <8580ff50825a50c8f716.315532860@*> (glob) - X-Mercurial-Series-Id: <8580ff50825a50c8f716.315532860@*> (glob) + Message-Id: <8580ff50825a50c8f716.315532860@test-hostname> + X-Mercurial-Series-Id: <8580ff50825a50c8f716.315532860@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Tue, 01 Jan 1980 00:01:00 +0000 From: quux @@ -2422,7 +2429,7 @@ Content-Type: text/plain; charset="us-ascii" Content-Transfer-Encoding: 7bit Subject: [PATCH 0 of 2 R1] test - Message-Id: <patchbomb.60@*> (glob) + Message-Id: <patchbomb.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:00 +0000 From: quux @@ -2438,10 +2445,10 @@ X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 2 - Message-Id: <8580ff50825a50c8f716.61@*> (glob) - X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob) - In-Reply-To: 
<patchbomb.60@*> (glob) - References: <patchbomb.60@*> (glob) + Message-Id: <8580ff50825a50c8f716.61@test-hostname> + X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname> + In-Reply-To: <patchbomb.60@test-hostname> + References: <patchbomb.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:01 +0000 From: quux @@ -2469,10 +2476,10 @@ X-Mercurial-Node: 97d72e5f12c7e84f85064aa72e5a297142c36ed9 X-Mercurial-Series-Index: 2 X-Mercurial-Series-Total: 2 - Message-Id: <97d72e5f12c7e84f8506.62@*> (glob) - X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@*> (glob) - In-Reply-To: <patchbomb.60@*> (glob) - References: <patchbomb.60@*> (glob) + Message-Id: <97d72e5f12c7e84f8506.62@test-hostname> + X-Mercurial-Series-Id: <8580ff50825a50c8f716.61@test-hostname> + In-Reply-To: <patchbomb.60@test-hostname> + References: <patchbomb.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:02 +0000 From: quux @@ -2508,8 +2515,8 @@ X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 1 - Message-Id: <8580ff50825a50c8f716.60@*> (glob) - X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@*> (glob) + Message-Id: <8580ff50825a50c8f716.60@test-hostname> + X-Mercurial-Series-Id: <8580ff50825a50c8f716.60@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Thu, 01 Jan 1970 00:01:00 +0000 From: quux @@ -2531,10 +2538,11 @@ test multi-byte domain parsing: - $ UUML=`$PYTHON -c 'import sys; sys.stdout.write("\374")'` + >>> with open('toaddress.txt', 'wb') as f: + ... f.write(b'bar@\xfcnicode.com') and None $ HGENCODING=iso-8859-1 $ export HGENCODING - $ hg email --date '1980-1-1 0:1' -m tmp.mbox -f quux -t "bar@${UUML}nicode.com" -s test -r 0 + $ hg email --date '1980-1-1 0:1' -m tmp.mbox -f quux -t "`cat toaddress.txt`" -s test -r 0 this patch series consists of 1 patches. Cc: @@ -2550,8 +2558,8 @@ X-Mercurial-Node: 8580ff50825a50c8f716709acdf8de0deddcd6ab X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 1 - Message-Id: <8580ff50825a50c8f716.315532860@*> (glob) - X-Mercurial-Series-Id: <8580ff50825a50c8f716.315532860@*> (glob) + Message-Id: <8580ff50825a50c8f716.315532860@test-hostname> + X-Mercurial-Series-Id: <8580ff50825a50c8f716.315532860@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Tue, 01 Jan 1980 00:01:00 +0000 From: quux @@ -2625,7 +2633,7 @@ Content-Type: text/plain; charset="us-ascii" Content-Transfer-Encoding: 7bit Subject: [PATCH 0 of 6] test - Message-Id: <patchbomb.315532860@*> (glob) + Message-Id: <patchbomb.315532860@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Tue, 01 Jan 1980 00:01:00 +0000 From: test @@ -2640,10 +2648,10 @@ X-Mercurial-Node: ff2c9fa2018b15fa74b33363bda9527323e2a99f X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 6 - Message-Id: <ff2c9fa2018b15fa74b3.315532861@*> (glob) - X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@*> (glob) - In-Reply-To: <patchbomb.315532860@*> (glob) - References: <patchbomb.315532860@*> (glob) + Message-Id: <ff2c9fa2018b15fa74b3.315532861@test-hostname> + X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@test-hostname> + In-Reply-To: <patchbomb.315532860@test-hostname> + References: <patchbomb.315532860@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Tue, 01 Jan 1980 00:01:01 +0000 From: test @@ -2665,16 +2673,16 @@ displaying [PATCH 2 of 6] utf-8 content ... 
MIME-Version: 1.0 - Content-Type: text/plain; charset="us-ascii" - Content-Transfer-Encoding: 8bit + Content-Type: text/plain; charset="iso-8859-1" + Content-Transfer-Encoding: quoted-printable Subject: [PATCH 2 of 6] utf-8 content X-Mercurial-Node: 909a00e13e9d78b575aeee23dddbada46d5a143f X-Mercurial-Series-Index: 2 X-Mercurial-Series-Total: 6 - Message-Id: <909a00e13e9d78b575ae.315532862@*> (glob) - X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@*> (glob) - In-Reply-To: <patchbomb.315532860@*> (glob) - References: <patchbomb.315532860@*> (glob) + Message-Id: <909a00e13e9d78b575ae.315532862@test-hostname> + X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@test-hostname> + In-Reply-To: <patchbomb.315532860@test-hostname> + References: <patchbomb.315532860@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Tue, 01 Jan 1980 00:01:02 +0000 From: test @@ -2699,7 +2707,7 @@ --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/utf Thu Jan 01 00:00:04 1970 +0000 @@ -0,0 +1,1 @@ - +h\xc3\xb6mma! (esc) + +h=C3=B6mma! displaying [PATCH 3 of 6] long line ... MIME-Version: 1.0 @@ -2709,10 +2717,10 @@ X-Mercurial-Node: a2ea8fc83dd8b93cfd86ac97b28287204ab806e1 X-Mercurial-Series-Index: 3 X-Mercurial-Series-Total: 6 - Message-Id: <a2ea8fc83dd8b93cfd86.315532863@*> (glob) - X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@*> (glob) - In-Reply-To: <patchbomb.315532860@*> (glob) - References: <patchbomb.315532860@*> (glob) + Message-Id: <a2ea8fc83dd8b93cfd86.315532863@test-hostname> + X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@test-hostname> + In-Reply-To: <patchbomb.315532860@test-hostname> + References: <patchbomb.315532860@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Tue, 01 Jan 1980 00:01:03 +0000 From: test @@ -2750,16 +2758,16 @@ displaying [PATCH 4 of 6] isolatin 8-bit encoding ... MIME-Version: 1.0 - Content-Type: text/plain; charset="us-ascii" - Content-Transfer-Encoding: 8bit + Content-Type: text/plain; charset="iso-8859-1" + Content-Transfer-Encoding: quoted-printable Subject: [PATCH 4 of 6] isolatin 8-bit encoding X-Mercurial-Node: 240fb913fc1b7ff15ddb9f33e73d82bf5277c720 X-Mercurial-Series-Index: 4 X-Mercurial-Series-Total: 6 - Message-Id: <240fb913fc1b7ff15ddb.315532864@*> (glob) - X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@*> (glob) - In-Reply-To: <patchbomb.315532860@*> (glob) - References: <patchbomb.315532860@*> (glob) + Message-Id: <240fb913fc1b7ff15ddb.315532864@test-hostname> + X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@test-hostname> + In-Reply-To: <patchbomb.315532860@test-hostname> + References: <patchbomb.315532860@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Tue, 01 Jan 1980 00:01:04 +0000 From: test @@ -2777,7 +2785,7 @@ --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/isolatin Thu Jan 01 00:00:05 1970 +0000 @@ -0,0 +1,1 @@ - +h\xf6mma! (esc) + +h=F6mma! displaying [PATCH 5 of 6] Added tag zero, zero.foo for changeset 8580ff50825a ... 
MIME-Version: 1.0 @@ -2787,10 +2795,10 @@ X-Mercurial-Node: 5d5ef15dfe5e7bd3a4ee154b5fff76c7945ec433 X-Mercurial-Series-Index: 5 X-Mercurial-Series-Total: 6 - Message-Id: <5d5ef15dfe5e7bd3a4ee.315532865@*> (glob) - X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@*> (glob) - In-Reply-To: <patchbomb.315532860@*> (glob) - References: <patchbomb.315532860@*> (glob) + Message-Id: <5d5ef15dfe5e7bd3a4ee.315532865@test-hostname> + X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@test-hostname> + In-Reply-To: <patchbomb.315532860@test-hostname> + References: <patchbomb.315532860@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Tue, 01 Jan 1980 00:01:05 +0000 From: test @@ -2819,10 +2827,10 @@ X-Mercurial-Node: 2f9fa9b998c5fe3ac2bd9a2b14bfcbeecbc7c268 X-Mercurial-Series-Index: 6 X-Mercurial-Series-Total: 6 - Message-Id: <2f9fa9b998c5fe3ac2bd.315532866@*> (glob) - X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@*> (glob) - In-Reply-To: <patchbomb.315532860@*> (glob) - References: <patchbomb.315532860@*> (glob) + Message-Id: <2f9fa9b998c5fe3ac2bd.315532866@test-hostname> + X-Mercurial-Series-Id: <ff2c9fa2018b15fa74b3.315532861@test-hostname> + In-Reply-To: <patchbomb.315532860@test-hostname> + References: <patchbomb.315532860@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Tue, 01 Jan 1980 00:01:06 +0000 From: test @@ -2864,8 +2872,8 @@ X-Mercurial-Node: 2f9fa9b998c5fe3ac2bd9a2b14bfcbeecbc7c268 X-Mercurial-Series-Index: 1 X-Mercurial-Series-Total: 1 - Message-Id: <2f9fa9b998c5fe3ac2bd.315532860@*> (glob) - X-Mercurial-Series-Id: <2f9fa9b998c5fe3ac2bd.315532860@*> (glob) + Message-Id: <2f9fa9b998c5fe3ac2bd.315532860@test-hostname> + X-Mercurial-Series-Id: <2f9fa9b998c5fe3ac2bd.315532860@test-hostname> User-Agent: Mercurial-patchbomb/* (glob) Date: Tue, 01 Jan 1980 00:01:00 +0000 From: test
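The other recurring change in this file is that 8-bit patch bodies are now
labeled iso-8859-1 with quoted-printable transfer encoding instead of the
mislabeled us-ascii/8bit pair, which is why the raw bytes h\xc3\xb6mma!
render as h=C3=B6mma! in the expected output. The standard library
reproduces that encoding directly:

    import quopri

    raw = b'h\xc3\xb6mma!\n'  # the utf-8 bytes the test writes
    print(quopri.encodestring(raw).decode('ascii'))  # h=C3=B6mma!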
--- a/tests/test-py3-commands.t Sun Aug 19 13:27:02 2018 +0900 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,239 +0,0 @@ -#require py3exe - -This test helps in keeping track of which commands we can run on -Python 3 and see what kind of errors are coming up. -The full traceback is hidden to have a stable output. - $ HGBIN=`which hg` - - $ for cmd in version debuginstall ; do - > echo $cmd - > $PYTHON3 $HGBIN $cmd 2>&1 | tail -1 - > done - version - warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - debuginstall - no problems detected - -#if test-repo -Make a clone so that any features in the developer's .hg/hgrc that -might confuse Python 3 don't break this test. When we can do commit in -Python 3, we'll stop doing this. We use e76ed1e480ef for the clone -because it has different files than 273ce12ad8f1, so we can test both -`files` from dirstate and `files` loaded from a specific revision. - - $ hg clone -r e76ed1e480ef "`dirname "$TESTDIR"`" testrepo 2>&1 | tail -1 - 15 files updated, 0 files merged, 0 files removed, 0 files unresolved - -Test using -R, which exercises some URL code: - $ $PYTHON3 $HGBIN -R testrepo files -r 273ce12ad8f1 | tail -1 - testrepo/tkmerge - -Now prove `hg files` is reading the whole manifest. We have to grep -out some potential warnings that come from hgrc as yet. - $ cd testrepo - $ $PYTHON3 $HGBIN files -r 273ce12ad8f1 - .hgignore - PKG-INFO - README - hg - mercurial/__init__.py - mercurial/byterange.py - mercurial/fancyopts.py - mercurial/hg.py - mercurial/mdiff.py - mercurial/revlog.py - mercurial/transaction.py - notes.txt - setup.py - tkmerge - - $ $PYTHON3 $HGBIN files -r 273ce12ad8f1 | wc -l - \s*14 (re) - $ $PYTHON3 $HGBIN files | wc -l - \s*15 (re) - -Test if log-like commands work: - - $ $PYTHON3 $HGBIN tip - changeset: 10:e76ed1e480ef - tag: tip - user: oxymoron@cinder.waste.org - date: Tue May 03 23:37:43 2005 -0800 - summary: Fix linking of changeset revs when merging - - - $ $PYTHON3 $HGBIN log -r0 - changeset: 0:9117c6561b0b - user: mpm@selenic.com - date: Tue May 03 13:16:10 2005 -0800 - summary: Add back links from file revisions to changeset revisions - - - $ cd .. -#endif - -Test if `hg config` works: - - $ $PYTHON3 $HGBIN config - devel.all-warnings=true - devel.default-date=0 0 - largefiles.usercache=$TESTTMP/.cache/largefiles - ui.slash=True - ui.interactive=False - ui.mergemarkers=detailed - ui.promptecho=True - web.address=localhost - web.ipv6=False - - $ cat > included-hgrc <<EOF - > [extensions] - > babar = imaginary_elephant - > EOF - $ cat >> $HGRCPATH <<EOF - > %include $TESTTMP/included-hgrc - > EOF - $ $PYTHON3 $HGBIN version | tail -1 - *** failed to import extension babar from imaginary_elephant: *: 'imaginary_elephant' (glob) - warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - - $ rm included-hgrc - $ touch included-hgrc - -Test bytes-ness of policy.policy with HGMODULEPOLICY - - $ HGMODULEPOLICY=py - $ export HGMODULEPOLICY - $ $PYTHON3 `which hg` debuginstall 2>&1 | tail -1 - no problems detected - -`hg init` can create empty repos -`hg status` works fine -`hg summary` also works! - - $ $PYTHON3 `which hg` init py3repo - $ cd py3repo - $ echo "This is the file 'iota'." > iota - $ $PYTHON3 $HGBIN status - ? iota - $ $PYTHON3 $HGBIN add iota - $ $PYTHON3 $HGBIN status - A iota - $ hg diff --nodates --git - diff --git a/iota b/iota - new file mode 100644 - --- /dev/null - +++ b/iota - @@ -0,0 +1,1 @@ - +This is the file 'iota'. 
- $ $PYTHON3 $HGBIN commit --message 'commit performed in Python 3' - $ $PYTHON3 $HGBIN status - - $ mkdir A - $ echo "This is the file 'mu'." > A/mu - $ $PYTHON3 $HGBIN addremove - adding A/mu - $ $PYTHON3 $HGBIN status - A A/mu - $ HGEDITOR='echo message > ' $PYTHON3 $HGBIN commit - $ $PYTHON3 $HGBIN status - $ $PYTHON3 $HGBIN summary - parent: 1:e1e9167203d4 tip - message - branch: default - commit: (clean) - update: (current) - phases: 2 draft - -Test weird unicode-vs-bytes stuff - - $ $PYTHON3 $HGBIN help | egrep -v '^ |^$' - Mercurial Distributed SCM - list of commands: - additional help topics: - (use 'hg help -v' to show built-in aliases and global options) - - $ $PYTHON3 $HGBIN help help | egrep -v '^ |^$' - hg help [-ecks] [TOPIC] - show help for a given topic or a help overview - options ([+] can be repeated): - (some details hidden, use --verbose to show complete help) - - $ $PYTHON3 $HGBIN help -k notopic - abort: no matches - (try 'hg help' for a list of topics) - [255] - -Prove the repo is valid using the Python 2 `hg`: - $ hg verify - checking changesets - checking manifests - crosschecking files in changesets and manifests - checking files - 2 files, 2 changesets, 2 total revisions - $ hg log - changeset: 1:e1e9167203d4 - tag: tip - user: test - date: Thu Jan 01 00:00:00 1970 +0000 - summary: message - - changeset: 0:71c96e924262 - user: test - date: Thu Jan 01 00:00:00 1970 +0000 - summary: commit performed in Python 3 - - - $ $PYTHON3 $HGBIN log -G - @ changeset: 1:e1e9167203d4 - | tag: tip - | user: test - | date: Thu Jan 01 00:00:00 1970 +0000 - | summary: message - | - o changeset: 0:71c96e924262 - user: test - date: Thu Jan 01 00:00:00 1970 +0000 - summary: commit performed in Python 3 - - $ $PYTHON3 $HGBIN log -Tjson - [ - { - "bookmarks": [], - "branch": "default", - "date": [0, 0], - "desc": "message", - "node": "e1e9167203d450ca2f558af628955b5f5afd4489", - "parents": ["71c96e924262969ff0d8d3d695b0f75412ccc3d8"], - "phase": "draft", - "rev": 1, - "tags": ["tip"], - "user": "test" - }, - { - "bookmarks": [], - "branch": "default", - "date": [0, 0], - "desc": "commit performed in Python 3", - "node": "71c96e924262969ff0d8d3d695b0f75412ccc3d8", - "parents": ["0000000000000000000000000000000000000000"], - "phase": "draft", - "rev": 0, - "tags": [], - "user": "test" - } - ] - -Show that update works now! - - $ $PYTHON3 $HGBIN up 0 - 0 files updated, 0 files merged, 1 files removed, 0 files unresolved - $ $PYTHON3 $HGBIN identify - 71c96e924262 - -branches and bookmarks also work! - - $ $PYTHON3 $HGBIN branches - default 1:e1e9167203d4 - $ $PYTHON3 $HGBIN bookmark book - $ $PYTHON3 $HGBIN bookmarks - * book 0:71c96e924262
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-rebase-backup.t Mon Aug 20 09:48:08 2018 -0700 @@ -0,0 +1,150 @@ + $ cat << EOF >> $HGRCPATH + > [extensions] + > rebase= + > EOF + +========================================== +Test history-editing-backup config option | +========================================== +Test with Pre-obsmarker rebase: +1) When config option is not set: + $ hg init repo1 + $ cd repo1 + $ echo a>a + $ hg ci -qAma + $ echo b>b + $ hg ci -qAmb + $ echo c>c + $ hg ci -qAmc + $ hg up 0 -q + $ echo d>d + $ hg ci -qAmd + $ echo e>e + $ hg ci -qAme + $ hg log -GT "{rev}: {firstline(desc)}\n" + @ 4: e + | + o 3: d + | + | o 2: c + | | + | o 1: b + |/ + o 0: a + + $ hg rebase -s 1 -d . + rebasing 1:d2ae7f538514 "b" + rebasing 2:177f92b77385 "c" + saved backup bundle to $TESTTMP/repo1/.hg/strip-backup/d2ae7f538514-c7ed7a78-rebase.hg + $ hg log -GT "{rev}: {firstline(desc)}\n" + o 4: c + | + o 3: b + | + @ 2: e + | + o 1: d + | + o 0: a + + +2) When config option is set: + $ cat << EOF >> $HGRCPATH + > [ui] + > history-editing-backup = False + > EOF + + $ echo f>f + $ hg ci -Aqmf + $ echo g>g + $ hg ci -Aqmg + $ hg log -GT "{rev}: {firstline(desc)}\n" + @ 6: g + | + o 5: f + | + | o 4: c + | | + | o 3: b + |/ + o 2: e + | + o 1: d + | + o 0: a + + $ hg rebase -s 3 -d . + rebasing 3:05bff2a95b12 "b" + rebasing 4:1762bde4404d "c" + + $ hg log -GT "{rev}: {firstline(desc)}\n" + o 6: c + | + o 5: b + | + @ 4: g + | + o 3: f + | + o 2: e + | + o 1: d + | + o 0: a + +Test when rebased revisions are stripped during abort: +====================================================== + + $ echo conflict > c + $ hg ci -Am "conflict with c" + adding c + created new head + $ hg log -GT "{rev}: {firstline(desc)}\n" + @ 7: conflict with c + | + | o 6: c + | | + | o 5: b + |/ + o 4: g + | + o 3: f + | + o 2: e + | + o 1: d + | + o 0: a + +When history-editing-backup = True: + $ cat << EOF >> $HGRCPATH + > [ui] + > history-editing-backup = True + > EOF + $ hg rebase -s 5 -d . + rebasing 5:1f8148a544ee "b" + rebasing 6:f8bc7d28e573 "c" + merging c + warning: conflicts while merging c! (edit, then use 'hg resolve --mark') + unresolved conflicts (see hg resolve, then hg rebase --continue) + [1] + $ hg rebase --abort + saved backup bundle to $TESTTMP/repo1/.hg/strip-backup/818c1a43c916-2b644d96-backup.hg + rebase aborted + +When history-editing-backup = False: + $ cat << EOF >> $HGRCPATH + > [ui] + > history-editing-backup = False + > EOF + $ hg rebase -s 5 -d . + rebasing 5:1f8148a544ee "b" + rebasing 6:f8bc7d28e573 "c" + merging c + warning: conflicts while merging c! (edit, then use 'hg resolve --mark') + unresolved conflicts (see hg resolve, then hg rebase --continue) + [1] + $ hg rebase --abort + rebase aborted + $ cd .. +
--- a/tests/test-rebase-inmemory.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-rebase-inmemory.t Mon Aug 20 09:48:08 2018 -0700 @@ -156,7 +156,33 @@ |/ o 0: b173517d0057 'a' + +Test reporting of path conflicts + + $ hg rm a + $ mkdir a + $ touch a/a + $ hg ci -Am "a/a" + adding a/a + $ hg tglog + @ 4: daf7dfc139cb 'a/a' + | + o 3: 844a7de3e617 'c' + | + | o 2: 09c044d2cb43 'd' + | | + | o 1: fc055c3b4d33 'b' + |/ + o 0: b173517d0057 'a' + + $ hg rebase -r . -d 2 + rebasing 4:daf7dfc139cb "a/a" (tip) + saved backup bundle to $TESTTMP/repo1/repo2/.hg/strip-backup/daf7dfc139cb-fdbfcf4f-rebase.hg + + $ cd .. + Test dry-run rebasing + $ hg init repo3 $ cd repo3 $ echo a>a @@ -325,6 +351,25 @@ hit a merge conflict [1] +In-memory rebase that fails due to merge conflicts + + $ hg rebase -s 2 -d 7 + rebasing 2:177f92b77385 "c" + rebasing 3:055a42cdd887 "d" + rebasing 4:e860deea161a "e" + merging e + transaction abort! + rollback completed + hit merge conflicts; re-running rebase without in-memory merge + rebase aborted + rebasing 2:177f92b77385 "c" + rebasing 3:055a42cdd887 "d" + rebasing 4:e860deea161a "e" + merging e + warning: conflicts while merging e! (edit, then use 'hg resolve --mark') + unresolved conflicts (see hg resolve, then hg rebase --continue) + [1] + ========================== Test for --confirm option| ========================== @@ -509,3 +554,31 @@ o 0:cb9a9f314b8b test a +#if execbit + +Test a metadata-only in-memory merge + $ cd $TESTTMP + $ hg init no_exception + $ cd no_exception +# Produce the following graph: +# o 'add +x to foo.txt' +# | o r1 (adds bar.txt, just for something to rebase to) +# |/ +# o r0 (adds foo.txt, no +x) + $ echo hi > foo.txt + $ hg ci -qAm r0 + $ echo hi > bar.txt + $ hg ci -qAm r1 + $ hg co -qr ".^" + $ chmod +x foo.txt + $ hg ci -qAm 'add +x to foo.txt' +issue5960: this was raising an AttributeError exception + $ hg rebase -r . -d 1 + rebasing 2:539b93e77479 "add +x to foo.txt" (tip) + saved backup bundle to $TESTTMP/no_exception/.hg/strip-backup/*.hg (glob) + $ hg diff -c tip + diff --git a/foo.txt b/foo.txt + old mode 100644 + new mode 100755 + +#endif
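The transcript above pins down the fallback contract for in-memory rebase: when the in-memory merge hits a conflict, the transaction is rolled back and the whole rebase is re-run without in-memory merge, so conflict markers can be left in the working copy. A self-contained sketch of that shape; MergeConflict and the two worker functions are hypothetical stand-ins, not rebase's internals:

# Only the try/except fallback shape mirrors the behavior tested above.
class MergeConflict(Exception):
    pass

def rebase_in_memory(revs):
    raise MergeConflict('e')    # simulate "merging e" failing

def rebase_on_disk(revs):
    return 'conflicts left in working copy for %s' % revs

def rebase(revs):
    try:
        return rebase_in_memory(revs)
    except MergeConflict:
        print('hit merge conflicts; re-running rebase without '
              'in-memory merge')
        return rebase_on_disk(revs)

print(rebase('2::4'))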
--- a/tests/test-rebase-obsolete.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-rebase-obsolete.t Mon Aug 20 09:48:08 2018 -0700 @@ -15,6 +15,7 @@ > [extensions] > rebase= > drawdag=$TESTDIR/drawdag.py + > strip= > EOF Setup rebase canonical repo @@ -1788,3 +1789,312 @@ | o 0:426bada5c675 A +==================== +Test --stop option | +==================== + $ cd .. + $ hg init rbstop + $ cd rbstop + $ echo a>a + $ hg ci -Aqma + $ echo b>b + $ hg ci -Aqmb + $ echo c>c + $ hg ci -Aqmc + $ echo d>d + $ hg ci -Aqmd + $ hg up 0 -q + $ echo f>f + $ hg ci -Aqmf + $ echo D>d + $ hg ci -Aqm "conflict with d" + $ hg up 3 -q + $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n" + o 5:00bfc9898aeb test + | conflict with d + | + o 4:dafd40200f93 test + | f + | + | @ 3:055a42cdd887 test + | | d + | | + | o 2:177f92b77385 test + | | c + | | + | o 1:d2ae7f538514 test + |/ b + | + o 0:cb9a9f314b8b test + a + + $ hg rebase -s 1 -d 5 + rebasing 1:d2ae7f538514 "b" + rebasing 2:177f92b77385 "c" + rebasing 3:055a42cdd887 "d" + merging d + warning: conflicts while merging d! (edit, then use 'hg resolve --mark') + unresolved conflicts (see hg resolve, then hg rebase --continue) + [1] + $ hg rebase --stop + 1 new orphan changesets + $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n" + o 7:7fffad344617 test + | c + | + o 6:b15528633407 test + | b + | + o 5:00bfc9898aeb test + | conflict with d + | + o 4:dafd40200f93 test + | f + | + | @ 3:055a42cdd887 test + | | d + | | + | x 2:177f92b77385 test + | | c + | | + | x 1:d2ae7f538514 test + |/ b + | + o 0:cb9a9f314b8b test + a + +Test it aborts if unstable csets are not allowed: +=============================================== + $ cat >> $HGRCPATH << EOF + > [experimental] + > evolution.allowunstable=False + > EOF + + $ hg strip 6 --no-backup -q + $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n" + o 5:00bfc9898aeb test + | conflict with d + | + o 4:dafd40200f93 test + | f + | + | @ 3:055a42cdd887 test + | | d + | | + | o 2:177f92b77385 test + | | c + | | + | o 1:d2ae7f538514 test + |/ b + | + o 0:cb9a9f314b8b test + a + + $ hg rebase -s 1 -d 5 + rebasing 1:d2ae7f538514 "b" + rebasing 2:177f92b77385 "c" + rebasing 3:055a42cdd887 "d" + merging d + warning: conflicts while merging d! (edit, then use 'hg resolve --mark') + unresolved conflicts (see hg resolve, then hg rebase --continue) + [1] + $ hg rebase --stop + abort: cannot remove original changesets with unrebased descendants + (either enable obsmarkers to allow unstable revisions or use --keep to keep original changesets) + [255] + $ hg rebase --abort + saved backup bundle to $TESTTMP/rbstop/.hg/strip-backup/b15528633407-6eb72b6f-backup.hg + rebase aborted + +Test --stop when --keep is passed: +================================== + $ hg rebase -s 1 -d 5 --keep + rebasing 1:d2ae7f538514 "b" + rebasing 2:177f92b77385 "c" + rebasing 3:055a42cdd887 "d" + merging d + warning: conflicts while merging d! 
(edit, then use 'hg resolve --mark') + unresolved conflicts (see hg resolve, then hg rebase --continue) + [1] + $ hg rebase --stop + $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n" + o 7:7fffad344617 test + | c + | + o 6:b15528633407 test + | b + | + o 5:00bfc9898aeb test + | conflict with d + | + o 4:dafd40200f93 test + | f + | + | @ 3:055a42cdd887 test + | | d + | | + | o 2:177f92b77385 test + | | c + | | + | o 1:d2ae7f538514 test + |/ b + | + o 0:cb9a9f314b8b test + a + +Test --stop aborts when --collapse was passed: +============================================= + $ cat >> $HGRCPATH << EOF + > [experimental] + > evolution.allowunstable=True + > EOF + + $ hg strip 6 + saved backup bundle to $TESTTMP/rbstop/.hg/strip-backup/b15528633407-6eb72b6f-backup.hg + $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n" + o 5:00bfc9898aeb test + | conflict with d + | + o 4:dafd40200f93 test + | f + | + | @ 3:055a42cdd887 test + | | d + | | + | o 2:177f92b77385 test + | | c + | | + | o 1:d2ae7f538514 test + |/ b + | + o 0:cb9a9f314b8b test + a + + $ hg rebase -s 1 -d 5 --collapse -m "collapsed b c d" + rebasing 1:d2ae7f538514 "b" + rebasing 2:177f92b77385 "c" + rebasing 3:055a42cdd887 "d" + merging d + warning: conflicts while merging d! (edit, then use 'hg resolve --mark') + unresolved conflicts (see hg resolve, then hg rebase --continue) + [1] + $ hg rebase --stop + abort: cannot stop in --collapse session + [255] + $ hg rebase --abort + rebase aborted + $ hg diff + $ hg log -G --template "{rev}:{short(node)} {person(author)}\n{firstline(desc)} {topic}\n\n" + o 5:00bfc9898aeb test + | conflict with d + | + o 4:dafd40200f93 test + | f + | + | @ 3:055a42cdd887 test + | | d + | | + | o 2:177f92b77385 test + | | c + | | + | o 1:d2ae7f538514 test + |/ b + | + o 0:cb9a9f314b8b test + a + +Test --stop raises errors with conflicting options: +================================================= + $ hg rebase -s 3 -d 5 + rebasing 3:055a42cdd887 "d" + merging d + warning: conflicts while merging d! (edit, then use 'hg resolve --mark') + unresolved conflicts (see hg resolve, then hg rebase --continue) + [1] + $ hg rebase --stop --dry-run + abort: cannot specify both --dry-run and --stop + [255] + + $ hg rebase -s 3 -d 5 + abort: rebase in progress + (use 'hg rebase --continue' or 'hg rebase --abort') + [255] + $ hg rebase --stop --continue + abort: cannot use --stop with --continue + [255] + +Test --stop moves bookmarks of original revisions to new rebased nodes: +====================================================================== + $ cd .. + $ hg init repo + $ cd repo + + $ echo a > a + $ hg ci -Am A + adding a + + $ echo b > b + $ hg ci -Am B + adding b + $ hg book X + $ hg book Y + + $ echo c > c + $ hg ci -Am C + adding c + $ hg book Z + + $ echo d > d + $ hg ci -Am D + adding d + + $ hg up 0 -q + $ echo e > e + $ hg ci -Am E + adding e + created new head + + $ echo doubt > d + $ hg ci -Am "conflict with d" + adding d + + $ hg log -GT "{rev}: {node|short} '{desc}' bookmarks: {bookmarks}\n" + @ 5: 39adf30bc1be 'conflict with d' bookmarks: + | + o 4: 9c1e55f411b6 'E' bookmarks: + | + | o 3: 67a385d4e6f2 'D' bookmarks: Z + | | + | o 2: 49cb3485fa0c 'C' bookmarks: Y + | | + | o 1: 6c81ed0049f8 'B' bookmarks: X + |/ + o 0: 1994f17a630e 'A' bookmarks: + + $ hg rebase -s 1 -d 5 + rebasing 1:6c81ed0049f8 "B" (X) + rebasing 2:49cb3485fa0c "C" (Y) + rebasing 3:67a385d4e6f2 "D" (Z) + merging d + warning: conflicts while merging d! 
(edit, then use 'hg resolve --mark') + unresolved conflicts (see hg resolve, then hg rebase --continue) + [1] + $ hg rebase --stop + 1 new orphan changesets + $ hg log -GT "{rev}: {node|short} '{desc}' bookmarks: {bookmarks}\n" + o 7: 9c86c650b686 'C' bookmarks: Y + | + o 6: 9b87b54e5fd8 'B' bookmarks: X + | + @ 5: 39adf30bc1be 'conflict with d' bookmarks: + | + o 4: 9c1e55f411b6 'E' bookmarks: + | + | * 3: 67a385d4e6f2 'D' bookmarks: Z + | | + | x 2: 49cb3485fa0c 'C' bookmarks: + | | + | x 1: 6c81ed0049f8 'B' bookmarks: + |/ + o 0: 1994f17a630e 'A' bookmarks: +
--- a/tests/test-rebase-parameters.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-rebase-parameters.t Mon Aug 20 09:48:08 2018 -0700 @@ -61,7 +61,7 @@ [1] $ hg rebase --continue --abort - abort: cannot use both abort and continue + abort: cannot use --abort with --continue [255] $ hg rebase --continue --collapse
--- a/tests/test-remove.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-remove.t Mon Aug 20 09:48:08 2018 -0700 @@ -520,6 +520,14 @@ deleting [===========================================>] 1/1\r (no-eol) (esc) \r (no-eol) (esc) removing a + $ hg remove a -nv --color debug + \r (no-eol) (esc) + deleting [===========================================>] 1/1\r (no-eol) (esc) + \r (no-eol) (esc) + \r (no-eol) (esc) + deleting [===========================================>] 1/1\r (no-eol) (esc) + \r (no-eol) (esc) + [addremove.removed ui.status|removing a] $ hg diff $ cat >> .hg/hgrc <<EOF
--- a/tests/test-resolve.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-resolve.t Mon Aug 20 09:48:08 2018 -0700 @@ -373,4 +373,219 @@ $ hg resolve -l +resolve -m can be configured to look for remaining conflict markers + $ hg up -qC 2 + $ hg merge -q --tool=internal:merge 1 + warning: conflicts while merging file1! (edit, then use 'hg resolve --mark') + warning: conflicts while merging file2! (edit, then use 'hg resolve --mark') + [1] + $ hg resolve -l + U file1 + U file2 + $ echo 'remove markers' > file1 + $ hg --config commands.resolve.mark-check=abort resolve -m + warning: the following files still have conflict markers: + file2 + abort: conflict markers detected + (use --all to mark anyway) + [255] + $ hg resolve -l + U file1 + U file2 +Try with --all from the hint + $ hg --config commands.resolve.mark-check=abort resolve -m --all + warning: the following files still have conflict markers: + file2 + (no more unresolved files) + $ hg resolve -l + R file1 + R file2 +Test option value 'warn' + $ hg resolve --unmark + $ hg resolve -l + U file1 + U file2 + $ hg --config commands.resolve.mark-check=warn resolve -m + warning: the following files still have conflict markers: + file2 + (no more unresolved files) + $ hg resolve -l + R file1 + R file2 +If the file is already marked as resolved, we don't warn about it + $ hg resolve --unmark file1 + $ hg resolve -l + U file1 + R file2 + $ hg --config commands.resolve.mark-check=warn resolve -m + (no more unresolved files) + $ hg resolve -l + R file1 + R file2 +If the user passes an invalid value, we treat it as 'none'. + $ hg resolve --unmark + $ hg resolve -l + U file1 + U file2 + $ hg --config commands.resolve.mark-check=nope resolve -m + (no more unresolved files) + $ hg resolve -l + R file1 + R file2 +Test explicitly setting the option to 'none' + $ hg resolve --unmark + $ hg resolve -l + U file1 + U file2 + $ hg --config commands.resolve.mark-check=none resolve -m + (no more unresolved files) + $ hg resolve -l + R file1 + R file2 + $ cd .. + +====================================================== +Test 'hg resolve' confirm config option functionality | +====================================================== + $ cat >> $HGRCPATH << EOF + > [extensions] + > rebase= + > EOF + + $ hg init repo2 + $ cd repo2 + + $ echo boss > boss + $ hg ci -Am "add boss" + adding boss + + $ for emp in emp1 emp2 emp3; do echo work > $emp; done; + $ hg ci -Aqm "added emp1 emp2 emp3" + + $ hg up 0 + 0 files updated, 0 files merged, 3 files removed, 0 files unresolved + + $ for emp in emp1 emp2 emp3; do echo nowork > $emp; done; + $ hg ci -Aqm "added lazy emp1 emp2 emp3" + + $ hg log -GT "{rev} {node|short} {firstline(desc)}\n" + @ 2 0acfd4a49af0 added lazy emp1 emp2 emp3 + | + | o 1 f30f98a8181f added emp1 emp2 emp3 + |/ + o 0 88660038d466 add boss + + $ hg rebase -s 1 -d 2 + rebasing 1:f30f98a8181f "added emp1 emp2 emp3" + merging emp1 + merging emp2 + merging emp3 + warning: conflicts while merging emp1! (edit, then use 'hg resolve --mark') + warning: conflicts while merging emp2! (edit, then use 'hg resolve --mark') + warning: conflicts while merging emp3! (edit, then use 'hg resolve --mark') + unresolved conflicts (see hg resolve, then hg rebase --continue) + [1] + +Test when commands.resolve.confirm config option is not set: +=========================================================== + $ hg resolve --all + merging emp1 + merging emp2 + merging emp3 + warning: conflicts while merging emp1! 
(edit, then use 'hg resolve --mark') + warning: conflicts while merging emp2! (edit, then use 'hg resolve --mark') + warning: conflicts while merging emp3! (edit, then use 'hg resolve --mark') + [1] + +Test when config option is set: +============================== + $ cat >> $HGRCPATH << EOF + > [ui] + > interactive = True + > [commands] + > resolve.confirm = True + > EOF + + $ hg resolve + abort: no files or directories specified + (use --all to re-merge all unresolved files) + [255] + $ hg resolve --all << EOF + > n + > EOF + re-merge all unresolved files (yn)? n + abort: user quit + [255] + + $ hg resolve --all << EOF + > y + > EOF + re-merge all unresolved files (yn)? y + merging emp1 + merging emp2 + merging emp3 + warning: conflicts while merging emp1! (edit, then use 'hg resolve --mark') + warning: conflicts while merging emp2! (edit, then use 'hg resolve --mark') + warning: conflicts while merging emp3! (edit, then use 'hg resolve --mark') + [1] + +Test that commands.resolve.confirm respects --mark option (only when no pattern args are given): +=============================================================================================== + + $ hg resolve -m emp1 + $ hg resolve -l + R emp1 + U emp2 + U emp3 + + $ hg resolve -m << EOF + > n + > EOF + mark all unresolved files as resolved (yn)? n + abort: user quit + [255] + + $ hg resolve -m << EOF + > y + > EOF + mark all unresolved files as resolved (yn)? y + (no more unresolved files) + continue: hg rebase --continue + $ hg resolve -l + R emp1 + R emp2 + R emp3 + +Test that commands.resolve.confirm respects --unmark option (only when no pattern args are given): +=============================================================================================== + + $ hg resolve -u emp1 + + $ hg resolve -l + U emp1 + R emp2 + R emp3 + + $ hg resolve -u << EOF + > n + > EOF + mark all resolved files as unresolved (yn)? n + abort: user quit + [255] + + $ hg resolve -m << EOF + > y + > EOF + mark all unresolved files as resolved (yn)? y + (no more unresolved files) + continue: hg rebase --continue + + $ hg resolve -l + R emp1 + R emp2 + R emp3 + + $ hg rebase --abort + rebase aborted + $ cd ..
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-revisions.t Mon Aug 20 09:48:08 2018 -0700 @@ -0,0 +1,45 @@ + $ hg init repo + $ cd repo + + $ echo 0 > a + $ hg ci -qAm 0 + $ for i in 5 8 14 43 167; do + > hg up -q 0 + > echo $i > a + > hg ci -qm $i + > done + $ cat <<EOF >> .hg/hgrc + > [alias] + > l = log -T '{rev}:{shortest(node,1)}\n' + > EOF + + $ hg l + 5:00f + 4:7ba5d + 3:7ba57 + 2:72 + 1:9 + 0:b + $ cat <<EOF >> .hg/hgrc + > [experimental] + > revisions.disambiguatewithin=not 4 + > EOF + $ hg l + 5:0 + 4:7ba5d + 3:7b + 2:72 + 1:9 + 0:b +9 was unambiguous and still is + $ hg l -r 9 + 1:9 +7 was ambiguous and still is + $ hg l -r 7 + abort: 00changelog.i@7: ambiguous identifier! + [255] +7b is no longer ambiguous + $ hg l -r 7b + 3:7b + + $ cd ..
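The new experimental.revisions.disambiguatewithin option changes the set of nodes a short hash has to be unique against. The underlying idea, the shortest hex prefix that is unique within a candidate set, fits in a few lines of plain Python; this is an illustration over a list of hex strings with made-up values, not Mercurial's index-based implementation:

def shortest_unique(node, nodes):
    # Grow the prefix until exactly one candidate matches it.
    for width in range(1, len(node) + 1):
        prefix = node[:width]
        if sum(1 for n in nodes if n.startswith(prefix)) == 1:
            return prefix
    return node

candidates = ['00f1', '7ba5d', '7ba57', '72ab', '9e01']
assert shortest_unique('00f1', candidates) == '0'
assert shortest_unique('7ba5d', candidates) == '7ba5d'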
--- a/tests/test-revset.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-revset.t Mon Aug 20 09:48:08 2018 -0700 @@ -1773,6 +1773,16 @@ Test hexadecimal revision $ log 'id(2)' + $ log 'id(5)' + 2 + $ hg --config experimental.revisions.prefixhexnode=yes log --template '{rev}\n' -r 'id(x5)' + 2 + $ hg --config experimental.revisions.prefixhexnode=yes log --template '{rev}\n' -r 'x5' + 2 + $ hg --config experimental.revisions.prefixhexnode=yes log --template '{rev}\n' -r 'id(x)' + $ hg --config experimental.revisions.prefixhexnode=yes log --template '{rev}\n' -r 'x' + abort: 00changelog.i@: ambiguous identifier! + [255] $ log 'id(23268)' 4 $ log 'id(2785f51eece)'
--- a/tests/test-revset2.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-revset2.t Mon Aug 20 09:48:08 2018 -0700 @@ -346,7 +346,7 @@ test ',' in `_list` $ log '0,1' hg: parse error: can't use a list in this context - (see hg help "revsets.x or y") + (see 'hg help "revsets.x or y"') [255] $ try '0,1,2' (list @@ -354,7 +354,7 @@ (symbol '1') (symbol '2')) hg: parse error: can't use a list in this context - (see hg help "revsets.x or y") + (see 'hg help "revsets.x or y"') [255] test that chained `or` operations make balanced addsets
--- a/tests/test-run-tests.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-run-tests.t Mon Aug 20 09:48:08 2018 -0700 @@ -850,7 +850,7 @@ > EOF --- $TESTTMP/test-cases.t - +++ $TESTTMP/test-cases.t.a.err + +++ $TESTTMP/test-cases.t#a.err @@ -1,6 +1,7 @@ #testcases a b #if a @@ -861,7 +861,7 @@ $ echo 2 Accept this change? [n] . --- $TESTTMP/test-cases.t - +++ $TESTTMP/test-cases.t.b.err + +++ $TESTTMP/test-cases.t#b.err @@ -5,4 +5,5 @@ #endif #if b @@ -896,6 +896,40 @@ .. # Ran 2 tests, 0 skipped, 0 failed. +When using multiple dimensions of "#testcases" in .t files + + $ cat > test-cases.t <<'EOF' + > #testcases a b + > #testcases c d + > #if a d + > $ echo $TESTCASE + > a#d + > #endif + > #if b c + > $ echo yes + > no + > #endif + > EOF + $ rt test-cases.t + .. + --- $TESTTMP/test-cases.t + +++ $TESTTMP/test-cases.t#b#c.err + @@ -6,5 +6,5 @@ + #endif + #if b c + $ echo yes + - no + + yes + #endif + + ERROR: test-cases.t#b#c output changed + !. + Failed test-cases.t#b#c: output changed + # Ran 4 tests, 0 skipped, 1 failed. + python hash seed: * (glob) + [1] + + $ rm test-cases.t#b#c.err $ rm test-cases.t (reinstall) @@ -1540,7 +1574,7 @@ $ rt . --- $TESTTMP/anothertests/cases/test-cases-abc.t - +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err + +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err @@ -7,7 +7,7 @@ $ V=C #endif @@ -1563,7 +1597,7 @@ $ rt --restart --- $TESTTMP/anothertests/cases/test-cases-abc.t - +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err + +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err @@ -7,7 +7,7 @@ $ V=C #endif @@ -1584,11 +1618,11 @@ --restart works with outputdir $ mkdir output - $ mv test-cases-abc.t.B.err output + $ mv test-cases-abc.t#B.err output $ rt --restart --outputdir output --- $TESTTMP/anothertests/cases/test-cases-abc.t - +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t.B.err + +++ $TESTTMP/anothertests/cases/output/test-cases-abc.t#B.err @@ -7,7 +7,7 @@ $ V=C #endif @@ -1631,7 +1665,7 @@ $ rt "test-cases-abc.t#B" --- $TESTTMP/anothertests/cases/test-cases-abc.t - +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err + +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err @@ -7,7 +7,7 @@ $ V=C #endif @@ -1654,7 +1688,7 @@ $ rt test-cases-abc.t#B test-cases-abc.t#C --- $TESTTMP/anothertests/cases/test-cases-abc.t - +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err + +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err @@ -7,7 +7,7 @@ $ V=C #endif @@ -1677,7 +1711,7 @@ $ rt test-cases-abc.t#B test-cases-abc.t#D --- $TESTTMP/anothertests/cases/test-cases-abc.t - +++ $TESTTMP/anothertests/cases/test-cases-abc.t.B.err + +++ $TESTTMP/anothertests/cases/test-cases-abc.t#B.err @@ -7,7 +7,7 @@ $ V=C #endif @@ -1711,7 +1745,7 @@ $ rt test-cases-advanced-cases.t --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t - +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t.case-with-dashes.err + +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err @@ -1,3 +1,3 @@ #testcases simple case-with-dashes casewith_-.chars $ echo $TESTCASE @@ -1721,7 +1755,7 @@ ERROR: test-cases-advanced-cases.t#case-with-dashes output changed ! 
--- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t - +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t.casewith_-.chars.err + +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err @@ -1,3 +1,3 @@ #testcases simple case-with-dashes casewith_-.chars $ echo $TESTCASE @@ -1739,7 +1773,7 @@ $ rt "test-cases-advanced-cases.t#case-with-dashes" --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t - +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t.case-with-dashes.err + +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#case-with-dashes.err @@ -1,3 +1,3 @@ #testcases simple case-with-dashes casewith_-.chars $ echo $TESTCASE @@ -1756,7 +1790,7 @@ $ rt "test-cases-advanced-cases.t#casewith_-.chars" --- $TESTTMP/anothertests/cases/test-cases-advanced-cases.t - +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t.casewith_-.chars.err + +++ $TESTTMP/anothertests/cases/test-cases-advanced-cases.t#casewith_-.chars.err @@ -1,3 +1,3 @@ #testcases simple case-with-dashes casewith_-.chars $ echo $TESTCASE
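With two #testcases lines, the runner now builds one variant per combination and joins the case names with "#", which is why the failure file above is test-cases.t#b#c.err. The combination step is a plain cartesian product; a sketch of the naming only (assumed shape, not run-tests.py's actual code):

import itertools

dimensions = [['a', 'b'], ['c', 'd']]    # the two #testcases lines
variants = ['#'.join(combo) for combo in itertools.product(*dimensions)]
assert variants == ['a#c', 'a#d', 'b#c', 'b#d']    # "Ran 4 tests"

# Each failing variant writes test-cases.t#<variant>.err
errnames = ['test-cases.t#%s.err' % v for v in variants]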
--- a/tests/test-share.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-share.t Mon Aug 20 09:48:08 2018 -0700 @@ -32,6 +32,7 @@ [1] $ ls -1 ../repo1/.hg/cache branch2-served + manifestfulltextcache rbc-names-v1 rbc-revs-v1 tags2-visible @@ -297,15 +298,15 @@ test behavior when sharing a shared repo - $ hg share -B repo3 repo5 + $ hg share -B repo3 missingdir/repo5 updating working directory 2 files updated, 0 files merged, 0 files removed, 0 files unresolved - $ cd repo5 + $ cd missingdir/repo5 $ hg book bm1 3:b87954705719 bm3 4:62f4ded848e4 bm4 5:92793bfc8cad - $ cd .. + $ cd ../.. test what happens when an active bookmark is deleted
--- a/tests/test-ssh-bundle1.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-ssh-bundle1.t Mon Aug 20 09:48:08 2018 -0700 @@ -59,10 +59,12 @@ non-existent absolute path +#if no-msys $ hg clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" ssh://user@dummy//`pwd`/nonexistent local remote: abort: repository /$TESTTMP/nonexistent not found! abort: no suitable response from remote hg! [255] +#endif clone remote via stream @@ -502,7 +504,7 @@ $ cat dummylog Got arguments 1:user@dummy 2:hg -R nonexistent serve --stdio - Got arguments 1:user@dummy 2:hg -R /$TESTTMP/nonexistent serve --stdio + Got arguments 1:user@dummy 2:hg -R /$TESTTMP/nonexistent serve --stdio (no-msys !) Got arguments 1:user@dummy 2:hg -R remote serve --stdio Got arguments 1:user@dummy 2:hg -R local-stream serve --stdio (no-reposimplestore !) Got arguments 1:user@dummy 2:hg -R remote serve --stdio (no-reposimplestore !)
--- a/tests/test-status-color.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-status-color.t Mon Aug 20 09:48:08 2018 -0700 @@ -168,10 +168,10 @@ $ touch modified removed deleted ignored $ echo "^ignored$" > .hgignore $ hg ci -A -m 'initial checkin' - adding .hgignore - adding deleted - adding modified - adding removed + \x1b[0;32madding .hgignore\x1b[0m (esc) + \x1b[0;32madding deleted\x1b[0m (esc) + \x1b[0;32madding modified\x1b[0m (esc) + \x1b[0;32madding removed\x1b[0m (esc) $ hg log --color=debug [log.changeset changeset.draft|changeset: 0:389aef86a55e] [log.tag|tag: tip] @@ -296,10 +296,10 @@ $ touch modified removed deleted ignored $ echo "^ignored$" > .hgignore $ hg commit -A -m 'initial checkin' - adding .hgignore - adding deleted - adding modified - adding removed + \x1b[0;32madding .hgignore\x1b[0m (esc) + \x1b[0;32madding deleted\x1b[0m (esc) + \x1b[0;32madding modified\x1b[0m (esc) + \x1b[0;32madding removed\x1b[0m (esc) $ touch added unknown ignored $ hg add added $ echo "test" >> modified @@ -393,6 +393,7 @@ $ hg unknowncommand > /dev/null hg: unknown command 'unknowncommand' + (use 'hg help' for a list of commands) [255] color coding of error message without curses @@ -400,6 +401,7 @@ $ echo 'raise ImportError' > curses.py $ PYTHONPATH=`pwd`:$PYTHONPATH hg unknowncommand > /dev/null hg: unknown command 'unknowncommand' + (use 'hg help' for a list of commands) [255] $ cd ..
--- a/tests/test-strict.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-strict.t Mon Aug 20 09:48:08 2018 -0700 @@ -15,29 +15,7 @@ $ hg an a hg: unknown command 'an' - Mercurial Distributed SCM - - basic commands: - - add add the specified files on the next commit - annotate show changeset information by line for each file - clone make a copy of an existing repository - commit commit the specified files or all outstanding changes - diff diff repository (or selected files) - export dump the header and diffs for one or more changesets - forget forget the specified files on the next commit - init create a new repository in the given directory - log show revision history of entire repository or files - merge merge another revision into working directory - pull pull changes from the specified source - push push changes to the specified destination - remove remove the specified files on the next commit - serve start stand-alone webserver - status show changed files in the working directory - summary summarize working directory state - update update working directory (or switch revisions) - - (use 'hg help' for the full list of commands or 'hg -v' for details) + (use 'hg help' for a list of commands) [255] $ hg annotate a 0: a
--- a/tests/test-template-functions.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-template-functions.t Mon Aug 20 09:48:08 2018 -0700 @@ -892,6 +892,11 @@ $ hg log -r 4 -T '{rev}:{shortest(node, 0)}\n' --hidden 4:107 + $ hg --config experimental.revisions.prefixhexnode=yes log -r 4 -T '{rev}:{shortest(node, 0)}\n' + 4:x10 + $ hg --config experimental.revisions.prefixhexnode=yes log -r 4 -T '{rev}:{shortest(node, 0)}\n' --hidden + 4:x10 + node 'c562' should be unique if the other 'c562' nodes are hidden (but we don't try the slow path to filter out hidden nodes for now)
--- a/tests/test-template-keywords.t Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/test-template-keywords.t Mon Aug 20 09:48:08 2018 -0700 @@ -91,7 +91,7 @@ $ for key in author branch branches date desc file_adds file_dels file_mods \ > file_copies file_copies_switch files \ > manifest node parents rev tags diffstat extras \ - > p1rev p2rev p1node p2node; do + > p1rev p2rev p1node p2node user; do > for mode in '' --verbose --debug; do > hg log $mode --template "$key$mode: {$key}\n" > done @@ -702,6 +702,33 @@ p2node--debug: 0000000000000000000000000000000000000000 p2node--debug: 0000000000000000000000000000000000000000 p2node--debug: 0000000000000000000000000000000000000000 + user: test + user: User Name <user@hostname> + user: person + user: person + user: person + user: person + user: other@place + user: A. N. Other <other@place> + user: User Name <user@hostname> + user--verbose: test + user--verbose: User Name <user@hostname> + user--verbose: person + user--verbose: person + user--verbose: person + user--verbose: person + user--verbose: other@place + user--verbose: A. N. Other <other@place> + user--verbose: User Name <user@hostname> + user--debug: test + user--debug: User Name <user@hostname> + user--debug: person + user--debug: person + user--debug: person + user--debug: person + user--debug: other@place + user--debug: A. N. Other <other@place> + user--debug: User Name <user@hostname> Add a dummy commit to make up for the instability of the above:
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-util.py Mon Aug 20 09:48:08 2018 -0700 @@ -0,0 +1,137 @@ +# unit tests for mercurial.util utilities +from __future__ import absolute_import + +import contextlib +import itertools +import unittest + +from mercurial import pycompat, util, utils + +@contextlib.contextmanager +def mocktimer(incr=0.1, *additional_targets): + """Replaces util.timer and additional_targets with a mock + + The timer starts at 0. On each call the time is incremented by the value + of incr. If incr is an iterable, then the time is incremented by the + next value from that iterable, looping in a cycle when reaching the end. + + additional_targets must be a sequence of (object, attribute_name) tuples; + the mock is set with setattr(object, attribute_name, mock). + + """ + time = [0] + try: + incr = itertools.cycle(incr) + except TypeError: + incr = itertools.repeat(incr) + + def timer(): + time[0] += next(incr) + return time[0] + + # record original values + orig = util.timer + additional_origs = [(o, a, getattr(o, a)) for o, a in additional_targets] + + # mock out targets + util.timer = timer + for obj, attr in additional_targets: + setattr(obj, attr, timer) + + try: + yield + finally: + # restore originals + util.timer = orig + for args in additional_origs: + setattr(*args) + +# attr.s default factory for util.timedstats.start binds the timer we +# need to mock out. +_start_default = (util.timedcmstats.start.default, 'factory') + +@contextlib.contextmanager +def capturestderr(): + """Replace utils.procutil.stderr with a pycompat.bytesio instance + + The instance is made available as the return value of __enter__. + + This contextmanager is reentrant. + + """ + orig = utils.procutil.stderr + utils.procutil.stderr = pycompat.bytesio() + try: + yield utils.procutil.stderr + finally: + utils.procutil.stderr = orig + +class timedtests(unittest.TestCase): + def testtimedcmstatsstr(self): + stats = util.timedcmstats() + self.assertEqual(str(stats), '<unknown>') + self.assertEqual(bytes(stats), b'<unknown>') + stats.elapsed = 12.34 + self.assertEqual(str(stats), pycompat.sysstr(util.timecount(12.34))) + self.assertEqual(bytes(stats), util.timecount(12.34)) + + def testtimedcmcleanexit(self): + # timestamps 1, 4, elapsed time of 4 - 1 = 3 + with mocktimer([1, 3], _start_default): + with util.timedcm() as stats: + # actual context doesn't matter + pass + + self.assertEqual(stats.start, 1) + self.assertEqual(stats.elapsed, 3) + self.assertEqual(stats.level, 1) + + def testtimedcmnested(self): + # timestamps 1, 3, 6, 10, elapsed times of 6 - 3 = 3 and 10 - 1 = 9 + with mocktimer([1, 2, 3, 4], _start_default): + with util.timedcm() as outer_stats: + with util.timedcm() as inner_stats: + # actual context doesn't matter + pass + + self.assertEqual(outer_stats.start, 1) + self.assertEqual(outer_stats.elapsed, 9) + self.assertEqual(outer_stats.level, 1) + + self.assertEqual(inner_stats.start, 3) + self.assertEqual(inner_stats.elapsed, 3) + self.assertEqual(inner_stats.level, 2) + + def testtimedcmexception(self): + # timestamps 1, 4, elapsed time of 4 - 1 = 3 + with mocktimer([1, 3], _start_default): + try: + with util.timedcm() as stats: + raise ValueError() + except ValueError: + pass + + self.assertEqual(stats.start, 1) + self.assertEqual(stats.elapsed, 3) + self.assertEqual(stats.level, 1) + + def testtimeddecorator(self): + @util.timed + def testfunc(callcount=1): + callcount -= 1 + if callcount: + testfunc(callcount) + + # timestamps 1, 2, 3, 4, elapsed time of 3 - 2 = 1 
and 4 - 1 = 3 + with mocktimer(1, _start_default): + with capturestderr() as out: + testfunc(2) + + self.assertEqual(out.getvalue(), ( + b' testfunc: 1.000 s\n' + b' testfunc: 3.000 s\n' + )) + +if __name__ == '__main__': + import silenttestrunner + silenttestrunner.main(__name__)
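The timestamp arithmetic in the comments above falls straight out of mocktimer's contract: the clock starts at 0 and each timer() call advances it by the next increment, cycling through incr. The same contract in isolation, without the util/attr plumbing the real tests need:

import itertools

def make_timer(incr):
    incrs = itertools.cycle(incr)
    time = [0]
    def timer():
        time[0] += next(incrs)
        return time[0]
    return timer

t = make_timer([1, 2, 3, 4])
assert [t() for _ in range(4)] == [1, 3, 6, 10]
# nested case above: inner elapsed 6 - 3 = 3, outer elapsed 10 - 1 = 9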
--- a/tests/wireprotohelpers.sh Sun Aug 19 13:27:02 2018 +0900 +++ b/tests/wireprotohelpers.sh Mon Aug 20 09:48:08 2018 -0700 @@ -20,19 +20,19 @@ wireprotov2server, ) -@wireprotov1server.wireprotocommand('customreadonly', permission='pull') +@wireprotov1server.wireprotocommand(b'customreadonly', permission=b'pull') def customreadonlyv1(repo, proto): return wireprototypes.bytesresponse(b'customreadonly bytes response') -@wireprotov2server.wireprotocommand('customreadonly', permission='pull') +@wireprotov2server.wireprotocommand(b'customreadonly', permission=b'pull') def customreadonlyv2(repo, proto): return wireprototypes.cborresponse(b'customreadonly bytes response') -@wireprotov1server.wireprotocommand('customreadwrite', permission='push') +@wireprotov1server.wireprotocommand(b'customreadwrite', permission=b'push') def customreadwrite(repo, proto): return wireprototypes.bytesresponse(b'customreadwrite bytes response') -@wireprotov2server.wireprotocommand('customreadwrite', permission='push') +@wireprotov2server.wireprotocommand(b'customreadwrite', permission=b'push') def customreadwritev2(repo, proto): return wireprototypes.cborresponse(b'customreadwrite bytes response') EOF
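The b'' prefixes added here matter on Python 3, where bytes and str never compare equal: if the wireproto registry keys commands by bytes (which is what this change suggests), a native-str name would simply never match. A plain-dict illustration of the failure mode under Python 3, not the actual registry:

commands = {b'customreadonly': 'handler'}

assert b'customreadonly' in commands     # bytes key: lookup hits
assert 'customreadonly' not in commands  # str key on py3: lookup misses
assert b'customreadonly' != 'customreadonly'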